code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
SetCodePage('ms932')
CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=
'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,
EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[
'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])
BuildStringList('@FileName', 'Vogt')
DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=
4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,
Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,
Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,
Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,
InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)
AddCharChip('ED6_DT07/CH01000 ._CH')
AddCharChipPat('ED6_DT07/CH01000P._CP')
DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,
ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,
TalkFunctionIndex=0, TalkScenaIndex=3)
ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',
'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')
def Function_0_D2():
pass
label('Function_0_D2')
Return()
def Function_1_D3():
pass
label('Function_1_D3')
OP_B0(0, 120)
OP_1C(0, 0, 5)
Return()
def Function_2_DD():
pass
label('Function_2_DD')
RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),
scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')
OP_99(254, 0, 7, 1650)
Jump('loc_244')
label('loc_102')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')
OP_99(254, 1, 7, 1600)
Jump('loc_244')
label('loc_11B')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')
OP_99(254, 2, 7, 1550)
Jump('loc_244')
label('loc_134')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')
OP_99(254, 3, 7, 1500)
Jump('loc_244')
label('loc_14D')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')
OP_99(254, 4, 7, 1450)
Jump('loc_244')
label('loc_166')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')
OP_99(254, 5, 7, 1400)
Jump('loc_244')
label('loc_17F')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')
OP_99(254, 6, 7, 1350)
Jump('loc_244')
label('loc_198')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')
OP_99(254, 0, 7, 1655)
Jump('loc_244')
label('loc_1B1')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')
OP_99(254, 1, 7, 1605)
Jump('loc_244')
label('loc_1CA')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')
OP_99(254, 2, 7, 1555)
Jump('loc_244')
label('loc_1E3')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')
OP_99(254, 3, 7, 1505)
Jump('loc_244')
label('loc_1FC')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')
OP_99(254, 4, 7, 1455)
Jump('loc_244')
label('loc_215')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')
OP_99(254, 5, 7, 1405)
Jump('loc_244')
label('loc_22E')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')
OP_99(254, 6, 7, 1355)
label('loc_244')
Jc((scpexpr(EXPR_PUSH_LONG, 1), scpexpr(EXPR_END)), 'loc_259')
OP_99(254, 0, 7, 1500)
Jump('loc_244')
label('loc_259')
Return()
def Function_3_25A():
pass
label('Function_3_25A')
TalkBegin(254)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr
(EXPR_END)), 'loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_2B2')
ChrTalk(254, ('I reckon my happiness is right here in this\x01',
'lighthouse.\x02'))
CloseMessageWindow()
Jump('loc_34C')
label('loc_2B2')
ChrTalk(254, ("There's actually a shining stone here in this\x01",
"lighthouse, though, even if it's not what you\x01",
'are looking for.\x02'))
CloseMessageWindow()
ChrTalk(254, "I reckon that's my happiness...\x02")
CloseMessageWindow()
OP_A2(0)
label('loc_34C')
Jump('loc_6C1')
label('loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr
(EXPR_END)), 'loc_477')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_3DF')
ChrTalk(254, ("There's no shame in relying on others for\x01",
"help if you need it! Grab 'em by the collar\x01",
'and scream for help if you need it!\x02'))
CloseMessageWindow()
Jump('loc_474')
label('loc_3DF')
ChrTalk(254, "You lookin' for some help, young lady?\x02")
CloseMessageWindow()
ChrTalk(254, 'What do you need?\x02')
CloseMessageWindow()
ChrTalk(334, ("#1714FN-No. I'll be fine, honestly...\x02\x03",
'#1713FThank you for offering, sir.\x02'))
CloseMessageWindow()
OP_A2(0)
label('loc_474')
Jump('loc_6C1')
label('loc_477')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1280, 0, 202300, 270)
Sleep(1000)
ChrTalk(254, ("I swear, this is EXACTLY what's wrong\x01",
'with youngins these days...\x02'))
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What are you doing here, young lady?\x02')
CloseMessageWindow()
ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\x02\x03',
"You haven't seen a young girl other\x01",
'than me in here recently have you?\x02'))
CloseMessageWindow()
ChrTalk(254, "A young girl? 'Fraid not.\x02")
CloseMessageWindow()
ChrTalk(334, ('#1713FI-I see...\x02\x03', 'Sorry for troubling you...\x02')
)
CloseMessageWindow()
def lambda_639():
label('loc_639')
TurnDirection(254, 334, 0)
OP_48()
Jump('loc_639')
QueueWorkItem2(16, 3, lambda_639)
OP_43(334, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, 'They sure are a pain.\x02')
CloseMessageWindow()
OP_A2(12100)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_6C1')
Jump('loc_AE8')
label('loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr
(EXPR_END)), 'loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_721')
ChrTalk(254, 'A happiness stone, you say?\x02')
CloseMessageWindow()
ChrTalk(254, "You think somethin' like that exists?\x02")
CloseMessageWindow()
Jump('loc_ADE')
label('loc_721')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1250, 0, 202480, 270)
SetChrPos(335, -1060, 0, 201620, 270)
Sleep(1000)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What might you two be doing here?\x02')
CloseMessageWindow()
ChrTalk(334, '#1718FHello!\x02')
CloseMessageWindow()
OP_62(334, 0, 1600, 38, 39, 250, 1)
Sleep(500)
OP_63(334)
ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\x01',
"aren't they?\x02\x03",
"#1718FSir, you haven't seen a happiness stone before,\x01",
'have you?\x02'))
CloseMessageWindow()
ChrTalk(254, 'A-A happiness stone?!\x02')
CloseMessageWindow()
ChrTalk(335, "#1730FThey're really shiny and pretty!\x02")
CloseMessageWindow()
ChrTalk(254, ("N-No, I don't recall ever seein' any\x01",
'such thing in all my years...\x02'))
CloseMessageWindow()
ChrTalk(334, ("#1716FOh... That's too bad...\x02\x03",
'#1710FWell, thank you, anyway.\x02'))
CloseMessageWindow()
TurnDirection(334, 335, 400)
Sleep(400)
ChrTalk(334, "#1718FLet's keep looking, Polly! \x02")
CloseMessageWindow()
OP_43(334, 3, 0, 4)
Sleep(2000)
ChrTalk(335, '#1731FI hope your back feels better, mister!\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
def lambda_A1A():
label('loc_A1A')
TurnDirection(254, 335, 0)
OP_48()
Jump('loc_A1A')
QueueWorkItem2(16, 3, lambda_A1A)
OP_43(335, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, "...They're sharp little devils, aren't they?\x02")
CloseMessageWindow()
Sleep(500)
ChrTalk(254, 'A happiness stone, hmm...?\x02')
CloseMessageWindow()
OP_A2(12099)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_ADE')
Jump('loc_AE8')
label('loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr
(EXPR_END)), 'loc_AE8')
label('loc_AE8')
TalkEnd(254)
Return()
def Function_4_AEC():
pass
label('Function_4_AEC')
def lambda_AF2():
OP_8E(254, 2820, 0, 205060, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_AF2)
WaitChrThread(254, 1)
def lambda_B12():
OP_8E(254, 2820, 0, 206910, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B12)
WaitChrThread(254, 1)
def lambda_B32():
OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B32)
WaitChrThread(254, 1)
Return()
def Function_5_B4D():
pass
label('Function_5_B4D')
TalkBegin(255)
TalkEnd(255)
Return()
SaveToFile()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
SetCodePage('ms932')
CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=
'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,
EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[
'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])
BuildStringList('@FileName', 'Vogt')
DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=
4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,
Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,
Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,
Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,
InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)
AddCharChip('ED6_DT07/CH01000 ._CH')
AddCharChipPat('ED6_DT07/CH01000P._CP')
DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,
ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,
TalkFunctionIndex=0, TalkScenaIndex=3)
ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',
'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')
def Function_0_D2():
pass
label('Function_0_D2')
Return()
def Function_1_D3():
pass
label('Function_1_D3')
OP_B0(0, 120)
OP_1C(0, 0, 5)
Return()
def Function_2_DD():
pass
label('Function_2_DD')
RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),
scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')
OP_99(254, 0, 7, 1650)
Jump('loc_244')
label('loc_102')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')
OP_99(254, 1, 7, 1600)
Jump('loc_244')
label('loc_11B')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')
OP_99(254, 2, 7, 1550)
Jump('loc_244')
label('loc_134')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')
OP_99(254, 3, 7, 1500)
Jump('loc_244')
label('loc_14D')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')
OP_99(254, 4, 7, 1450)
Jump('loc_244')
label('loc_166')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')
OP_99(254, 5, 7, 1400)
Jump('loc_244')
label('loc_17F')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')
OP_99(254, 6, 7, 1350)
Jump('loc_244')
label('loc_198')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')
OP_99(254, 0, 7, 1655)
Jump('loc_244')
label('loc_1B1')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')
OP_99(254, 1, 7, 1605)
Jump('loc_244')
label('loc_1CA')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')
OP_99(254, 2, 7, 1555)
Jump('loc_244')
label('loc_1E3')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')
OP_99(254, 3, 7, 1505)
Jump('loc_244')
label('loc_1FC')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')
OP_99(254, 4, 7, 1455)
Jump('loc_244')
label('loc_215')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')
OP_99(254, 5, 7, 1405)
Jump('loc_244')
label('loc_22E')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')
OP_99(254, 6, 7, 1355)
label('loc_244')
Jc((scpexpr(EXPR_PUSH_LONG, 1), scpexpr(EXPR_END)), 'loc_259')
OP_99(254, 0, 7, 1500)
Jump('loc_244')
label('loc_259')
Return()
def Function_3_25A():
pass
label('Function_3_25A')
TalkBegin(254)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr
(EXPR_END)), 'loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_2B2')
ChrTalk(254, ('I reckon my happiness is right here in this\x01',
'lighthouse.\x02'))
CloseMessageWindow()
Jump('loc_34C')
label('loc_2B2')
ChrTalk(254, ("There's actually a shining stone here in this\x01",
"lighthouse, though, even if it's not what you\x01",
'are looking for.\x02'))
CloseMessageWindow()
ChrTalk(254, "I reckon that's my happiness...\x02")
CloseMessageWindow()
OP_A2(0)
label('loc_34C')
Jump('loc_6C1')
label('loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr
(EXPR_END)), 'loc_477')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_3DF')
ChrTalk(254, ("There's no shame in relying on others for\x01",
"help if you need it! Grab 'em by the collar\x01",
'and scream for help if you need it!\x02'))
CloseMessageWindow()
Jump('loc_474')
label('loc_3DF')
ChrTalk(254, "You lookin' for some help, young lady?\x02")
CloseMessageWindow()
ChrTalk(254, 'What do you need?\x02')
CloseMessageWindow()
ChrTalk(334, ("#1714FN-No. I'll be fine, honestly...\x02\x03",
'#1713FThank you for offering, sir.\x02'))
CloseMessageWindow()
OP_A2(0)
label('loc_474')
Jump('loc_6C1')
label('loc_477')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1280, 0, 202300, 270)
Sleep(1000)
ChrTalk(254, ("I swear, this is EXACTLY what's wrong\x01",
'with youngins these days...\x02'))
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What are you doing here, young lady?\x02')
CloseMessageWindow()
ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\x02\x03',
"You haven't seen a young girl other\x01",
'than me in here recently have you?\x02'))
CloseMessageWindow()
ChrTalk(254, "A young girl? 'Fraid not.\x02")
CloseMessageWindow()
ChrTalk(334, ('#1713FI-I see...\x02\x03', 'Sorry for troubling you...\x02')
)
CloseMessageWindow()
def lambda_639():
label('loc_639')
TurnDirection(254, 334, 0)
OP_48()
Jump('loc_639')
QueueWorkItem2(16, 3, lambda_639)
OP_43(334, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, 'They sure are a pain.\x02')
CloseMessageWindow()
OP_A2(12100)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_6C1')
Jump('loc_AE8')
label('loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr
(EXPR_END)), 'loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_721')
ChrTalk(254, 'A happiness stone, you say?\x02')
CloseMessageWindow()
ChrTalk(254, "You think somethin' like that exists?\x02")
CloseMessageWindow()
Jump('loc_ADE')
label('loc_721')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1250, 0, 202480, 270)
SetChrPos(335, -1060, 0, 201620, 270)
Sleep(1000)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What might you two be doing here?\x02')
CloseMessageWindow()
ChrTalk(334, '#1718FHello!\x02')
CloseMessageWindow()
OP_62(334, 0, 1600, 38, 39, 250, 1)
Sleep(500)
OP_63(334)
ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\x01',
"aren't they?\x02\x03",
"#1718FSir, you haven't seen a happiness stone before,\x01",
'have you?\x02'))
CloseMessageWindow()
ChrTalk(254, 'A-A happiness stone?!\x02')
CloseMessageWindow()
ChrTalk(335, "#1730FThey're really shiny and pretty!\x02")
CloseMessageWindow()
ChrTalk(254, ("N-No, I don't recall ever seein' any\x01",
'such thing in all my years...\x02'))
CloseMessageWindow()
ChrTalk(334, ("#1716FOh... That's too bad...\x02\x03",
'#1710FWell, thank you, anyway.\x02'))
CloseMessageWindow()
TurnDirection(334, 335, 400)
Sleep(400)
ChrTalk(334, "#1718FLet's keep looking, Polly! \x02")
CloseMessageWindow()
OP_43(334, 3, 0, 4)
Sleep(2000)
ChrTalk(335, '#1731FI hope your back feels better, mister!\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
def lambda_A1A():
label('loc_A1A')
TurnDirection(254, 335, 0)
OP_48()
Jump('loc_A1A')
QueueWorkItem2(16, 3, lambda_A1A)
OP_43(335, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, "...They're sharp little devils, aren't they?\x02")
CloseMessageWindow()
Sleep(500)
ChrTalk(254, 'A happiness stone, hmm...?\x02')
CloseMessageWindow()
OP_A2(12099)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_ADE')
Jump('loc_AE8')
label('loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr
(EXPR_END)), 'loc_AE8')
label('loc_AE8')
TalkEnd(254)
Return()
def Function_4_AEC():
pass
label('Function_4_AEC')
def lambda_AF2():
OP_8E(254, 2820, 0, 205060, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_AF2)
WaitChrThread(254, 1)
def lambda_B12():
OP_8E(254, 2820, 0, 206910, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B12)
WaitChrThread(254, 1)
def lambda_B32():
OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B32)
WaitChrThread(254, 1)
Return()
def Function_5_B4D():
pass
label('Function_5_B4D')
TalkBegin(255)
TalkEnd(255)
Return()
SaveToFile()
Try(main)
<|reserved_special_token_1|>
from ED63RDScenarioHelper import *
def main():
SetCodePage('ms932')
CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=
'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,
EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[
'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])
BuildStringList('@FileName', 'Vogt')
DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=
4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,
Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,
Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,
Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,
InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)
AddCharChip('ED6_DT07/CH01000 ._CH')
AddCharChipPat('ED6_DT07/CH01000P._CP')
DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,
ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,
TalkFunctionIndex=0, TalkScenaIndex=3)
ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',
'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')
def Function_0_D2():
pass
label('Function_0_D2')
Return()
def Function_1_D3():
pass
label('Function_1_D3')
OP_B0(0, 120)
OP_1C(0, 0, 5)
Return()
def Function_2_DD():
pass
label('Function_2_DD')
RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),
scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')
OP_99(254, 0, 7, 1650)
Jump('loc_244')
label('loc_102')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')
OP_99(254, 1, 7, 1600)
Jump('loc_244')
label('loc_11B')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')
OP_99(254, 2, 7, 1550)
Jump('loc_244')
label('loc_134')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')
OP_99(254, 3, 7, 1500)
Jump('loc_244')
label('loc_14D')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')
OP_99(254, 4, 7, 1450)
Jump('loc_244')
label('loc_166')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')
OP_99(254, 5, 7, 1400)
Jump('loc_244')
label('loc_17F')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')
OP_99(254, 6, 7, 1350)
Jump('loc_244')
label('loc_198')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')
OP_99(254, 0, 7, 1655)
Jump('loc_244')
label('loc_1B1')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')
OP_99(254, 1, 7, 1605)
Jump('loc_244')
label('loc_1CA')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')
OP_99(254, 2, 7, 1555)
Jump('loc_244')
label('loc_1E3')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')
OP_99(254, 3, 7, 1505)
Jump('loc_244')
label('loc_1FC')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')
OP_99(254, 4, 7, 1455)
Jump('loc_244')
label('loc_215')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')
OP_99(254, 5, 7, 1405)
Jump('loc_244')
label('loc_22E')
Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(
EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')
OP_99(254, 6, 7, 1355)
label('loc_244')
Jc((scpexpr(EXPR_PUSH_LONG, 1), scpexpr(EXPR_END)), 'loc_259')
OP_99(254, 0, 7, 1500)
Jump('loc_244')
label('loc_259')
Return()
def Function_3_25A():
pass
label('Function_3_25A')
TalkBegin(254)
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr
(EXPR_END)), 'loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_2B2')
ChrTalk(254, ('I reckon my happiness is right here in this\x01',
'lighthouse.\x02'))
CloseMessageWindow()
Jump('loc_34C')
label('loc_2B2')
ChrTalk(254, ("There's actually a shining stone here in this\x01",
"lighthouse, though, even if it's not what you\x01",
'are looking for.\x02'))
CloseMessageWindow()
ChrTalk(254, "I reckon that's my happiness...\x02")
CloseMessageWindow()
OP_A2(0)
label('loc_34C')
Jump('loc_6C1')
label('loc_34F')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr
(EXPR_END)), 'loc_477')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(
EXPR_END)), 'loc_3DF')
ChrTalk(254, ("There's no shame in relying on others for\x01",
"help if you need it! Grab 'em by the collar\x01",
'and scream for help if you need it!\x02'))
CloseMessageWindow()
Jump('loc_474')
label('loc_3DF')
ChrTalk(254, "You lookin' for some help, young lady?\x02")
CloseMessageWindow()
ChrTalk(254, 'What do you need?\x02')
CloseMessageWindow()
ChrTalk(334, ("#1714FN-No. I'll be fine, honestly...\x02\x03",
'#1713FThank you for offering, sir.\x02'))
CloseMessageWindow()
OP_A2(0)
label('loc_474')
Jump('loc_6C1')
label('loc_477')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1280, 0, 202300, 270)
Sleep(1000)
ChrTalk(254, ("I swear, this is EXACTLY what's wrong\x01",
'with youngins these days...\x02'))
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What are you doing here, young lady?\x02')
CloseMessageWindow()
ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\x02\x03',
"You haven't seen a young girl other\x01",
'than me in here recently have you?\x02'))
CloseMessageWindow()
ChrTalk(254, "A young girl? 'Fraid not.\x02")
CloseMessageWindow()
ChrTalk(334, ('#1713FI-I see...\x02\x03', 'Sorry for troubling you...\x02')
)
CloseMessageWindow()
def lambda_639():
label('loc_639')
TurnDirection(254, 334, 0)
OP_48()
Jump('loc_639')
QueueWorkItem2(16, 3, lambda_639)
OP_43(334, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, 'They sure are a pain.\x02')
CloseMessageWindow()
OP_A2(12100)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_6C1')
Jump('loc_AE8')
label('loc_6C4')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr
(EXPR_END)), 'loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr
(EXPR_END)), 'loc_721')
ChrTalk(254, 'A happiness stone, you say?\x02')
CloseMessageWindow()
ChrTalk(254, "You think somethin' like that exists?\x02")
CloseMessageWindow()
Jump('loc_ADE')
label('loc_721')
EventBegin(1)
OP_8C(254, 270, 0)
Fade(1000)
OP_6D(-1600, 0, 202380, 0)
OP_67(0, 6000, -10000, 0)
OP_6B(3000, 0)
OP_6C(45000, 0)
OP_6E(280, 0)
SetChrPos(334, -1250, 0, 202480, 270)
SetChrPos(335, -1060, 0, 201620, 270)
Sleep(1000)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
OP_8C(254, 90, 500)
Sleep(500)
ChrTalk(254, 'Wh-What might you two be doing here?\x02')
CloseMessageWindow()
ChrTalk(334, '#1718FHello!\x02')
CloseMessageWindow()
OP_62(334, 0, 1600, 38, 39, 250, 1)
Sleep(500)
OP_63(334)
ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\x01',
"aren't they?\x02\x03",
"#1718FSir, you haven't seen a happiness stone before,\x01",
'have you?\x02'))
CloseMessageWindow()
ChrTalk(254, 'A-A happiness stone?!\x02')
CloseMessageWindow()
ChrTalk(335, "#1730FThey're really shiny and pretty!\x02")
CloseMessageWindow()
ChrTalk(254, ("N-No, I don't recall ever seein' any\x01",
'such thing in all my years...\x02'))
CloseMessageWindow()
ChrTalk(334, ("#1716FOh... That's too bad...\x02\x03",
'#1710FWell, thank you, anyway.\x02'))
CloseMessageWindow()
TurnDirection(334, 335, 400)
Sleep(400)
ChrTalk(334, "#1718FLet's keep looking, Polly! \x02")
CloseMessageWindow()
OP_43(334, 3, 0, 4)
Sleep(2000)
ChrTalk(335, '#1731FI hope your back feels better, mister!\x02')
CloseMessageWindow()
OP_62(16, 0, 2000, 2, 7, 80, 1)
OP_22(39, 0, 100)
Sleep(1000)
def lambda_A1A():
label('loc_A1A')
TurnDirection(254, 335, 0)
OP_48()
Jump('loc_A1A')
QueueWorkItem2(16, 3, lambda_A1A)
OP_43(335, 3, 0, 4)
Sleep(3000)
OP_62(16, 0, 2000, 24, 27, 250, 0)
Sleep(3000)
OP_63(16)
ChrTalk(254, 'I swear, kids these days...\x02')
CloseMessageWindow()
ChrTalk(254, "...They're sharp little devils, aren't they?\x02")
CloseMessageWindow()
Sleep(500)
ChrTalk(254, 'A happiness stone, hmm...?\x02')
CloseMessageWindow()
OP_A2(12099)
FadeToDark(2000, 0, -1)
OP_0D()
OP_44(16, 3)
NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)
IdleLoop()
label('loc_ADE')
Jump('loc_AE8')
label('loc_AE1')
Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr
(EXPR_END)), 'loc_AE8')
label('loc_AE8')
TalkEnd(254)
Return()
def Function_4_AEC():
pass
label('Function_4_AEC')
def lambda_AF2():
OP_8E(254, 2820, 0, 205060, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_AF2)
WaitChrThread(254, 1)
def lambda_B12():
OP_8E(254, 2820, 0, 206910, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B12)
WaitChrThread(254, 1)
def lambda_B32():
OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)
ExitThread()
QueueWorkItem(254, 1, lambda_B32)
WaitChrThread(254, 1)
Return()
def Function_5_B4D():
pass
label('Function_5_B4D')
TalkBegin(255)
TalkEnd(255)
Return()
SaveToFile()
Try(main)
<|reserved_special_token_1|>
from ED63RDScenarioHelper import *
def main():
SetCodePage("ms932")
CreateScenaFile(
FileName = 'C2219 ._SN',
MapName = 'Ruan',
Location = 'C2219.x',
MapIndex = 84,
MapDefaultBGM = "ed60015",
Flags = 0,
EntryFunctionIndex = 0xFFFF,
Reserved = 0,
IncludedScenario = [
'ED6_DT21/C2219 ._SN',
'',
'',
'',
'',
'',
'',
''
],
)
BuildStringList(
'@FileName', # 8
'Vogt', # 9
)
DeclEntryPoint(
Unknown_00 = 0,
Unknown_04 = 0,
Unknown_08 = 6000,
Unknown_0C = 4,
Unknown_0E = 0,
Unknown_10 = 0,
Unknown_14 = 9500,
Unknown_18 = -10000,
Unknown_1C = 0,
Unknown_20 = 0,
Unknown_24 = 0,
Unknown_28 = 2800,
Unknown_2C = 262,
Unknown_30 = 45,
Unknown_32 = 0,
Unknown_34 = 360,
Unknown_36 = 0,
Unknown_38 = 0,
Unknown_3A = 0,
InitScenaIndex = 0,
InitFunctionIndex = 0,
EntryScenaIndex = 0,
EntryFunctionIndex = 1,
)
AddCharChip(
'ED6_DT07/CH01000 ._CH', # 00
)
AddCharChipPat(
'ED6_DT07/CH01000P._CP', # 00
)
DeclNpc(
X = -2870,
Z = 0,
Y = 202000,
Direction = 270,
Unknown2 = 0,
Unknown3 = 0,
ChipIndex = 0x0,
NpcIndex = 0x101,
InitFunctionIndex = 0,
InitScenaIndex = 2,
TalkFunctionIndex = 0,
TalkScenaIndex = 3,
)
ScpFunction(
"Function_0_D2", # 00, 0
"Function_1_D3", # 01, 1
"Function_2_DD", # 02, 2
"Function_3_25A", # 03, 3
"Function_4_AEC", # 04, 4
"Function_5_B4D", # 05, 5
)
def Function_0_D2(): pass
label("Function_0_D2")
Return()
# Function_0_D2 end
def Function_1_D3(): pass
label("Function_1_D3")
OP_B0(0x0, 0x78)
OP_1C(0x0, 0x0, 0x5)
Return()
# Function_1_D3 end
def Function_2_DD(): pass
label("Function_2_DD")
RunExpression(0x1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_102")
OP_99(0xFE, 0x0, 0x7, 0x672)
Jump("loc_244")
label("loc_102")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_11B")
OP_99(0xFE, 0x1, 0x7, 0x640)
Jump("loc_244")
label("loc_11B")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_134")
OP_99(0xFE, 0x2, 0x7, 0x60E)
Jump("loc_244")
label("loc_134")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_14D")
OP_99(0xFE, 0x3, 0x7, 0x5DC)
Jump("loc_244")
label("loc_14D")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_166")
OP_99(0xFE, 0x4, 0x7, 0x5AA)
Jump("loc_244")
label("loc_166")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_17F")
OP_99(0xFE, 0x5, 0x7, 0x578)
Jump("loc_244")
label("loc_17F")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_198")
OP_99(0xFE, 0x6, 0x7, 0x546)
Jump("loc_244")
label("loc_198")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1B1")
OP_99(0xFE, 0x0, 0x7, 0x677)
Jump("loc_244")
label("loc_1B1")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1CA")
OP_99(0xFE, 0x1, 0x7, 0x645)
Jump("loc_244")
label("loc_1CA")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1E3")
OP_99(0xFE, 0x2, 0x7, 0x613)
Jump("loc_244")
label("loc_1E3")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_1FC")
OP_99(0xFE, 0x3, 0x7, 0x5E1)
Jump("loc_244")
label("loc_1FC")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_215")
OP_99(0xFE, 0x4, 0x7, 0x5AF)
Jump("loc_244")
label("loc_215")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_22E")
OP_99(0xFE, 0x5, 0x7, 0x57D)
Jump("loc_244")
label("loc_22E")
Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), "loc_244")
OP_99(0xFE, 0x6, 0x7, 0x54B)
label("loc_244")
Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), "loc_259")
OP_99(0xFE, 0x0, 0x7, 0x5DC)
Jump("loc_244")
label("loc_259")
Return()
# Function_2_DD end
    # Function 3: Vogt's talk handler (TalkScenaIndex = 3).
    # Branches on scenario flags to pick dialogue for the current story state.
    # Two branches are one-shot cutscenes that set a scenario flag (OP_A2)
    # and end by reloading this same map via NewScene.  Character ids: 0xFE
    # is the talked-to NPC (Vogt); 0x14E/0x14F are the two girls of the
    # "happiness stone" side event (0x14F is addressed as "Polly"); 0x10 is
    # presumably Vogt's runtime slot, since emotes (OP_62/OP_63) target it
    # while his lines use 0xFE — TODO confirm.
    def Function_3_25A(): pass

    label("Function_3_25A")

    TalkBegin(0xFE)
    # Story state A: flag (0x5E4, 2) set.
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 2)), scpexpr(EXPR_END)), "loc_6C4")
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), "loc_34F")
    # First-talk vs. repeat-talk, toggled by local flag (0x0, 0) via OP_A2(0x0).
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_2B2")

    ChrTalk( #0
        0xFE,
        (
            "I reckon my happiness is right here in this\x01",
            "lighthouse.\x02",
        )
    )

    CloseMessageWindow()
    Jump("loc_34C")

    label("loc_2B2")

    ChrTalk( #1
        0xFE,
        (
            "There's actually a shining stone here in this\x01",
            "lighthouse, though, even if it's not what you\x01",
            "are looking for.\x02",
        )
    )

    CloseMessageWindow()

    ChrTalk( #2
        0xFE,
        "I reckon that's my happiness...\x02",
    )

    CloseMessageWindow()
    OP_A2(0x0)

    label("loc_34C")

    Jump("loc_6C1")

    label("loc_34F")

    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 4)), scpexpr(EXPR_END)), "loc_477")
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), "loc_3DF")

    ChrTalk( #3
        0xFE,
        (
            "There's no shame in relying on others for\x01",
            "help if you need it! Grab 'em by the collar\x01",
            "and scream for help if you need it!\x02",
        )
    )

    CloseMessageWindow()
    Jump("loc_474")

    label("loc_3DF")

    ChrTalk( #4
        0xFE,
        "You lookin' for some help, young lady?\x02",
    )

    CloseMessageWindow()

    ChrTalk( #5
        0xFE,
        "What do you need?\x02",
    )

    CloseMessageWindow()

    ChrTalk( #6
        0x14E,
        (
            "#1714FN-No. I'll be fine, honestly...\x02\x03",

            "#1713FThank you for offering, sir.\x02",
        )
    )

    CloseMessageWindow()
    OP_A2(0x0)

    label("loc_474")

    Jump("loc_6C1")

    # loc_477: one-shot cutscene — the older girl asks about a missing girl,
    # then leaves via the Function_4_AEC walk path; sets flag (0x5E8, 4)
    # (OP_A2(0x2F44)) and reloads the map.
    label("loc_477")

    EventBegin(0x1)
    OP_8C(0xFE, 270, 0)
    Fade(1000)
    # Camera setup for the scene (position, pitch, distance, angle, zoom).
    OP_6D(-1600, 0, 202380, 0)
    OP_67(0, 6000, -10000, 0)
    OP_6B(3000, 0)
    OP_6C(45000, 0)
    OP_6E(280, 0)
    SetChrPos(0x14E, -1280, 0, 202300, 270)
    Sleep(1000)

    ChrTalk( #7
        0xFE,
        (
            "I swear, this is EXACTLY what's wrong\x01",
            "with youngins these days...\x02",
        )
    )

    CloseMessageWindow()
    OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)  # emote balloon on Vogt (surprise, presumably)
    OP_22(0x27, 0x0, 0x64)                       # sound effect 0x27
    Sleep(1000)
    OP_8C(0xFE, 90, 500)                         # turn to face east over 500ms
    Sleep(500)

    ChrTalk( #8
        0xFE,
        "Wh-What are you doing here, young lady?\x02",
    )

    CloseMessageWindow()

    ChrTalk( #9
        0x14E,
        (
            "#1712FU-Umm... Excuse me, sir...\x02\x03",

            "You haven't seen a young girl other\x01",
            "than me in here recently have you?\x02",
        )
    )

    CloseMessageWindow()

    ChrTalk( #10
        0xFE,
        "A young girl? 'Fraid not.\x02",
    )

    CloseMessageWindow()

    ChrTalk( #11
        0x14E,
        (
            "#1713FI-I see...\x02\x03",

            "Sorry for troubling you...\x02",
        )
    )

    CloseMessageWindow()

    # Background thread: keep Vogt turned toward the departing girl
    # (OP_48 yields inside the label/Jump loop).
    def lambda_639():
        label("loc_639")

        TurnDirection(0xFE, 0x14E, 0)
        OP_48()
        Jump("loc_639")

    QueueWorkItem2(0x10, 3, lambda_639)
    OP_43(0x14E, 0x3, 0x0, 0x4)  # run Function_4_AEC on 0x14E (walk-out path)
    Sleep(3000)
    OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
    Sleep(3000)
    OP_63(0x10)  # clear the emote balloon

    ChrTalk( #12
        0xFE,
        "I swear, kids these days...\x02",
    )

    CloseMessageWindow()

    ChrTalk( #13
        0xFE,
        "They sure are a pain.\x02",
    )

    CloseMessageWindow()
    OP_A2(0x2F44)  # mark this cutscene as seen (flag 0x5E8 bit 4)
    FadeToDark(2000, 0, -1)
    OP_0D()
    OP_44(0x10, 0x3)  # stop the turn-to-face worker thread
    NewScene("ED6_DT21/C2219 ._SN", 107, 0, 0)
    IdleLoop()

    label("loc_6C1")

    Jump("loc_AE8")

    # Story state B: flag (0x5E4, 0) set.
    label("loc_6C4")

    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 0)), scpexpr(EXPR_END)), "loc_AE1")
    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), "loc_721")

    ChrTalk( #14
        0xFE,
        "A happiness stone, you say?\x02",
    )

    CloseMessageWindow()

    ChrTalk( #15
        0xFE,
        "You think somethin' like that exists?\x02",
    )

    CloseMessageWindow()
    Jump("loc_ADE")

    # loc_721: one-shot cutscene — both girls ask Vogt about the happiness
    # stone, then leave; sets flag (0x5E8, 3) (OP_A2(0x2F43)) and reloads.
    label("loc_721")

    EventBegin(0x1)
    OP_8C(0xFE, 270, 0)
    Fade(1000)
    OP_6D(-1600, 0, 202380, 0)
    OP_67(0, 6000, -10000, 0)
    OP_6B(3000, 0)
    OP_6C(45000, 0)
    OP_6E(280, 0)
    SetChrPos(0x14E, -1250, 0, 202480, 270)
    SetChrPos(0x14F, -1060, 0, 201620, 270)
    Sleep(1000)

    ChrTalk( #16
        0xFE,
        "I swear, kids these days...\x02",
    )

    CloseMessageWindow()
    OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
    OP_22(0x27, 0x0, 0x64)
    Sleep(1000)
    OP_8C(0xFE, 90, 500)
    Sleep(500)

    ChrTalk( #17
        0xFE,
        "Wh-What might you two be doing here?\x02",
    )

    CloseMessageWindow()

    ChrTalk( #18
        0x14E,
        "#1718FHello!\x02",
    )

    CloseMessageWindow()
    OP_62(0x14E, 0x0, 1600, 0x26, 0x27, 0xFA, 0x1)
    Sleep(500)
    OP_63(0x14E)

    ChrTalk( #19
        0x14E,
        (
            "#1714FActually, lighthouses are pretty high up,\x01",
            "aren't they?\x02\x03",

            "#1718FSir, you haven't seen a happiness stone before,\x01",
            "have you?\x02",
        )
    )

    CloseMessageWindow()

    ChrTalk( #20
        0xFE,
        "A-A happiness stone?!\x02",
    )

    CloseMessageWindow()

    ChrTalk( #21
        0x14F,
        "#1730FThey're really shiny and pretty!\x02",
    )

    CloseMessageWindow()

    ChrTalk( #22
        0xFE,
        (
            "N-No, I don't recall ever seein' any\x01",
            "such thing in all my years...\x02",
        )
    )

    CloseMessageWindow()

    ChrTalk( #23
        0x14E,
        (
            "#1716FOh... That's too bad...\x02\x03",

            "#1710FWell, thank you, anyway.\x02",
        )
    )

    CloseMessageWindow()
    TurnDirection(0x14E, 0x14F, 400)
    Sleep(400)

    ChrTalk( #24
        0x14E,
        "#1718FLet's keep looking, Polly! \x02",
    )

    CloseMessageWindow()
    OP_43(0x14E, 0x3, 0x0, 0x4)  # first girl walks out via Function_4_AEC
    Sleep(2000)

    ChrTalk( #25
        0x14F,
        "#1731FI hope your back feels better, mister!\x02",
    )

    CloseMessageWindow()
    OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)
    OP_22(0x27, 0x0, 0x64)
    Sleep(1000)

    # Background thread: keep Vogt facing Polly while she leaves.
    def lambda_A1A():
        label("loc_A1A")

        TurnDirection(0xFE, 0x14F, 0)
        OP_48()
        Jump("loc_A1A")

    QueueWorkItem2(0x10, 3, lambda_A1A)
    OP_43(0x14F, 0x3, 0x0, 0x4)  # Polly walks out via Function_4_AEC
    Sleep(3000)
    OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)
    Sleep(3000)
    OP_63(0x10)

    ChrTalk( #26
        0xFE,
        "I swear, kids these days...\x02",
    )

    CloseMessageWindow()

    ChrTalk( #27
        0xFE,
        "...They're sharp little devils, aren't they?\x02",
    )

    CloseMessageWindow()
    Sleep(500)

    ChrTalk( #28
        0xFE,
        "A happiness stone, hmm...?\x02",
    )

    CloseMessageWindow()
    OP_A2(0x2F43)  # mark this cutscene as seen (flag 0x5E8 bit 3)
    FadeToDark(2000, 0, -1)
    OP_0D()
    OP_44(0x10, 0x3)
    NewScene("ED6_DT21/C2219 ._SN", 107, 0, 0)
    IdleLoop()

    label("loc_ADE")

    Jump("loc_AE8")

    # loc_AE1: remaining state — the flag test has an empty body (both arms
    # reach loc_AE8), kept as decompiled.
    label("loc_AE1")

    Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E2, 7)), scpexpr(EXPR_END)), "loc_AE8")

    label("loc_AE8")

    TalkEnd(0xFE)
    Return()

    # Function_3_25A end
    # Function 4: walk-out path run on a guest character via
    # OP_43(chr, 0x3, 0x0, 0x4) from the cutscenes above.  Three OP_8E
    # waypoints are queued one at a time on the character's thread 1, each
    # awaited before the next — presumably moving the girl to the exit.
    # NOTE(review): the 0xFFFFF254 / 0xFFFFF830 arguments look like negative
    # coordinates in two's complement (-3500 / -2000) — confirm against the
    # helper's opcode table.
    def Function_4_AEC(): pass

    label("Function_4_AEC")

    def lambda_AF2():
        OP_8E(0xFE, 0xB04, 0x0, 0x32104, 0x7D0, 0x0)
        ExitThread()

    QueueWorkItem(0xFE, 1, lambda_AF2)
    WaitChrThread(0xFE, 0x1)

    def lambda_B12():
        OP_8E(0xFE, 0xB04, 0x0, 0x3283E, 0x7D0, 0x0)
        ExitThread()

    QueueWorkItem(0xFE, 1, lambda_B12)
    WaitChrThread(0xFE, 0x1)

    def lambda_B32():
        OP_8E(0xFE, 0xFFFFF254, 0xFFFFF830, 0x328F2, 0x7D0, 0x0)
        ExitThread()

    QueueWorkItem(0xFE, 1, lambda_B32)
    WaitChrThread(0xFE, 0x1)
    Return()

    # Function_4_AEC end
def Function_5_B4D(): pass  # placeholder def; the flat label/statement list below is the real body
label("Function_5_B4D")
# Empty talk handler for 0xFF (presumably the player character — confirm):
# opens and immediately closes the talk state, producing no dialogue.
TalkBegin(0xFF)
TalkEnd(0xFF)
Return()
# Function_5_B4D end
SaveToFile()  # write the assembled scenario data back out to its ._SN binary — TODO confirm output path in the helper module
Try(main)  # script entry point; Try presumably wraps main() with the helper's error reporting — confirm in ED63RDScenarioHelper
|
flexible
|
{
"blob_id": "55c2bf914a77c573d1b6835f54c82921d9fa6ad6",
"index": 1010,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n SetCodePage('ms932')\n CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=\n 'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,\n EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[\n 'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])\n BuildStringList('@FileName', 'Vogt')\n DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=\n 4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,\n Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,\n Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,\n Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,\n InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)\n AddCharChip('ED6_DT07/CH01000 ._CH')\n AddCharChipPat('ED6_DT07/CH01000P._CP')\n DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,\n ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,\n TalkFunctionIndex=0, TalkScenaIndex=3)\n ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',\n 'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')\n\n def Function_0_D2():\n pass\n label('Function_0_D2')\n Return()\n\n def Function_1_D3():\n pass\n label('Function_1_D3')\n OP_B0(0, 120)\n OP_1C(0, 0, 5)\n Return()\n\n def Function_2_DD():\n pass\n label('Function_2_DD')\n RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),\n scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')\n OP_99(254, 0, 7, 1650)\n Jump('loc_244')\n label('loc_102')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')\n OP_99(254, 1, 7, 1600)\n Jump('loc_244')\n label('loc_11B')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')\n OP_99(254, 2, 7, 1550)\n Jump('loc_244')\n 
label('loc_134')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')\n OP_99(254, 3, 7, 1500)\n Jump('loc_244')\n label('loc_14D')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')\n OP_99(254, 4, 7, 1450)\n Jump('loc_244')\n label('loc_166')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')\n OP_99(254, 5, 7, 1400)\n Jump('loc_244')\n label('loc_17F')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')\n OP_99(254, 6, 7, 1350)\n Jump('loc_244')\n label('loc_198')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')\n OP_99(254, 0, 7, 1655)\n Jump('loc_244')\n label('loc_1B1')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')\n OP_99(254, 1, 7, 1605)\n Jump('loc_244')\n label('loc_1CA')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')\n OP_99(254, 2, 7, 1555)\n Jump('loc_244')\n label('loc_1E3')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')\n OP_99(254, 3, 7, 1505)\n Jump('loc_244')\n label('loc_1FC')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')\n OP_99(254, 4, 7, 1455)\n Jump('loc_244')\n label('loc_215')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')\n OP_99(254, 5, 7, 1405)\n Jump('loc_244')\n label('loc_22E')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')\n OP_99(254, 6, 7, 1355)\n label('loc_244')\n Jc((scpexpr(EXPR_PUSH_LONG, 1), 
scpexpr(EXPR_END)), 'loc_259')\n OP_99(254, 0, 7, 1500)\n Jump('loc_244')\n label('loc_259')\n Return()\n\n def Function_3_25A():\n pass\n label('Function_3_25A')\n TalkBegin(254)\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr\n (EXPR_END)), 'loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_2B2')\n ChrTalk(254, ('I reckon my happiness is right here in this\\x01',\n 'lighthouse.\\x02'))\n CloseMessageWindow()\n Jump('loc_34C')\n label('loc_2B2')\n ChrTalk(254, (\"There's actually a shining stone here in this\\x01\",\n \"lighthouse, though, even if it's not what you\\x01\",\n 'are looking for.\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"I reckon that's my happiness...\\x02\")\n CloseMessageWindow()\n OP_A2(0)\n label('loc_34C')\n Jump('loc_6C1')\n label('loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr\n (EXPR_END)), 'loc_477')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_3DF')\n ChrTalk(254, (\"There's no shame in relying on others for\\x01\",\n \"help if you need it! Grab 'em by the collar\\x01\",\n 'and scream for help if you need it!\\x02'))\n CloseMessageWindow()\n Jump('loc_474')\n label('loc_3DF')\n ChrTalk(254, \"You lookin' for some help, young lady?\\x02\")\n CloseMessageWindow()\n ChrTalk(254, 'What do you need?\\x02')\n CloseMessageWindow()\n ChrTalk(334, (\"#1714FN-No. 
I'll be fine, honestly...\\x02\\x03\",\n '#1713FThank you for offering, sir.\\x02'))\n CloseMessageWindow()\n OP_A2(0)\n label('loc_474')\n Jump('loc_6C1')\n label('loc_477')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1280, 0, 202300, 270)\n Sleep(1000)\n ChrTalk(254, (\"I swear, this is EXACTLY what's wrong\\x01\",\n 'with youngins these days...\\x02'))\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What are you doing here, young lady?\\x02')\n CloseMessageWindow()\n ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\\x02\\x03',\n \"You haven't seen a young girl other\\x01\",\n 'than me in here recently have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"A young girl? 'Fraid not.\\x02\")\n CloseMessageWindow()\n ChrTalk(334, ('#1713FI-I see...\\x02\\x03', 'Sorry for troubling you...\\x02')\n )\n CloseMessageWindow()\n\n def lambda_639():\n label('loc_639')\n TurnDirection(254, 334, 0)\n OP_48()\n Jump('loc_639')\n QueueWorkItem2(16, 3, lambda_639)\n OP_43(334, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, 'They sure are a pain.\\x02')\n CloseMessageWindow()\n OP_A2(12100)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_6C1')\n Jump('loc_AE8')\n label('loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr\n (EXPR_END)), 'loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_721')\n ChrTalk(254, 'A happiness stone, you say?\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"You think somethin' like that exists?\\x02\")\n CloseMessageWindow()\n Jump('loc_ADE')\n 
label('loc_721')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1250, 0, 202480, 270)\n SetChrPos(335, -1060, 0, 201620, 270)\n Sleep(1000)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What might you two be doing here?\\x02')\n CloseMessageWindow()\n ChrTalk(334, '#1718FHello!\\x02')\n CloseMessageWindow()\n OP_62(334, 0, 1600, 38, 39, 250, 1)\n Sleep(500)\n OP_63(334)\n ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\\x01',\n \"aren't they?\\x02\\x03\",\n \"#1718FSir, you haven't seen a happiness stone before,\\x01\",\n 'have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, 'A-A happiness stone?!\\x02')\n CloseMessageWindow()\n ChrTalk(335, \"#1730FThey're really shiny and pretty!\\x02\")\n CloseMessageWindow()\n ChrTalk(254, (\"N-No, I don't recall ever seein' any\\x01\",\n 'such thing in all my years...\\x02'))\n CloseMessageWindow()\n ChrTalk(334, (\"#1716FOh... That's too bad...\\x02\\x03\",\n '#1710FWell, thank you, anyway.\\x02'))\n CloseMessageWindow()\n TurnDirection(334, 335, 400)\n Sleep(400)\n ChrTalk(334, \"#1718FLet's keep looking, Polly! 
\\x02\")\n CloseMessageWindow()\n OP_43(334, 3, 0, 4)\n Sleep(2000)\n ChrTalk(335, '#1731FI hope your back feels better, mister!\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n\n def lambda_A1A():\n label('loc_A1A')\n TurnDirection(254, 335, 0)\n OP_48()\n Jump('loc_A1A')\n QueueWorkItem2(16, 3, lambda_A1A)\n OP_43(335, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"...They're sharp little devils, aren't they?\\x02\")\n CloseMessageWindow()\n Sleep(500)\n ChrTalk(254, 'A happiness stone, hmm...?\\x02')\n CloseMessageWindow()\n OP_A2(12099)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_ADE')\n Jump('loc_AE8')\n label('loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr\n (EXPR_END)), 'loc_AE8')\n label('loc_AE8')\n TalkEnd(254)\n Return()\n\n def Function_4_AEC():\n pass\n label('Function_4_AEC')\n\n def lambda_AF2():\n OP_8E(254, 2820, 0, 205060, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_AF2)\n WaitChrThread(254, 1)\n\n def lambda_B12():\n OP_8E(254, 2820, 0, 206910, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B12)\n WaitChrThread(254, 1)\n\n def lambda_B32():\n OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B32)\n WaitChrThread(254, 1)\n Return()\n\n def Function_5_B4D():\n pass\n label('Function_5_B4D')\n TalkBegin(255)\n TalkEnd(255)\n Return()\n SaveToFile()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n SetCodePage('ms932')\n CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=\n 'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,\n EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[\n 'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])\n BuildStringList('@FileName', 'Vogt')\n DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=\n 4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,\n Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,\n Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,\n Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,\n InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)\n AddCharChip('ED6_DT07/CH01000 ._CH')\n AddCharChipPat('ED6_DT07/CH01000P._CP')\n DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,\n ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,\n TalkFunctionIndex=0, TalkScenaIndex=3)\n ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',\n 'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')\n\n def Function_0_D2():\n pass\n label('Function_0_D2')\n Return()\n\n def Function_1_D3():\n pass\n label('Function_1_D3')\n OP_B0(0, 120)\n OP_1C(0, 0, 5)\n Return()\n\n def Function_2_DD():\n pass\n label('Function_2_DD')\n RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),\n scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')\n OP_99(254, 0, 7, 1650)\n Jump('loc_244')\n label('loc_102')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')\n OP_99(254, 1, 7, 1600)\n Jump('loc_244')\n label('loc_11B')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')\n OP_99(254, 2, 7, 1550)\n Jump('loc_244')\n 
label('loc_134')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')\n OP_99(254, 3, 7, 1500)\n Jump('loc_244')\n label('loc_14D')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')\n OP_99(254, 4, 7, 1450)\n Jump('loc_244')\n label('loc_166')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')\n OP_99(254, 5, 7, 1400)\n Jump('loc_244')\n label('loc_17F')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')\n OP_99(254, 6, 7, 1350)\n Jump('loc_244')\n label('loc_198')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')\n OP_99(254, 0, 7, 1655)\n Jump('loc_244')\n label('loc_1B1')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')\n OP_99(254, 1, 7, 1605)\n Jump('loc_244')\n label('loc_1CA')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')\n OP_99(254, 2, 7, 1555)\n Jump('loc_244')\n label('loc_1E3')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')\n OP_99(254, 3, 7, 1505)\n Jump('loc_244')\n label('loc_1FC')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')\n OP_99(254, 4, 7, 1455)\n Jump('loc_244')\n label('loc_215')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')\n OP_99(254, 5, 7, 1405)\n Jump('loc_244')\n label('loc_22E')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')\n OP_99(254, 6, 7, 1355)\n label('loc_244')\n Jc((scpexpr(EXPR_PUSH_LONG, 1), 
scpexpr(EXPR_END)), 'loc_259')\n OP_99(254, 0, 7, 1500)\n Jump('loc_244')\n label('loc_259')\n Return()\n\n def Function_3_25A():\n pass\n label('Function_3_25A')\n TalkBegin(254)\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr\n (EXPR_END)), 'loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_2B2')\n ChrTalk(254, ('I reckon my happiness is right here in this\\x01',\n 'lighthouse.\\x02'))\n CloseMessageWindow()\n Jump('loc_34C')\n label('loc_2B2')\n ChrTalk(254, (\"There's actually a shining stone here in this\\x01\",\n \"lighthouse, though, even if it's not what you\\x01\",\n 'are looking for.\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"I reckon that's my happiness...\\x02\")\n CloseMessageWindow()\n OP_A2(0)\n label('loc_34C')\n Jump('loc_6C1')\n label('loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr\n (EXPR_END)), 'loc_477')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_3DF')\n ChrTalk(254, (\"There's no shame in relying on others for\\x01\",\n \"help if you need it! Grab 'em by the collar\\x01\",\n 'and scream for help if you need it!\\x02'))\n CloseMessageWindow()\n Jump('loc_474')\n label('loc_3DF')\n ChrTalk(254, \"You lookin' for some help, young lady?\\x02\")\n CloseMessageWindow()\n ChrTalk(254, 'What do you need?\\x02')\n CloseMessageWindow()\n ChrTalk(334, (\"#1714FN-No. 
I'll be fine, honestly...\\x02\\x03\",\n '#1713FThank you for offering, sir.\\x02'))\n CloseMessageWindow()\n OP_A2(0)\n label('loc_474')\n Jump('loc_6C1')\n label('loc_477')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1280, 0, 202300, 270)\n Sleep(1000)\n ChrTalk(254, (\"I swear, this is EXACTLY what's wrong\\x01\",\n 'with youngins these days...\\x02'))\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What are you doing here, young lady?\\x02')\n CloseMessageWindow()\n ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\\x02\\x03',\n \"You haven't seen a young girl other\\x01\",\n 'than me in here recently have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"A young girl? 'Fraid not.\\x02\")\n CloseMessageWindow()\n ChrTalk(334, ('#1713FI-I see...\\x02\\x03', 'Sorry for troubling you...\\x02')\n )\n CloseMessageWindow()\n\n def lambda_639():\n label('loc_639')\n TurnDirection(254, 334, 0)\n OP_48()\n Jump('loc_639')\n QueueWorkItem2(16, 3, lambda_639)\n OP_43(334, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, 'They sure are a pain.\\x02')\n CloseMessageWindow()\n OP_A2(12100)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_6C1')\n Jump('loc_AE8')\n label('loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr\n (EXPR_END)), 'loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_721')\n ChrTalk(254, 'A happiness stone, you say?\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"You think somethin' like that exists?\\x02\")\n CloseMessageWindow()\n Jump('loc_ADE')\n 
label('loc_721')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1250, 0, 202480, 270)\n SetChrPos(335, -1060, 0, 201620, 270)\n Sleep(1000)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What might you two be doing here?\\x02')\n CloseMessageWindow()\n ChrTalk(334, '#1718FHello!\\x02')\n CloseMessageWindow()\n OP_62(334, 0, 1600, 38, 39, 250, 1)\n Sleep(500)\n OP_63(334)\n ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\\x01',\n \"aren't they?\\x02\\x03\",\n \"#1718FSir, you haven't seen a happiness stone before,\\x01\",\n 'have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, 'A-A happiness stone?!\\x02')\n CloseMessageWindow()\n ChrTalk(335, \"#1730FThey're really shiny and pretty!\\x02\")\n CloseMessageWindow()\n ChrTalk(254, (\"N-No, I don't recall ever seein' any\\x01\",\n 'such thing in all my years...\\x02'))\n CloseMessageWindow()\n ChrTalk(334, (\"#1716FOh... That's too bad...\\x02\\x03\",\n '#1710FWell, thank you, anyway.\\x02'))\n CloseMessageWindow()\n TurnDirection(334, 335, 400)\n Sleep(400)\n ChrTalk(334, \"#1718FLet's keep looking, Polly! 
\\x02\")\n CloseMessageWindow()\n OP_43(334, 3, 0, 4)\n Sleep(2000)\n ChrTalk(335, '#1731FI hope your back feels better, mister!\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n\n def lambda_A1A():\n label('loc_A1A')\n TurnDirection(254, 335, 0)\n OP_48()\n Jump('loc_A1A')\n QueueWorkItem2(16, 3, lambda_A1A)\n OP_43(335, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"...They're sharp little devils, aren't they?\\x02\")\n CloseMessageWindow()\n Sleep(500)\n ChrTalk(254, 'A happiness stone, hmm...?\\x02')\n CloseMessageWindow()\n OP_A2(12099)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_ADE')\n Jump('loc_AE8')\n label('loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr\n (EXPR_END)), 'loc_AE8')\n label('loc_AE8')\n TalkEnd(254)\n Return()\n\n def Function_4_AEC():\n pass\n label('Function_4_AEC')\n\n def lambda_AF2():\n OP_8E(254, 2820, 0, 205060, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_AF2)\n WaitChrThread(254, 1)\n\n def lambda_B12():\n OP_8E(254, 2820, 0, 206910, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B12)\n WaitChrThread(254, 1)\n\n def lambda_B32():\n OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B32)\n WaitChrThread(254, 1)\n Return()\n\n def Function_5_B4D():\n pass\n label('Function_5_B4D')\n TalkBegin(255)\n TalkEnd(255)\n Return()\n SaveToFile()\n\n\nTry(main)\n",
"step-4": "from ED63RDScenarioHelper import *\n\n\ndef main():\n SetCodePage('ms932')\n CreateScenaFile(FileName='C2219 ._SN', MapName='Ruan', Location=\n 'C2219.x', MapIndex=84, MapDefaultBGM='ed60015', Flags=0,\n EntryFunctionIndex=65535, Reserved=0, IncludedScenario=[\n 'ED6_DT21/C2219 ._SN', '', '', '', '', '', '', ''])\n BuildStringList('@FileName', 'Vogt')\n DeclEntryPoint(Unknown_00=0, Unknown_04=0, Unknown_08=6000, Unknown_0C=\n 4, Unknown_0E=0, Unknown_10=0, Unknown_14=9500, Unknown_18=-10000,\n Unknown_1C=0, Unknown_20=0, Unknown_24=0, Unknown_28=2800,\n Unknown_2C=262, Unknown_30=45, Unknown_32=0, Unknown_34=360,\n Unknown_36=0, Unknown_38=0, Unknown_3A=0, InitScenaIndex=0,\n InitFunctionIndex=0, EntryScenaIndex=0, EntryFunctionIndex=1)\n AddCharChip('ED6_DT07/CH01000 ._CH')\n AddCharChipPat('ED6_DT07/CH01000P._CP')\n DeclNpc(X=-2870, Z=0, Y=202000, Direction=270, Unknown2=0, Unknown3=0,\n ChipIndex=0, NpcIndex=257, InitFunctionIndex=0, InitScenaIndex=2,\n TalkFunctionIndex=0, TalkScenaIndex=3)\n ScpFunction('Function_0_D2', 'Function_1_D3', 'Function_2_DD',\n 'Function_3_25A', 'Function_4_AEC', 'Function_5_B4D')\n\n def Function_0_D2():\n pass\n label('Function_0_D2')\n Return()\n\n def Function_1_D3():\n pass\n label('Function_1_D3')\n OP_B0(0, 120)\n OP_1C(0, 0, 5)\n Return()\n\n def Function_2_DD():\n pass\n label('Function_2_DD')\n RunExpression(1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 14),\n scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 0), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_102')\n OP_99(254, 0, 7, 1650)\n Jump('loc_244')\n label('loc_102')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 1), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_11B')\n OP_99(254, 1, 7, 1600)\n Jump('loc_244')\n label('loc_11B')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 2), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_134')\n OP_99(254, 2, 7, 1550)\n 
Jump('loc_244')\n label('loc_134')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 3), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_14D')\n OP_99(254, 3, 7, 1500)\n Jump('loc_244')\n label('loc_14D')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 4), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_166')\n OP_99(254, 4, 7, 1450)\n Jump('loc_244')\n label('loc_166')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 5), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_17F')\n OP_99(254, 5, 7, 1400)\n Jump('loc_244')\n label('loc_17F')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 6), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_198')\n OP_99(254, 6, 7, 1350)\n Jump('loc_244')\n label('loc_198')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 7), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1B1')\n OP_99(254, 0, 7, 1655)\n Jump('loc_244')\n label('loc_1B1')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 8), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1CA')\n OP_99(254, 1, 7, 1605)\n Jump('loc_244')\n label('loc_1CA')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 9), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1E3')\n OP_99(254, 2, 7, 1555)\n Jump('loc_244')\n label('loc_1E3')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 10), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_1FC')\n OP_99(254, 3, 7, 1505)\n Jump('loc_244')\n label('loc_1FC')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 11), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_215')\n OP_99(254, 4, 7, 1455)\n Jump('loc_244')\n label('loc_215')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 12), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_22E')\n OP_99(254, 5, 7, 1405)\n Jump('loc_244')\n label('loc_22E')\n Jc((scpexpr(EXPR_GET_RESULT, 1), scpexpr(EXPR_PUSH_LONG, 13), scpexpr(\n EXPR_EQU), scpexpr(EXPR_END)), 'loc_244')\n OP_99(254, 6, 7, 1355)\n label('loc_244')\n 
Jc((scpexpr(EXPR_PUSH_LONG, 1), scpexpr(EXPR_END)), 'loc_259')\n OP_99(254, 0, 7, 1500)\n Jump('loc_244')\n label('loc_259')\n Return()\n\n def Function_3_25A():\n pass\n label('Function_3_25A')\n TalkBegin(254)\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 2)), scpexpr\n (EXPR_END)), 'loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_2B2')\n ChrTalk(254, ('I reckon my happiness is right here in this\\x01',\n 'lighthouse.\\x02'))\n CloseMessageWindow()\n Jump('loc_34C')\n label('loc_2B2')\n ChrTalk(254, (\"There's actually a shining stone here in this\\x01\",\n \"lighthouse, though, even if it's not what you\\x01\",\n 'are looking for.\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"I reckon that's my happiness...\\x02\")\n CloseMessageWindow()\n OP_A2(0)\n label('loc_34C')\n Jump('loc_6C1')\n label('loc_34F')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 4)), scpexpr\n (EXPR_END)), 'loc_477')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0, 0)), scpexpr(\n EXPR_END)), 'loc_3DF')\n ChrTalk(254, (\"There's no shame in relying on others for\\x01\",\n \"help if you need it! Grab 'em by the collar\\x01\",\n 'and scream for help if you need it!\\x02'))\n CloseMessageWindow()\n Jump('loc_474')\n label('loc_3DF')\n ChrTalk(254, \"You lookin' for some help, young lady?\\x02\")\n CloseMessageWindow()\n ChrTalk(254, 'What do you need?\\x02')\n CloseMessageWindow()\n ChrTalk(334, (\"#1714FN-No. 
I'll be fine, honestly...\\x02\\x03\",\n '#1713FThank you for offering, sir.\\x02'))\n CloseMessageWindow()\n OP_A2(0)\n label('loc_474')\n Jump('loc_6C1')\n label('loc_477')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1280, 0, 202300, 270)\n Sleep(1000)\n ChrTalk(254, (\"I swear, this is EXACTLY what's wrong\\x01\",\n 'with youngins these days...\\x02'))\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What are you doing here, young lady?\\x02')\n CloseMessageWindow()\n ChrTalk(334, ('#1712FU-Umm... Excuse me, sir...\\x02\\x03',\n \"You haven't seen a young girl other\\x01\",\n 'than me in here recently have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, \"A young girl? 'Fraid not.\\x02\")\n CloseMessageWindow()\n ChrTalk(334, ('#1713FI-I see...\\x02\\x03', 'Sorry for troubling you...\\x02')\n )\n CloseMessageWindow()\n\n def lambda_639():\n label('loc_639')\n TurnDirection(254, 334, 0)\n OP_48()\n Jump('loc_639')\n QueueWorkItem2(16, 3, lambda_639)\n OP_43(334, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, 'They sure are a pain.\\x02')\n CloseMessageWindow()\n OP_A2(12100)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_6C1')\n Jump('loc_AE8')\n label('loc_6C4')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1508, 0)), scpexpr\n (EXPR_END)), 'loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1512, 3)), scpexpr\n (EXPR_END)), 'loc_721')\n ChrTalk(254, 'A happiness stone, you say?\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"You think somethin' like that exists?\\x02\")\n CloseMessageWindow()\n Jump('loc_ADE')\n 
label('loc_721')\n EventBegin(1)\n OP_8C(254, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(334, -1250, 0, 202480, 270)\n SetChrPos(335, -1060, 0, 201620, 270)\n Sleep(1000)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n OP_8C(254, 90, 500)\n Sleep(500)\n ChrTalk(254, 'Wh-What might you two be doing here?\\x02')\n CloseMessageWindow()\n ChrTalk(334, '#1718FHello!\\x02')\n CloseMessageWindow()\n OP_62(334, 0, 1600, 38, 39, 250, 1)\n Sleep(500)\n OP_63(334)\n ChrTalk(334, ('#1714FActually, lighthouses are pretty high up,\\x01',\n \"aren't they?\\x02\\x03\",\n \"#1718FSir, you haven't seen a happiness stone before,\\x01\",\n 'have you?\\x02'))\n CloseMessageWindow()\n ChrTalk(254, 'A-A happiness stone?!\\x02')\n CloseMessageWindow()\n ChrTalk(335, \"#1730FThey're really shiny and pretty!\\x02\")\n CloseMessageWindow()\n ChrTalk(254, (\"N-No, I don't recall ever seein' any\\x01\",\n 'such thing in all my years...\\x02'))\n CloseMessageWindow()\n ChrTalk(334, (\"#1716FOh... That's too bad...\\x02\\x03\",\n '#1710FWell, thank you, anyway.\\x02'))\n CloseMessageWindow()\n TurnDirection(334, 335, 400)\n Sleep(400)\n ChrTalk(334, \"#1718FLet's keep looking, Polly! 
\\x02\")\n CloseMessageWindow()\n OP_43(334, 3, 0, 4)\n Sleep(2000)\n ChrTalk(335, '#1731FI hope your back feels better, mister!\\x02')\n CloseMessageWindow()\n OP_62(16, 0, 2000, 2, 7, 80, 1)\n OP_22(39, 0, 100)\n Sleep(1000)\n\n def lambda_A1A():\n label('loc_A1A')\n TurnDirection(254, 335, 0)\n OP_48()\n Jump('loc_A1A')\n QueueWorkItem2(16, 3, lambda_A1A)\n OP_43(335, 3, 0, 4)\n Sleep(3000)\n OP_62(16, 0, 2000, 24, 27, 250, 0)\n Sleep(3000)\n OP_63(16)\n ChrTalk(254, 'I swear, kids these days...\\x02')\n CloseMessageWindow()\n ChrTalk(254, \"...They're sharp little devils, aren't they?\\x02\")\n CloseMessageWindow()\n Sleep(500)\n ChrTalk(254, 'A happiness stone, hmm...?\\x02')\n CloseMessageWindow()\n OP_A2(12099)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(16, 3)\n NewScene('ED6_DT21/C2219 ._SN', 107, 0, 0)\n IdleLoop()\n label('loc_ADE')\n Jump('loc_AE8')\n label('loc_AE1')\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(1506, 7)), scpexpr\n (EXPR_END)), 'loc_AE8')\n label('loc_AE8')\n TalkEnd(254)\n Return()\n\n def Function_4_AEC():\n pass\n label('Function_4_AEC')\n\n def lambda_AF2():\n OP_8E(254, 2820, 0, 205060, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_AF2)\n WaitChrThread(254, 1)\n\n def lambda_B12():\n OP_8E(254, 2820, 0, 206910, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B12)\n WaitChrThread(254, 1)\n\n def lambda_B32():\n OP_8E(254, 4294963796, 4294965296, 207090, 2000, 0)\n ExitThread()\n QueueWorkItem(254, 1, lambda_B32)\n WaitChrThread(254, 1)\n Return()\n\n def Function_5_B4D():\n pass\n label('Function_5_B4D')\n TalkBegin(255)\n TalkEnd(255)\n Return()\n SaveToFile()\n\n\nTry(main)\n",
"step-5": "from ED63RDScenarioHelper import *\n\ndef main():\n SetCodePage(\"ms932\")\n\n CreateScenaFile(\n FileName = 'C2219 ._SN',\n MapName = 'Ruan',\n Location = 'C2219.x',\n MapIndex = 84,\n MapDefaultBGM = \"ed60015\",\n Flags = 0,\n EntryFunctionIndex = 0xFFFF,\n Reserved = 0,\n IncludedScenario = [\n 'ED6_DT21/C2219 ._SN',\n '',\n '',\n '',\n '',\n '',\n '',\n ''\n ],\n )\n\n BuildStringList(\n '@FileName', # 8\n 'Vogt', # 9\n )\n\n DeclEntryPoint(\n Unknown_00 = 0,\n Unknown_04 = 0,\n Unknown_08 = 6000,\n Unknown_0C = 4,\n Unknown_0E = 0,\n Unknown_10 = 0,\n Unknown_14 = 9500,\n Unknown_18 = -10000,\n Unknown_1C = 0,\n Unknown_20 = 0,\n Unknown_24 = 0,\n Unknown_28 = 2800,\n Unknown_2C = 262,\n Unknown_30 = 45,\n Unknown_32 = 0,\n Unknown_34 = 360,\n Unknown_36 = 0,\n Unknown_38 = 0,\n Unknown_3A = 0,\n InitScenaIndex = 0,\n InitFunctionIndex = 0,\n EntryScenaIndex = 0,\n EntryFunctionIndex = 1,\n )\n\n\n AddCharChip(\n 'ED6_DT07/CH01000 ._CH', # 00\n )\n\n AddCharChipPat(\n 'ED6_DT07/CH01000P._CP', # 00\n )\n\n DeclNpc(\n X = -2870,\n Z = 0,\n Y = 202000,\n Direction = 270,\n Unknown2 = 0,\n Unknown3 = 0,\n ChipIndex = 0x0,\n NpcIndex = 0x101,\n InitFunctionIndex = 0,\n InitScenaIndex = 2,\n TalkFunctionIndex = 0,\n TalkScenaIndex = 3,\n )\n\n\n ScpFunction(\n \"Function_0_D2\", # 00, 0\n \"Function_1_D3\", # 01, 1\n \"Function_2_DD\", # 02, 2\n \"Function_3_25A\", # 03, 3\n \"Function_4_AEC\", # 04, 4\n \"Function_5_B4D\", # 05, 5\n )\n\n\n def Function_0_D2(): pass\n\n label(\"Function_0_D2\")\n\n Return()\n\n # Function_0_D2 end\n\n def Function_1_D3(): pass\n\n label(\"Function_1_D3\")\n\n OP_B0(0x0, 0x78)\n OP_1C(0x0, 0x0, 0x5)\n Return()\n\n # Function_1_D3 end\n\n def Function_2_DD(): pass\n\n label(\"Function_2_DD\")\n\n RunExpression(0x1, (scpexpr(EXPR_RAND), scpexpr(EXPR_PUSH_LONG, 0xE), scpexpr(EXPR_IMOD), scpexpr(EXPR_STUB), scpexpr(EXPR_END)))\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x0), scpexpr(EXPR_EQU), 
scpexpr(EXPR_END)), \"loc_102\")\n OP_99(0xFE, 0x0, 0x7, 0x672)\n Jump(\"loc_244\")\n\n label(\"loc_102\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_11B\")\n OP_99(0xFE, 0x1, 0x7, 0x640)\n Jump(\"loc_244\")\n\n label(\"loc_11B\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x2), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_134\")\n OP_99(0xFE, 0x2, 0x7, 0x60E)\n Jump(\"loc_244\")\n\n label(\"loc_134\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x3), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_14D\")\n OP_99(0xFE, 0x3, 0x7, 0x5DC)\n Jump(\"loc_244\")\n\n label(\"loc_14D\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x4), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_166\")\n OP_99(0xFE, 0x4, 0x7, 0x5AA)\n Jump(\"loc_244\")\n\n label(\"loc_166\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x5), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_17F\")\n OP_99(0xFE, 0x5, 0x7, 0x578)\n Jump(\"loc_244\")\n\n label(\"loc_17F\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x6), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_198\")\n OP_99(0xFE, 0x6, 0x7, 0x546)\n Jump(\"loc_244\")\n\n label(\"loc_198\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x7), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_1B1\")\n OP_99(0xFE, 0x0, 0x7, 0x677)\n Jump(\"loc_244\")\n\n label(\"loc_1B1\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x8), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_1CA\")\n OP_99(0xFE, 0x1, 0x7, 0x645)\n Jump(\"loc_244\")\n\n label(\"loc_1CA\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0x9), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_1E3\")\n OP_99(0xFE, 0x2, 0x7, 0x613)\n Jump(\"loc_244\")\n\n label(\"loc_1E3\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xA), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_1FC\")\n OP_99(0xFE, 
0x3, 0x7, 0x5E1)\n Jump(\"loc_244\")\n\n label(\"loc_1FC\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xB), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_215\")\n OP_99(0xFE, 0x4, 0x7, 0x5AF)\n Jump(\"loc_244\")\n\n label(\"loc_215\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xC), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_22E\")\n OP_99(0xFE, 0x5, 0x7, 0x57D)\n Jump(\"loc_244\")\n\n label(\"loc_22E\")\n\n Jc((scpexpr(EXPR_GET_RESULT, 0x1), scpexpr(EXPR_PUSH_LONG, 0xD), scpexpr(EXPR_EQU), scpexpr(EXPR_END)), \"loc_244\")\n OP_99(0xFE, 0x6, 0x7, 0x54B)\n\n label(\"loc_244\")\n\n Jc((scpexpr(EXPR_PUSH_LONG, 0x1), scpexpr(EXPR_END)), \"loc_259\")\n OP_99(0xFE, 0x0, 0x7, 0x5DC)\n Jump(\"loc_244\")\n\n label(\"loc_259\")\n\n Return()\n\n # Function_2_DD end\n\n def Function_3_25A(): pass\n\n label(\"Function_3_25A\")\n\n TalkBegin(0xFE)\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 2)), scpexpr(EXPR_END)), \"loc_6C4\")\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), \"loc_34F\")\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), \"loc_2B2\")\n\n ChrTalk( #0\n 0xFE,\n (\n \"I reckon my happiness is right here in this\\x01\",\n \"lighthouse.\\x02\",\n )\n )\n\n CloseMessageWindow()\n Jump(\"loc_34C\")\n\n label(\"loc_2B2\")\n\n\n ChrTalk( #1\n 0xFE,\n (\n \"There's actually a shining stone here in this\\x01\",\n \"lighthouse, though, even if it's not what you\\x01\",\n \"are looking for.\\x02\",\n )\n )\n\n CloseMessageWindow()\n\n ChrTalk( #2\n 0xFE,\n \"I reckon that's my happiness...\\x02\",\n )\n\n CloseMessageWindow()\n OP_A2(0x0)\n\n label(\"loc_34C\")\n\n Jump(\"loc_6C1\")\n\n label(\"loc_34F\")\n\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 4)), scpexpr(EXPR_END)), \"loc_477\")\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x0, 0)), scpexpr(EXPR_END)), \"loc_3DF\")\n\n ChrTalk( #3\n 0xFE,\n (\n \"There's no 
shame in relying on others for\\x01\",\n \"help if you need it! Grab 'em by the collar\\x01\",\n \"and scream for help if you need it!\\x02\",\n )\n )\n\n CloseMessageWindow()\n Jump(\"loc_474\")\n\n label(\"loc_3DF\")\n\n\n ChrTalk( #4\n 0xFE,\n \"You lookin' for some help, young lady?\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #5\n 0xFE,\n \"What do you need?\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #6\n 0x14E,\n (\n \"#1714FN-No. I'll be fine, honestly...\\x02\\x03\",\n\n \"#1713FThank you for offering, sir.\\x02\",\n )\n )\n\n CloseMessageWindow()\n OP_A2(0x0)\n\n label(\"loc_474\")\n\n Jump(\"loc_6C1\")\n\n label(\"loc_477\")\n\n EventBegin(0x1)\n OP_8C(0xFE, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(0x14E, -1280, 0, 202300, 270)\n Sleep(1000)\n\n ChrTalk( #7\n 0xFE,\n (\n \"I swear, this is EXACTLY what's wrong\\x01\",\n \"with youngins these days...\\x02\",\n )\n )\n\n CloseMessageWindow()\n OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)\n OP_22(0x27, 0x0, 0x64)\n Sleep(1000)\n OP_8C(0xFE, 90, 500)\n Sleep(500)\n\n ChrTalk( #8\n 0xFE,\n \"Wh-What are you doing here, young lady?\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #9\n 0x14E,\n (\n \"#1712FU-Umm... Excuse me, sir...\\x02\\x03\",\n\n \"You haven't seen a young girl other\\x01\",\n \"than me in here recently have you?\\x02\",\n )\n )\n\n CloseMessageWindow()\n\n ChrTalk( #10\n 0xFE,\n \"A young girl? 
'Fraid not.\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #11\n 0x14E,\n (\n \"#1713FI-I see...\\x02\\x03\",\n\n \"Sorry for troubling you...\\x02\",\n )\n )\n\n CloseMessageWindow()\n\n def lambda_639():\n\n label(\"loc_639\")\n\n TurnDirection(0xFE, 0x14E, 0)\n OP_48()\n Jump(\"loc_639\")\n\n QueueWorkItem2(0x10, 3, lambda_639)\n OP_43(0x14E, 0x3, 0x0, 0x4)\n Sleep(3000)\n OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)\n Sleep(3000)\n OP_63(0x10)\n\n ChrTalk( #12\n 0xFE,\n \"I swear, kids these days...\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #13\n 0xFE,\n \"They sure are a pain.\\x02\",\n )\n\n CloseMessageWindow()\n OP_A2(0x2F44)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(0x10, 0x3)\n NewScene(\"ED6_DT21/C2219 ._SN\", 107, 0, 0)\n IdleLoop()\n\n label(\"loc_6C1\")\n\n Jump(\"loc_AE8\")\n\n label(\"loc_6C4\")\n\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E4, 0)), scpexpr(EXPR_END)), \"loc_AE1\")\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, MakeScenarioFlags(0x5E8, 3)), scpexpr(EXPR_END)), \"loc_721\")\n\n ChrTalk( #14\n 0xFE,\n \"A happiness stone, you say?\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #15\n 0xFE,\n \"You think somethin' like that exists?\\x02\",\n )\n\n CloseMessageWindow()\n Jump(\"loc_ADE\")\n\n label(\"loc_721\")\n\n EventBegin(0x1)\n OP_8C(0xFE, 270, 0)\n Fade(1000)\n OP_6D(-1600, 0, 202380, 0)\n OP_67(0, 6000, -10000, 0)\n OP_6B(3000, 0)\n OP_6C(45000, 0)\n OP_6E(280, 0)\n SetChrPos(0x14E, -1250, 0, 202480, 270)\n SetChrPos(0x14F, -1060, 0, 201620, 270)\n Sleep(1000)\n\n ChrTalk( #16\n 0xFE,\n \"I swear, kids these days...\\x02\",\n )\n\n CloseMessageWindow()\n OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)\n OP_22(0x27, 0x0, 0x64)\n Sleep(1000)\n OP_8C(0xFE, 90, 500)\n Sleep(500)\n\n ChrTalk( #17\n 0xFE,\n \"Wh-What might you two be doing here?\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #18\n 0x14E,\n \"#1718FHello!\\x02\",\n )\n\n CloseMessageWindow()\n OP_62(0x14E, 0x0, 1600, 0x26, 0x27, 0xFA, 0x1)\n 
Sleep(500)\n OP_63(0x14E)\n\n ChrTalk( #19\n 0x14E,\n (\n \"#1714FActually, lighthouses are pretty high up,\\x01\",\n \"aren't they?\\x02\\x03\",\n\n \"#1718FSir, you haven't seen a happiness stone before,\\x01\",\n \"have you?\\x02\",\n )\n )\n\n CloseMessageWindow()\n\n ChrTalk( #20\n 0xFE,\n \"A-A happiness stone?!\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #21\n 0x14F,\n \"#1730FThey're really shiny and pretty!\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #22\n 0xFE,\n (\n \"N-No, I don't recall ever seein' any\\x01\",\n \"such thing in all my years...\\x02\",\n )\n )\n\n CloseMessageWindow()\n\n ChrTalk( #23\n 0x14E,\n (\n \"#1716FOh... That's too bad...\\x02\\x03\",\n\n \"#1710FWell, thank you, anyway.\\x02\",\n )\n )\n\n CloseMessageWindow()\n TurnDirection(0x14E, 0x14F, 400)\n Sleep(400)\n\n ChrTalk( #24\n 0x14E,\n \"#1718FLet's keep looking, Polly! \\x02\",\n )\n\n CloseMessageWindow()\n OP_43(0x14E, 0x3, 0x0, 0x4)\n Sleep(2000)\n\n ChrTalk( #25\n 0x14F,\n \"#1731FI hope your back feels better, mister!\\x02\",\n )\n\n CloseMessageWindow()\n OP_62(0x10, 0x0, 2000, 0x2, 0x7, 0x50, 0x1)\n OP_22(0x27, 0x0, 0x64)\n Sleep(1000)\n\n def lambda_A1A():\n\n label(\"loc_A1A\")\n\n TurnDirection(0xFE, 0x14F, 0)\n OP_48()\n Jump(\"loc_A1A\")\n\n QueueWorkItem2(0x10, 3, lambda_A1A)\n OP_43(0x14F, 0x3, 0x0, 0x4)\n Sleep(3000)\n OP_62(0x10, 0x0, 2000, 0x18, 0x1B, 0xFA, 0x0)\n Sleep(3000)\n OP_63(0x10)\n\n ChrTalk( #26\n 0xFE,\n \"I swear, kids these days...\\x02\",\n )\n\n CloseMessageWindow()\n\n ChrTalk( #27\n 0xFE,\n \"...They're sharp little devils, aren't they?\\x02\",\n )\n\n CloseMessageWindow()\n Sleep(500)\n\n ChrTalk( #28\n 0xFE,\n \"A happiness stone, hmm...?\\x02\",\n )\n\n CloseMessageWindow()\n OP_A2(0x2F43)\n FadeToDark(2000, 0, -1)\n OP_0D()\n OP_44(0x10, 0x3)\n NewScene(\"ED6_DT21/C2219 ._SN\", 107, 0, 0)\n IdleLoop()\n\n label(\"loc_ADE\")\n\n Jump(\"loc_AE8\")\n\n label(\"loc_AE1\")\n\n Jc((scpexpr(EXPR_TEST_SCENA_FLAGS, 
MakeScenarioFlags(0x5E2, 7)), scpexpr(EXPR_END)), \"loc_AE8\")\n\n label(\"loc_AE8\")\n\n TalkEnd(0xFE)\n Return()\n\n # Function_3_25A end\n\n def Function_4_AEC(): pass\n\n label(\"Function_4_AEC\")\n\n\n def lambda_AF2():\n OP_8E(0xFE, 0xB04, 0x0, 0x32104, 0x7D0, 0x0)\n ExitThread()\n\n QueueWorkItem(0xFE, 1, lambda_AF2)\n WaitChrThread(0xFE, 0x1)\n\n def lambda_B12():\n OP_8E(0xFE, 0xB04, 0x0, 0x3283E, 0x7D0, 0x0)\n ExitThread()\n\n QueueWorkItem(0xFE, 1, lambda_B12)\n WaitChrThread(0xFE, 0x1)\n\n def lambda_B32():\n OP_8E(0xFE, 0xFFFFF254, 0xFFFFF830, 0x328F2, 0x7D0, 0x0)\n ExitThread()\n\n QueueWorkItem(0xFE, 1, lambda_B32)\n WaitChrThread(0xFE, 0x1)\n Return()\n\n # Function_4_AEC end\n\n def Function_5_B4D(): pass\n\n label(\"Function_5_B4D\")\n\n TalkBegin(0xFF)\n TalkEnd(0xFF)\n Return()\n\n # Function_5_B4D end\n\n SaveToFile()\n\nTry(main)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class GetFromURL(tornado.web.RequestHandler):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get(self, index=None, schema=None, entry=None, query=None):
query = dict()
resultGenerator = ResultGenerator()
query[c.OPERATION] = c.GET
if index:
query[c.INDEX] = index
if schema:
query[c.SCHEMA] = schema
if entry:
query[c.ENTRY] = entry
self.logger.debug('Internal Query Generated' + str(query))
try:
result = str(resultGenerator.processQuery(json.dumps(query)))
self.logger.info('Result fetched:' + result)
self.write(result)
except Exception as e:
self.logger.error('Error', exc_info=True)
self.write('Error: ' + str(e))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetFromURL(tornado.web.RequestHandler):
<|reserved_special_token_0|>
def initialize(self):
self.logger = ServerLogger().getLogger()
def get(self, index=None, schema=None, entry=None, query=None):
query = dict()
resultGenerator = ResultGenerator()
query[c.OPERATION] = c.GET
if index:
query[c.INDEX] = index
if schema:
query[c.SCHEMA] = schema
if entry:
query[c.ENTRY] = entry
self.logger.debug('Internal Query Generated' + str(query))
try:
result = str(resultGenerator.processQuery(json.dumps(query)))
self.logger.info('Result fetched:' + result)
self.write(result)
except Exception as e:
self.logger.error('Error', exc_info=True)
self.write('Error: ' + str(e))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GetFromURL(tornado.web.RequestHandler):
"""
This class fetches the data requested like index,schema,entry,query from the url and responds with the result
"""
def initialize(self):
self.logger = ServerLogger().getLogger()
def get(self, index=None, schema=None, entry=None, query=None):
query = dict()
resultGenerator = ResultGenerator()
query[c.OPERATION] = c.GET
if index:
query[c.INDEX] = index
if schema:
query[c.SCHEMA] = schema
if entry:
query[c.ENTRY] = entry
self.logger.debug('Internal Query Generated' + str(query))
try:
result = str(resultGenerator.processQuery(json.dumps(query)))
self.logger.info('Result fetched:' + result)
self.write(result)
except Exception as e:
self.logger.error('Error', exc_info=True)
self.write('Error: ' + str(e))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import json
import tornado
from engine import Constants as c
from engine.ResultGenerator import ResultGenerator
from ..ServerLogger import ServerLogger
class GetFromURL(tornado.web.RequestHandler):
"""
This class fetches the data requested like index,schema,entry,query from the url and responds with the result
"""
def initialize(self):
self.logger = ServerLogger().getLogger()
def get(self, index=None, schema=None, entry=None, query=None):
query = dict()
resultGenerator = ResultGenerator()
query[c.OPERATION] = c.GET
if index:
query[c.INDEX] = index
if schema:
query[c.SCHEMA] = schema
if entry:
query[c.ENTRY] = entry
self.logger.debug('Internal Query Generated' + str(query))
try:
result = str(resultGenerator.processQuery(json.dumps(query)))
self.logger.info('Result fetched:' + result)
self.write(result)
except Exception as e:
self.logger.error('Error', exc_info=True)
self.write('Error: ' + str(e))
<|reserved_special_token_1|>
'''
Created on May 17, 2016
@author: Shauryadeep Chaudhuri
'''
import json
import tornado
from engine import Constants as c
from engine.ResultGenerator import ResultGenerator
from ..ServerLogger import ServerLogger
class GetFromURL(tornado.web.RequestHandler):
'''
This class fetches the data requested like index,schema,entry,query from the url and responds with the result
'''
def initialize(self):
self.logger = ServerLogger().getLogger()
def get(self, index=None, schema=None, entry=None, query=None):
query = dict()
resultGenerator = ResultGenerator()
query[c.OPERATION] = c.GET
if index:
query[c.INDEX] = index
if schema:
query[c.SCHEMA] = schema
if entry:
query[c.ENTRY] = entry
self.logger.debug("Internal Query Generated"+str(query))
try:
result = str(resultGenerator.processQuery(json.dumps(query)))
self.logger.info("Result fetched:" + result)
self.write(result)
except Exception as e:
self.logger.error('Error', exc_info=True)
self.write("Error: " + str(e))
|
flexible
|
{
"blob_id": "5a13c7e3be8a0b5f3baf7106a938fc97f078c5bc",
"index": 7335,
"step-1": "<mask token>\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n <mask token>\n <mask token>\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n",
"step-2": "<mask token>\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n <mask token>\n\n def initialize(self):\n self.logger = ServerLogger().getLogger()\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n",
"step-3": "<mask token>\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n \"\"\"\n This class fetches the data requested like index,schema,entry,query from the url and responds with the result\n \"\"\"\n\n def initialize(self):\n self.logger = ServerLogger().getLogger()\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n",
"step-4": "<mask token>\nimport json\nimport tornado\nfrom engine import Constants as c\nfrom engine.ResultGenerator import ResultGenerator\nfrom ..ServerLogger import ServerLogger\n\n\nclass GetFromURL(tornado.web.RequestHandler):\n \"\"\"\n This class fetches the data requested like index,schema,entry,query from the url and responds with the result\n \"\"\"\n\n def initialize(self):\n self.logger = ServerLogger().getLogger()\n\n def get(self, index=None, schema=None, entry=None, query=None):\n query = dict()\n resultGenerator = ResultGenerator()\n query[c.OPERATION] = c.GET\n if index:\n query[c.INDEX] = index\n if schema:\n query[c.SCHEMA] = schema\n if entry:\n query[c.ENTRY] = entry\n self.logger.debug('Internal Query Generated' + str(query))\n try:\n result = str(resultGenerator.processQuery(json.dumps(query)))\n self.logger.info('Result fetched:' + result)\n self.write(result)\n except Exception as e:\n self.logger.error('Error', exc_info=True)\n self.write('Error: ' + str(e))\n",
"step-5": "'''\r\nCreated on May 17, 2016\r\n\r\n@author: Shauryadeep Chaudhuri\r\n'''\r\n\r\nimport json\r\n\r\nimport tornado\r\n\r\nfrom engine import Constants as c\r\nfrom engine.ResultGenerator import ResultGenerator\r\nfrom ..ServerLogger import ServerLogger\r\n\r\n\r\nclass GetFromURL(tornado.web.RequestHandler):\r\n '''\r\n This class fetches the data requested like index,schema,entry,query from the url and responds with the result\r\n '''\r\n def initialize(self):\r\n self.logger = ServerLogger().getLogger()\r\n \r\n def get(self, index=None, schema=None, entry=None, query=None):\r\n\r\n query = dict()\r\n\r\n resultGenerator = ResultGenerator()\r\n\r\n query[c.OPERATION] = c.GET\r\n\r\n if index:\r\n query[c.INDEX] = index\r\n if schema:\r\n query[c.SCHEMA] = schema\r\n if entry:\r\n query[c.ENTRY] = entry\r\n \r\n self.logger.debug(\"Internal Query Generated\"+str(query))\r\n \r\n try:\r\n result = str(resultGenerator.processQuery(json.dumps(query)))\r\n \r\n self.logger.info(\"Result fetched:\" + result)\r\n \r\n self.write(result)\r\n except Exception as e:\r\n self.logger.error('Error', exc_info=True)\r\n \r\n self.write(\"Error: \" + str(e))\r\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
pn1 = Point(9, 8)
pn2 = Point(6, 4)
print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1, pn2)}')
rc1 = Rectangle(40, 20, 120, 300)
rc2 = Rectangle(30, 21, 350, 400)
print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1, rc2)}')
if ShapeUtils.compare(pn1, pn2) > 0:
print(f'{pn1} > {pn2}')
<|reserved_special_token_1|>
from draw import Rectangle
from draw import Point
from draw import ShapeUtils
if __name__ == '__main__':
pn1 = Point(9, 8)
pn2 = Point(6, 4)
print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1, pn2)}')
rc1 = Rectangle(40, 20, 120, 300)
rc2 = Rectangle(30, 21, 350, 400)
print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1, rc2)}')
if ShapeUtils.compare(pn1, pn2) > 0:
print(f'{pn1} > {pn2}')
<|reserved_special_token_1|>
# import draw as p
# ако няма __init__.py
# from draw.point import Point
from draw import Rectangle
from draw import Point
from draw import ShapeUtils
if __name__ == '__main__':
pn1 = Point(9,8)
pn2 = Point(6,4)
print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1,pn2)}')
rc1 = Rectangle(40,20,120,300)
rc2 = Rectangle(30,21,350,400)
print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1,rc2)}')
if ShapeUtils.compare(pn1,pn2) > 0:
print(f'{pn1} > {pn2}')
|
flexible
|
{
"blob_id": "b984dc052201748a88fa51d25c3bd3c22404fa96",
"index": 6571,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n pn1 = Point(9, 8)\n pn2 = Point(6, 4)\n print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1, pn2)}')\n rc1 = Rectangle(40, 20, 120, 300)\n rc2 = Rectangle(30, 21, 350, 400)\n print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1, rc2)}')\n if ShapeUtils.compare(pn1, pn2) > 0:\n print(f'{pn1} > {pn2}')\n",
"step-3": "from draw import Rectangle\nfrom draw import Point\nfrom draw import ShapeUtils\nif __name__ == '__main__':\n pn1 = Point(9, 8)\n pn2 = Point(6, 4)\n print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1, pn2)}')\n rc1 = Rectangle(40, 20, 120, 300)\n rc2 = Rectangle(30, 21, 350, 400)\n print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1, rc2)}')\n if ShapeUtils.compare(pn1, pn2) > 0:\n print(f'{pn1} > {pn2}')\n",
"step-4": "\n# import draw as p\n\n# ако няма __init__.py\n# from draw.point import Point \n\nfrom draw import Rectangle\nfrom draw import Point\nfrom draw import ShapeUtils\n\n\n\nif __name__ == '__main__':\n pn1 = Point(9,8)\n pn2 = Point(6,4)\n\n print(f'dist: {pn1} and {pn1} = {ShapeUtils.distance(pn1,pn2)}')\n\n rc1 = Rectangle(40,20,120,300)\n rc2 = Rectangle(30,21,350,400)\n\n print(f'dist: {rc1} and {rc1} = {ShapeUtils.distance(rc1,rc2)}')\n\n if ShapeUtils.compare(pn1,pn2) > 0:\n print(f'{pn1} > {pn2}')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding:utf-8
import requests
import io
from zipfile import ZipFile
if __name__ == '__main__':
sentence_url = "http://www.manythings.org/anki/deu-eng.zip"
r = requests.get(sentence_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('deu.txt')
eng_ger_data = file.decode()
eng_ger_data = eng_ger_data.encode('ascii', errors='ignore')
eng_ger_data = eng_ger_data.decode().split('\n')
eng_ger_data = [x.split('\t') for x in eng_ger_data if len(x) >= 1]
[english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)]
print(len(english_sentence))
print(len(german_sentence))
print(eng_ger_data[9])
print(eng_ger_data[10])
print(german_sentence)
|
normal
|
{
"blob_id": "559c665e5544dd864d2f020c967ac8a8665af134",
"index": 6805,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n sentence_url = 'http://www.manythings.org/anki/deu-eng.zip'\n r = requests.get(sentence_url)\n z = ZipFile(io.BytesIO(r.content))\n file = z.read('deu.txt')\n eng_ger_data = file.decode()\n eng_ger_data = eng_ger_data.encode('ascii', errors='ignore')\n eng_ger_data = eng_ger_data.decode().split('\\n')\n eng_ger_data = [x.split('\\t') for x in eng_ger_data if len(x) >= 1]\n [english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)]\n print(len(english_sentence))\n print(len(german_sentence))\n print(eng_ger_data[9])\n print(eng_ger_data[10])\n print(german_sentence)\n",
"step-3": "import requests\nimport io\nfrom zipfile import ZipFile\nif __name__ == '__main__':\n sentence_url = 'http://www.manythings.org/anki/deu-eng.zip'\n r = requests.get(sentence_url)\n z = ZipFile(io.BytesIO(r.content))\n file = z.read('deu.txt')\n eng_ger_data = file.decode()\n eng_ger_data = eng_ger_data.encode('ascii', errors='ignore')\n eng_ger_data = eng_ger_data.decode().split('\\n')\n eng_ger_data = [x.split('\\t') for x in eng_ger_data if len(x) >= 1]\n [english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)]\n print(len(english_sentence))\n print(len(german_sentence))\n print(eng_ger_data[9])\n print(eng_ger_data[10])\n print(german_sentence)\n",
"step-4": "# coding:utf-8\nimport requests\nimport io\nfrom zipfile import ZipFile\n\nif __name__ == '__main__':\n sentence_url = \"http://www.manythings.org/anki/deu-eng.zip\"\n r = requests.get(sentence_url)\n z = ZipFile(io.BytesIO(r.content))\n file = z.read('deu.txt')\n eng_ger_data = file.decode()\n eng_ger_data = eng_ger_data.encode('ascii', errors='ignore')\n eng_ger_data = eng_ger_data.decode().split('\\n')\n eng_ger_data = [x.split('\\t') for x in eng_ger_data if len(x) >= 1]\n [english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)]\n print(len(english_sentence))\n print(len(german_sentence))\n print(eng_ger_data[9])\n print(eng_ger_data[10])\n print(german_sentence)\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from collections import defaultdict
squares = dict()
for i in range(2000):
squares[i * i] = i
perims = defaultdict(int)
for a in range(1, 1001):
for b in range(a + 1, 1001):
if a * a + b * b not in squares:
continue
c = squares[a * a + b * b]
perims[a + b + c] += 1
for perim, v in sorted(perims.items(), key=lambda x: x[1]):
if v > 1 and perim <= 1000:
print(perim, v)
|
normal
|
{
"blob_id": "a3299a2945a638c74c2d16bc28079ed692718fbd",
"index": 2703,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(2000):\n squares[i * i] = i\n<mask token>\nfor a in range(1, 1001):\n for b in range(a + 1, 1001):\n if a * a + b * b not in squares:\n continue\n c = squares[a * a + b * b]\n perims[a + b + c] += 1\nfor perim, v in sorted(perims.items(), key=lambda x: x[1]):\n if v > 1 and perim <= 1000:\n print(perim, v)\n",
"step-3": "<mask token>\nsquares = dict()\nfor i in range(2000):\n squares[i * i] = i\nperims = defaultdict(int)\nfor a in range(1, 1001):\n for b in range(a + 1, 1001):\n if a * a + b * b not in squares:\n continue\n c = squares[a * a + b * b]\n perims[a + b + c] += 1\nfor perim, v in sorted(perims.items(), key=lambda x: x[1]):\n if v > 1 and perim <= 1000:\n print(perim, v)\n",
"step-4": "from collections import defaultdict\nsquares = dict()\nfor i in range(2000):\n squares[i * i] = i\nperims = defaultdict(int)\nfor a in range(1, 1001):\n for b in range(a + 1, 1001):\n if a * a + b * b not in squares:\n continue\n c = squares[a * a + b * b]\n perims[a + b + c] += 1\nfor perim, v in sorted(perims.items(), key=lambda x: x[1]):\n if v > 1 and perim <= 1000:\n print(perim, v)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class BertBasedTODModel(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BertBasedTODModel(nn.Module):
<|reserved_special_token_0|>
def forward(self, input_ids, attention_mask, token_type_ids):
sequence_output, cls = self.bert_model(input_ids=input_ids,
attention_mask=attention_mask, token_type_ids=token_type_ids)
intent_preds = self.intent_classifier(cls)
slot_preds = self.slot_classifier(sequence_output)
return intent_preds, slot_preds
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BertBasedTODModel(nn.Module):
def __init__(self, bert_type, num_intent_labels, num_slot_labels):
super(BertBasedTODModel, self).__init__()
self.bert_model = BertModel.from_pretrained(bert_type)
self.num_intent_labels = num_intent_labels
self.num_slot_labels = num_slot_labels
self.bert_output_dim = 768
self.intent_classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(
self.bert_output_dim, self.num_intent_labels))
self.slot_classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(
self.bert_output_dim, self.num_slot_labels))
def forward(self, input_ids, attention_mask, token_type_ids):
sequence_output, cls = self.bert_model(input_ids=input_ids,
attention_mask=attention_mask, token_type_ids=token_type_ids)
intent_preds = self.intent_classifier(cls)
slot_preds = self.slot_classifier(sequence_output)
return intent_preds, slot_preds
<|reserved_special_token_1|>
import torch.nn as nn
from transformers import BertModel
class BertBasedTODModel(nn.Module):
    """BERT encoder with two heads: utterance-level intent classification
    (from the pooled [CLS] output) and token-level slot tagging (from the
    per-token sequence output)."""

    def __init__(self, bert_type, num_intent_labels, num_slot_labels):
        super(BertBasedTODModel, self).__init__()
        self.bert_model = BertModel.from_pretrained(bert_type)
        self.num_intent_labels = num_intent_labels
        self.num_slot_labels = num_slot_labels
        # Hidden size of bert-base; NOTE(review): assumes a base-sized checkpoint.
        self.bert_output_dim = 768

        def head(num_labels):
            # Dropout followed by a linear projection; identical recipe for both heads.
            return nn.Sequential(nn.Dropout(0.2),
                                 nn.Linear(self.bert_output_dim, num_labels))

        self.intent_classifier = head(self.num_intent_labels)
        self.slot_classifier = head(self.num_slot_labels)

    def forward(self, input_ids, attention_mask, token_type_ids):
        """Return (intent_logits, slot_logits) for one batch.

        Assumes the encoder returns a (sequence_output, pooled_output)
        tuple -- TODO confirm for the installed transformers version.
        """
        encoded_tokens, pooled = self.bert_model(input_ids=input_ids,
                                                 attention_mask=attention_mask,
                                                 token_type_ids=token_type_ids)
        return self.intent_classifier(pooled), self.slot_classifier(encoded_tokens)
|
flexible
|
{
"blob_id": "74e70056ddfd8963a254f1a789a9058554c5489e",
"index": 2586,
"step-1": "<mask token>\n\n\nclass BertBasedTODModel(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BertBasedTODModel(nn.Module):\n <mask token>\n\n def forward(self, input_ids, attention_mask, token_type_ids):\n sequence_output, cls = self.bert_model(input_ids=input_ids,\n attention_mask=attention_mask, token_type_ids=token_type_ids)\n intent_preds = self.intent_classifier(cls)\n slot_preds = self.slot_classifier(sequence_output)\n return intent_preds, slot_preds\n",
"step-3": "<mask token>\n\n\nclass BertBasedTODModel(nn.Module):\n\n def __init__(self, bert_type, num_intent_labels, num_slot_labels):\n super(BertBasedTODModel, self).__init__()\n self.bert_model = BertModel.from_pretrained(bert_type)\n self.num_intent_labels = num_intent_labels\n self.num_slot_labels = num_slot_labels\n self.bert_output_dim = 768\n self.intent_classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(\n self.bert_output_dim, self.num_intent_labels))\n self.slot_classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(\n self.bert_output_dim, self.num_slot_labels))\n\n def forward(self, input_ids, attention_mask, token_type_ids):\n sequence_output, cls = self.bert_model(input_ids=input_ids,\n attention_mask=attention_mask, token_type_ids=token_type_ids)\n intent_preds = self.intent_classifier(cls)\n slot_preds = self.slot_classifier(sequence_output)\n return intent_preds, slot_preds\n",
"step-4": "import torch.nn as nn\nfrom transformers import BertModel\n\n\nclass BertBasedTODModel(nn.Module):\n\n def __init__(self, bert_type, num_intent_labels, num_slot_labels):\n super(BertBasedTODModel, self).__init__()\n self.bert_model = BertModel.from_pretrained(bert_type)\n self.num_intent_labels = num_intent_labels\n self.num_slot_labels = num_slot_labels\n self.bert_output_dim = 768\n self.intent_classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(\n self.bert_output_dim, self.num_intent_labels))\n self.slot_classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(\n self.bert_output_dim, self.num_slot_labels))\n\n def forward(self, input_ids, attention_mask, token_type_ids):\n sequence_output, cls = self.bert_model(input_ids=input_ids,\n attention_mask=attention_mask, token_type_ids=token_type_ids)\n intent_preds = self.intent_classifier(cls)\n slot_preds = self.slot_classifier(sequence_output)\n return intent_preds, slot_preds\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TestCuboid(TestCase):
<|reserved_special_token_0|>
def test_input_value(self):
self.assertRaises(TypeError, cuboid_volume, 'ank')
<|reserved_special_token_0|>
def test_addition_input_value(self):
self.assertRaises(TypeError, add, 'ank', 6)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCuboid(TestCase):
def test_volume(self):
self.assertAlmostEqual(cuboid_volume(2), 8)
self.assertAlmostEqual(cuboid_volume(1), 1)
self.assertAlmostEqual(cuboid_volume(0), 0)
def test_input_value(self):
self.assertRaises(TypeError, cuboid_volume, 'ank')
def test_addition(self):
self.assertEqual(add(3, 4), 7)
self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)
def test_addition_input_value(self):
self.assertRaises(TypeError, add, 'ank', 6)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestCuboid(TestCase):
def test_volume(self):
self.assertAlmostEqual(cuboid_volume(2), 8)
self.assertAlmostEqual(cuboid_volume(1), 1)
self.assertAlmostEqual(cuboid_volume(0), 0)
def test_input_value(self):
self.assertRaises(TypeError, cuboid_volume, 'ank')
def test_addition(self):
self.assertEqual(add(3, 4), 7)
self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)
def test_addition_input_value(self):
self.assertRaises(TypeError, add, 'ank', 6)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from unittest import TestCase, main
from cuboid_volume import *
class TestCuboid(TestCase):
    """Tests for cuboid_volume() and add() (imported from cuboid_volume)."""
    def test_volume(self):
        # Volume of a cube is edge**3.
        self.assertAlmostEqual(cuboid_volume(2), 8)
        self.assertAlmostEqual(cuboid_volume(1), 1)
        self.assertAlmostEqual(cuboid_volume(0), 0)
    def test_input_value(self):
        # A non-numeric edge must raise TypeError.
        self.assertRaises(TypeError, cuboid_volume, 'ank')
    def test_addition(self):
        self.assertEqual(add(3, 4), 7)
        # places=2 tolerates float rounding around the expected 10.7.
        self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)
    def test_addition_input_value(self):
        # A non-numeric operand must raise TypeError.
        self.assertRaises(TypeError, add, 'ank', 6)
if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
"""
Created on Fri Jan 07 20:53:58 2022
@author: Ankit Bharti
"""
from unittest import TestCase, main
from cuboid_volume import *
class TestCuboid(TestCase):
    """Unit tests for cuboid_volume() and add() from the cuboid_volume module."""

    def test_volume(self):
        # Volume of a cube of edge e is e**3.
        for edge, expected in ((2, 8), (1, 1), (0, 0)):
            self.assertAlmostEqual(cuboid_volume(edge), expected)

    def test_input_value(self):
        # A non-numeric edge must raise TypeError.
        self.assertRaises(TypeError, cuboid_volume, 'ank')

    def test_addition(self):
        self.assertEqual(add(3, 4), 7)
        self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)

    def test_addition_input_value(self):
        # A non-numeric operand must raise TypeError.
        self.assertRaises(TypeError, add, 'ank', 6)


if __name__ == '__main__':
    main()
|
flexible
|
{
"blob_id": "394f835064d070a30040b6f01b25b6f0e005827d",
"index": 5010,
"step-1": "<mask token>\n\n\nclass TestCuboid(TestCase):\n <mask token>\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n <mask token>\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCuboid(TestCase):\n\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCuboid(TestCase):\n\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom unittest import TestCase, main\nfrom cuboid_volume import *\n\n\nclass TestCuboid(TestCase):\n\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nCreated on Fri Jan 07 20:53:58 2022\n@author: Ankit Bharti\n\n\"\"\"\n\n\nfrom unittest import TestCase, main\nfrom cuboid_volume import *\n\n\nclass TestCuboid(TestCase):\n def test_volume(self):\n self.assertAlmostEqual(cuboid_volume(2), 8)\n self.assertAlmostEqual(cuboid_volume(1), 1)\n self.assertAlmostEqual(cuboid_volume(0), 0)\n\n def test_input_value(self):\n self.assertRaises(TypeError, cuboid_volume, 'ank')\n\n def test_addition(self):\n self.assertEqual(add(3, 4), 7)\n self.assertAlmostEqual(add(4.5, 6.2), 10.701, places=2)\n\n def test_addition_input_value(self):\n self.assertRaises(TypeError, add, 'ank', 6)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def increment():
global time
time = time + 1
def start():
timer.start()
def stop():
global correct, tries
timer.stop()
if time != 0:
tries = tries + 1
if time % 10 == 0:
correct = correct + 1
def reset():
global time, correct, tries
time, correct, tries = 0, 0, 0
stop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def increment():
global time
time = time + 1
def start():
timer.start()
def stop():
global correct, tries
timer.stop()
if time != 0:
tries = tries + 1
if time % 10 == 0:
correct = correct + 1
def reset():
global time, correct, tries
time, correct, tries = 0, 0, 0
stop()
def draw(canvas):
format()
canvas.draw_text(str(correct), (253, 30), 30, 'white')
canvas.draw_text('/', (270, 30), 30, 'white')
canvas.draw_text(str(tries), (280, 30), 30, 'white')
canvas.draw_text(watch, (70, 130), 60, 'white')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def format():
global time, watch
t = time
deciseconds = t % 10
remains = t - deciseconds
seconds = remains % 600 / 10
minutes = remains / 600
if seconds < 10:
zero = '0'
else:
zero = ''
watch = str(minutes) + ':' + zero + str(seconds) + '.' + str(deciseconds)
def increment():
global time
time = time + 1
def start():
timer.start()
def stop():
global correct, tries
timer.stop()
if time != 0:
tries = tries + 1
if time % 10 == 0:
correct = correct + 1
def reset():
global time, correct, tries
time, correct, tries = 0, 0, 0
stop()
def draw(canvas):
format()
canvas.draw_text(str(correct), (253, 30), 30, 'white')
canvas.draw_text('/', (270, 30), 30, 'white')
canvas.draw_text(str(tries), (280, 30), 30, 'white')
canvas.draw_text(watch, (70, 130), 60, 'white')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
time = 0
watch = ''
tries = 0
correct = 0
def format():
global time, watch
t = time
deciseconds = t % 10
remains = t - deciseconds
seconds = remains % 600 / 10
minutes = remains / 600
if seconds < 10:
zero = '0'
else:
zero = ''
watch = str(minutes) + ':' + zero + str(seconds) + '.' + str(deciseconds)
def increment():
global time
time = time + 1
def start():
timer.start()
def stop():
global correct, tries
timer.stop()
if time != 0:
tries = tries + 1
if time % 10 == 0:
correct = correct + 1
def reset():
global time, correct, tries
time, correct, tries = 0, 0, 0
stop()
def draw(canvas):
format()
canvas.draw_text(str(correct), (253, 30), 30, 'white')
canvas.draw_text('/', (270, 30), 30, 'white')
canvas.draw_text(str(tries), (280, 30), 30, 'white')
canvas.draw_text(watch, (70, 130), 60, 'white')
frame = simplegui.create_frame('StOpWaTcH: gAmE', 320, 200)
button1 = frame.add_button('Start timer', start, 100)
button2 = frame.add_button('Stop timer', stop, 100)
button3 = frame.add_button('Resrt timer', reset, 100)
frame.set_draw_handler(draw)
timer = simplegui.create_timer(100, increment)
frame.start()
<|reserved_special_token_1|>
#game that has a timer and you need to stop the timer
#with 0 at the end.
import simplegui
#necessary global variables
#time for the timer
time = 0
#the display for the timer(string form)
watch = ''
#tries and correct presses
tries = 0
correct = 0
#changes time to watch(number to string of form A:BC.D)
def format():
    """Render the global tick count `time` (tenths of a second) into the
    global display string `watch`, formatted as M:SS.D.

    Bug fix: the original used true division (`/`), which under Python 3
    produces floats and renders e.g. "0.0:012.0.5"; integer arithmetic via
    divmod restores the intended Python-2/CodeSkulptor behaviour on both
    versions.

    NOTE: the name shadows the builtin format(); kept because draw() and
    the file's other callers use this name.
    """
    global time, watch
    minutes, remainder = divmod(time, 600)       # 600 tenths per minute
    seconds, deciseconds = divmod(remainder, 10)  # 10 tenths per second
    watch = '%d:%02d.%d' % (minutes, seconds, deciseconds)
#increase the time
def increment():
    """Timer tick handler: advance the global clock by one tenth of a second."""
    global time
    time += 1
#start the timer
def start():
    # Begin counting: the simplegui timer fires increment() every 100 ms.
    timer.start()
#stop the timer + claculate the tries and correct stops
def stop():
    """Halt the timer and score the attempt.

    A press while the clock shows non-zero counts as a try; it is a win
    when the clock shows a whole second (tenths digit == 0).
    """
    global correct, tries
    timer.stop()
    if time == 0:
        return
    tries += 1
    if time % 10 == 0:
        correct += 1
#reset all values
def reset():
    """Zero the clock and the score, then make sure the timer is stopped."""
    global time, correct, tries
    time = correct = tries = 0
    stop()
#necessary drawings
def draw(canvas):
    # Per-frame draw handler: refresh the display string, then paint the
    # score as "correct/tries" in the top-right corner and the big clock
    # readout below it.
    format()
    canvas.draw_text(str(correct), (253, 30), 30, 'white')
    canvas.draw_text('/', (270, 30), 30, 'white')
    canvas.draw_text(str(tries), (280, 30), 30, 'white')
    canvas.draw_text(watch, (70, 130), 60, 'white')
# UI wiring: frame, three control buttons, draw handler, 100 ms tick timer.
frame = simplegui.create_frame("StOpWaTcH: gAmE", 320, 200)
button1 = frame.add_button("Start timer", start, 100)
button2 = frame.add_button("Stop timer", stop, 100)
# NOTE(review): "Resrt timer" is a user-visible typo for "Reset timer" -- confirm before changing UI text.
button3 = frame.add_button("Resrt timer", reset, 100)
frame.set_draw_handler(draw)
# One tick every 100 ms drives increment(), so `time` counts tenths of a second.
timer = simplegui.create_timer(100, increment)
# start of the game
frame.start()
|
flexible
|
{
"blob_id": "b3c22b4a453aa55da980b090df2749ff9f1066e6",
"index": 5932,
"step-1": "<mask token>\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\ndef draw(canvas):\n format()\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\n canvas.draw_text('/', (270, 30), 30, 'white')\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\n canvas.draw_text(watch, (70, 130), 60, 'white')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef format():\n global time, watch\n t = time\n deciseconds = t % 10\n remains = t - deciseconds\n seconds = remains % 600 / 10\n minutes = remains / 600\n if seconds < 10:\n zero = '0'\n else:\n zero = ''\n watch = str(minutes) + ':' + zero + str(seconds) + '.' + str(deciseconds)\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\ndef draw(canvas):\n format()\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\n canvas.draw_text('/', (270, 30), 30, 'white')\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\n canvas.draw_text(watch, (70, 130), 60, 'white')\n\n\n<mask token>\n",
"step-4": "<mask token>\ntime = 0\nwatch = ''\ntries = 0\ncorrect = 0\n\n\ndef format():\n global time, watch\n t = time\n deciseconds = t % 10\n remains = t - deciseconds\n seconds = remains % 600 / 10\n minutes = remains / 600\n if seconds < 10:\n zero = '0'\n else:\n zero = ''\n watch = str(minutes) + ':' + zero + str(seconds) + '.' + str(deciseconds)\n\n\ndef increment():\n global time\n time = time + 1\n\n\ndef start():\n timer.start()\n\n\ndef stop():\n global correct, tries\n timer.stop()\n if time != 0:\n tries = tries + 1\n if time % 10 == 0:\n correct = correct + 1\n\n\ndef reset():\n global time, correct, tries\n time, correct, tries = 0, 0, 0\n stop()\n\n\ndef draw(canvas):\n format()\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\n canvas.draw_text('/', (270, 30), 30, 'white')\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\n canvas.draw_text(watch, (70, 130), 60, 'white')\n\n\nframe = simplegui.create_frame('StOpWaTcH: gAmE', 320, 200)\nbutton1 = frame.add_button('Start timer', start, 100)\nbutton2 = frame.add_button('Stop timer', stop, 100)\nbutton3 = frame.add_button('Resrt timer', reset, 100)\nframe.set_draw_handler(draw)\ntimer = simplegui.create_timer(100, increment)\nframe.start()\n",
"step-5": "#game that has a timer and you need to stop the timer\r\n#with 0 at the end.\r\n\r\nimport simplegui\r\n\r\n#necessary global variables\r\n\r\n#time for the timer\r\ntime = 0\r\n#the display for the timer(string form)\r\nwatch = ''\r\n#tries and correct presses\r\ntries = 0\r\ncorrect = 0\r\n\r\n\r\n#changes time to watch(number to string of form A:BC.D)\r\ndef format():\r\n global time, watch\r\n t = time\r\n deciseconds = t % 10\r\n remains = t - deciseconds\r\n seconds = (remains % 600) / 10\r\n minutes = remains / 600\r\n if seconds<10:\r\n zero = '0'\r\n else:\r\n zero = '' \r\n watch = str(minutes) + \":\" + zero + str(seconds) + \".\" + str(deciseconds)\r\n \r\n\r\n#increase the time \r\ndef increment():\r\n global time\r\n time = time + 1 \r\n \r\n \r\n#start the timer \r\ndef start():\r\n timer.start()\r\n \r\n\r\n#stop the timer + claculate the tries and correct stops\r\ndef stop():\r\n global correct, tries\r\n timer.stop()\r\n if time != 0:\r\n tries = tries + 1\r\n if time % 10 == 0:\r\n correct = correct + 1\r\n\r\n\r\n#reset all values \r\ndef reset():\r\n global time, correct, tries\r\n time, correct, tries = 0,0,0\r\n stop() \r\n\r\n\r\n#necessary drawings \r\ndef draw(canvas):\r\n format()\r\n canvas.draw_text(str(correct), (253, 30), 30, 'white')\r\n canvas.draw_text('/', (270, 30), 30, 'white') \r\n canvas.draw_text(str(tries), (280, 30), 30, 'white')\r\n canvas.draw_text(watch, (70, 130), 60,'white')\r\n \r\n\r\n#frame and event handlers\r\nframe = simplegui.create_frame(\"StOpWaTcH: gAmE\", 320, 200)\r\nbutton1 = frame.add_button(\"Start timer\", start, 100)\r\nbutton2 = frame.add_button(\"Stop timer\", stop, 100)\r\nbutton3 = frame.add_button(\"Resrt timer\", reset, 100)\r\nframe.set_draw_handler(draw)\r\ntimer = simplegui.create_timer(100, increment)\r\n\r\n\r\n#start of the game\r\nframe.start()\r\n",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
# Ask the user's age and report it ten years from now (prompts in Russian).
vozrast = int(input("сколько вам лет?"))
# Bug fix: the output said "бóдет" -- a mis-encoded/typo form of "будет" ("will be").
print("через 10 лет вам будет", vozrast + 10)
|
normal
|
{
"blob_id": "8e3f23733235d73fab14e80ee0a3706ae351c7a2",
"index": 4525,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('через 10 лет вам бóдет', vozrast + 10)\n",
"step-3": "vozrast = int(input('сколько вам лет?'))\nprint('через 10 лет вам бóдет', vozrast + 10)\n",
"step-4": "vozrast=int(input(\"сколько вам лет?\"))\nprint (\"через 10 лет вам бóдет\", vozrast+10)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def del_ops3(str1, str2):
    """Greedy helper for the minimum-deletion problem: collects letters that
    appear in the same relative order in both strings and returns them joined.

    NOTE(review): `defaultdict` is used below but never imported in this
    variant -- confirm `from collections import defaultdict` exists at file
    level.  Also, the first two branches set `total` but never `substring`,
    so the final `return ''.join(substring)` raises UnboundLocalError when
    the strings share 0 or 1 letters -- confirm intended.
    """
    # Letters of each string that occur anywhere in the other, in order.
    common1 = [x for x in str1 if x in str2]
    common2 = [x for x in str2 if x in str1]
    # Keep common1 as the shorter of the two.
    if len(common2) < len(common1):
        common1, common2 = common2, common1
    if len(common1) == 0 or len(common2) == 0:
        # No overlap: every character must be deleted.
        total = len(str1) + len(str2)
    elif (len(common1) == 1 or len(common2) == 1) or len(common1) == 2 and len(
        common2) == 2 and common1 != common2:
        # Exactly one shared letter can survive.
        total = len(str1) - 1 + (len(str2) - 1)
    else:
        # Map each shared letter to its indexes within common2.
        refs = defaultdict(list)
        for i, letter in enumerate(common2):
            refs[letter].append(i)
        # Greedily gather letters that follow each other in both strings.
        substring = []
        previous = min(refs[common1[0]])
        for i, letter in enumerate(common1):
            # NOTE(review): the comprehension variable `i` shadows the loop
            # index -- presumably intentional shorthand, but verify.
            if any([(i > previous) for i in refs[letter]]) and all([(i !=
                previous) for i in refs[letter]]):
                if all([(hash(x) != hash(common2[previous])) for x in
                    substring]):
                    substring.append(common2[previous])
                substring.append(letter)
                previous = min([x for x in refs[letter] if x >= previous])
            elif all(refs[letter]) < previous:
                # NOTE(review): compares bool(all(...)) with an int --
                # likely meant all(j < previous for j in refs[letter]).
                previous = min([x for x in refs[letter]])
            print(i, previous, letter, substring)
        # total = all letters minus the doubly-counted common run (unused).
        total = len(str1) - len(substring) + (len(str2) - len(substring))
    return ''.join(substring)
<|reserved_special_token_1|>
def del_ops3(str1, str2):
    """Return a greedy approximation of the letters shared, in order, by
    *str1* and *str2* (helper for the minimum-deletion-operations problem).

    Bug fixes vs. the original:
      * `defaultdict` was used without ever being imported;
      * the short-string branches computed a deletion total but then fell
        through to `return ''.join(substring)` with `substring` unbound,
        raising UnboundLocalError -- they now return the (possibly empty)
        surviving common part directly, matching their own deletion-count
        arithmetic;
      * the per-iteration debug print was removed.
    """
    from collections import defaultdict

    # Letters of each string that occur anywhere in the other, in order.
    common1 = [x for x in str1 if x in str2]
    common2 = [x for x in str2 if x in str1]
    # Keep common1 as the shorter of the two.
    if len(common2) < len(common1):
        common1, common2 = common2, common1

    # Degenerate cases: no overlap, or at most one letter can survive.
    if not common1 or not common2:
        return ''
    if (len(common1) == 1 or len(common2) == 1) or (
            len(common1) == 2 and len(common2) == 2 and common1 != common2):
        # The (len-1)+(len-1) deletion formula implies exactly one shared
        # letter survives; by construction common1[0] occurs in common2.
        return common1[0]

    # Map each shared letter to its indexes within common2.
    refs = defaultdict(list)
    for i, letter in enumerate(common2):
        refs[letter].append(i)

    # Greedily collect letters that follow each other in both strings.
    # NOTE(review): heuristic, not a full LCS -- algorithm kept as-is.
    substring = []
    previous = min(refs[common1[0]])
    for letter in common1:
        if any(j > previous for j in refs[letter]) and all(
                j != previous for j in refs[letter]):
            if all(hash(x) != hash(common2[previous]) for x in substring):
                substring.append(common2[previous])
            substring.append(letter)
            previous = min(x for x in refs[letter] if x >= previous)
        elif all(refs[letter]) < previous:
            # NOTE(review): compares bool(all(...)) with an int; kept
            # byte-for-byte to preserve the original resync behaviour,
            # though it likely meant all(j < previous for j in refs[letter]).
            previous = min(refs[letter])
    return ''.join(substring)
|
flexible
|
{
"blob_id": "f9d1013fa278b9078e603b012abbdde0be2e0962",
"index": 7926,
"step-1": "<mask token>\n",
"step-2": "def del_ops3(str1, str2):\n common1 = [x for x in str1 if x in str2]\n common2 = [x for x in str2 if x in str1]\n if len(common2) < len(common1):\n common1, common2 = common2, common1\n if len(common1) == 0 or len(common2) == 0:\n total = len(str1) + len(str2)\n elif (len(common1) == 1 or len(common2) == 1) or len(common1) == 2 and len(\n common2) == 2 and common1 != common2:\n total = len(str1) - 1 + (len(str2) - 1)\n else:\n refs = defaultdict(list)\n for i, letter in enumerate(common2):\n refs[letter].append(i)\n substring = []\n previous = min(refs[common1[0]])\n for i, letter in enumerate(common1):\n if any([(i > previous) for i in refs[letter]]) and all([(i !=\n previous) for i in refs[letter]]):\n if all([(hash(x) != hash(common2[previous])) for x in\n substring]):\n substring.append(common2[previous])\n substring.append(letter)\n previous = min([x for x in refs[letter] if x >= previous])\n elif all(refs[letter]) < previous:\n previous = min([x for x in refs[letter]])\n print(i, previous, letter, substring)\n total = len(str1) - len(substring) + (len(str2) - len(substring))\n return ''.join(substring)\n",
"step-3": "def del_ops3(str1, str2):\n\n # find all common letters in both strings\n common1 = [x for x in str1 if x in str2]\n common2 = [x for x in str2 if x in str1]\n if len(common2) < len(common1):\n common1, common2 = common2, common1\n\n # find total of strings with 0, 1, or 2 characters, (2 chars - only if c1 != c2)\n if len(common1) == 0 or len(common2) == 0:\n total = len(str1) + len(str2)\n elif (len(common1) == 1 or len(common2) == 1) or (len(common1) == 2 and len(common2) == 2 and common1 != common2):\n total = (len(str1) - 1) + (len(str2) - 1)\n\n # else, if 2 characters in c1, c2 and c1 != c2 or > 2 characters in c1, c2\n else:\n\n # create references to c2 indexes of each letter in c1\n refs = defaultdict(list)\n for i, letter in enumerate(common2):\n refs[letter].append(i)\n\n # find all letters that follow each other (same order) in both strings\n substring = [] # substring == all common letters in same sequence in both strings\n previous = min(refs[common1[0]])\n for i, letter in enumerate(common1):\n\n # if any c2 index of the current letter in c1 is > the c2 index of previous letter:\n # the current letter follows the previous letter in both c1 and c2\n if any([i > previous for i in refs[letter]]) and all([i != previous for i in refs[letter]]):\n\n # if the same letter at the same index is not already in substring:\n if all([hash(x) != hash(common2[previous]) for x in substring]):\n substring.append(common2[previous])\n\n substring.append(letter)\n previous = min([x for x in refs[letter] if x >= previous])\n # next iteration of previous is always == the smallest index\n # of the current letter that is >= current iteration of previous\n # (always > previous if not first iteration in c1)\n # indexes are never repeated or skipped\n\n # elif the letter does not follow the same letter in both strings:\n # previous = smallest c2 index of letter that broke sequence/did not follow in both strings\n elif all(refs[letter]) < previous:\n previous = min([x 
for x in refs[letter]])\n print(i, previous, letter, substring)\n # total == total of all letters - (number of letters in substring * 2)\n total = (len(str1) - len(substring)) + (len(str2) - len(substring))\n\n return \"\".join(substring)\n \n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class AetNode(object):
def __init__(self, x, tx, my):
self.x = x
self.tx = tx
self.my = my
def op(self):
return self.x
class AetList(object):
def __init__(self, y):
self.y = y
self.numy = 0
self.l = []
pass
<|reserved_special_token_0|>
def createNewEdgeTable(point):
miny, maxy = findRange(point)
Y = []
for i in point:
Y.append(i[1])
Y = set(Y)
Y = list(Y)
newEdgeList = []
y = miny
while y <= maxy:
if y in Y:
print(y)
templist = []
for i in range(0, 6):
if point[i][1] == y:
templist.append(i)
print(templist)
print('一次创建新边表')
lists = AetList(y)
for temp in templist:
index1 = (temp + 7) % 6
index2 = (temp + 5) % 6
print(point[temp][0], point[temp][1])
print(point[index1][0], point[index1][1])
print(point[index2][0], point[index2][1])
print('+++++++++++++++++++++')
if point[index1][1] > y:
lists.numy += 1
if point[index1][1] - point[temp][1] == 0:
node = AetNode(point[temp][0], 0, point[index1][1])
else:
node = AetNode(point[temp][0], (point[index1][0] -
point[temp][0]) / (point[index1][1] - point[
temp][1]), point[index1][1])
lists.l.append(node)
if point[index2][1] > y:
lists.numy += 1
if point[index2][1] - point[temp][1] == 0:
node = AetNode(point[temp][0], 0, point[index2][1])
else:
node = AetNode(point[temp][0], (point[index2][0] -
point[temp][0]) / (point[index2][1] - point[
temp][1]), point[index2][1])
lists.l.append(node)
if len(lists.l) != 0:
lists.l.sort(key=AetNode.op)
if len(templist) > 1:
lists.numy -= 1
newEdgeList.append(lists)
y += 1
printNewEegeList(newEdgeList)
return newEdgeList, Y
def draw(x1, y1, x, y):
turtle.penup()
turtle.goto(x1, y1)
turtle.pendown()
turtle.goto(x, y)
def run():
turtle.screensize(1920, 1080)
turtle.penup()
turtle.hideturtle()
point = []
temp = [float(x11.get()), float(y1.get())]
point.append(temp)
temp = [float(x2.get()), float(y2.get())]
point.append(temp)
temp = [float(x3.get()), float(y3.get())]
point.append(temp)
temp = [float(x4.get()), float(y4.get())]
point.append(temp)
temp = [float(x5.get()), float(y5.get())]
point.append(temp)
temp = [float(x6.get()), float(y6.get())]
point.append(temp)
point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]
for i in point:
turtle.goto(i[0], i[1])
turtle.pendown()
turtle.goto(point[0][0], point[0][1])
newEdgeTable, Y = createNewEdgeTable(point)
miny, maxy = findRange(point)
y = miny
acativeList = []
while y <= maxy:
ynum = 0
if y in Y:
for i in newEdgeTable:
if i.y == y:
for j in i.l:
acativeList.append(j)
ynum = i.numy
break
acativeList.sort(key=AetNode.op)
for j in acativeList:
print((j.x, j.tx, j.my))
print('****************')
i = 0
flag = True
while i < len(acativeList) - 1:
x1 = acativeList[i].x
temp = [acativeList[i + 1].x, y]
if temp in point and ynum >= 1:
ynum -= 1
else:
i += 1
if flag:
draw(x1, y, temp[0], y)
flag = not flag
newacativeList = []
for i in acativeList:
if i.my > y:
i.x += i.tx
newacativeList.append(i)
acativeList = newacativeList
y += 1
turtle.mainloop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AetNode(object):
    """One edge record of the scanline fill's active-edge table.

    x  -- current x intersection of the edge with the scanline
    tx -- x increment per unit y (inverse slope)
    my -- y of the edge's upper endpoint; the edge retires past it
    """

    def __init__(self, x, tx, my):
        self.x, self.tx, self.my = x, tx, my

    def op(self):
        # Sort key: active edges are kept ordered left-to-right by x.
        return self.x
class AetList(object):
    """One bucket of the new-edge table: the edges that start at scanline y.

    numy counts edges rising from a vertex on this scanline (vertex
    bookkeeping during filling); l holds the AetNode records.
    """

    def __init__(self, y):
        self.y = y      # scanline this bucket belongs to
        self.numy = 0   # rising-edge vertex counter
        self.l = []     # AetNode records starting at this scanline
def findRange(point):
    """Return (miny, maxy): the vertical extent of the polygon's vertices.

    point -- sequence of [x, y] pairs; must be non-empty (the original
    indexed point[0] unconditionally, so that precondition is unchanged).
    Replaces the manual min/max tracking loop with the builtins.
    """
    ys = [p[1] for p in point]
    return min(ys), max(ys)
def printNewEegeList(newEdgeTable):
    # Debug helper: dump every new-edge-table bucket -- its scanline y,
    # then each edge's (x, tx, my) triple, then a separator rule.
    # The misspelled name ("Eege") is kept: createNewEdgeTable() calls it.
    # '新边表是:' means "the new edge table is:".
    print('新边表是:')
    for i in newEdgeTable:
        print(i.y)
        for j in i.l:
            print((j.x, j.tx, j.my))
        print('__________________________________')
def createNewEdgeTable(point):
    """Build the new-edge table (NET) for scanline polygon filling.

    Returns (newEdgeList, Y): one AetList bucket per scanline that starts
    at least one edge, plus the list of distinct vertex y values.
    NOTE(review): the vertex scan uses range(0, 6) and (i±1) % 6 wrap
    arithmetic, so this only works for hexagons (6 vertices) -- confirm.
    The print() calls are debug traces left in by the author.
    """
    miny, maxy = findRange(point)
    # Y: the distinct y coordinates at which edges can start.
    Y = []
    for i in point:
        Y.append(i[1])
    Y = set(Y)
    Y = list(Y)
    newEdgeList = []
    y = miny
    while y <= maxy:
        if y in Y:
            print(y)
            # Indexes of all vertices lying on this scanline.
            templist = []
            for i in range(0, 6):
                if point[i][1] == y:
                    templist.append(i)
            print(templist)
            # '一次创建新边表' means "creating one new-edge-table entry".
            print('一次创建新边表')
            lists = AetList(y)
            for temp in templist:
                # Neighbouring vertices of vertex `temp` (wrap around 6).
                index1 = (temp + 7) % 6
                index2 = (temp + 5) % 6
                print(point[temp][0], point[temp][1])
                print(point[index1][0], point[index1][1])
                print(point[index2][0], point[index2][1])
                print('+++++++++++++++++++++')
                # Only edges rising above this scanline are recorded; the
                # zero-dy guard avoids division by zero on horizontal edges.
                if point[index1][1] > y:
                    lists.numy += 1
                    if point[index1][1] - point[temp][1] == 0:
                        node = AetNode(point[temp][0], 0, point[index1][1])
                    else:
                        node = AetNode(point[temp][0], (point[index1][0] -
                            point[temp][0]) / (point[index1][1] - point[
                            temp][1]), point[index1][1])
                    lists.l.append(node)
                if point[index2][1] > y:
                    lists.numy += 1
                    if point[index2][1] - point[temp][1] == 0:
                        node = AetNode(point[temp][0], 0, point[index2][1])
                    else:
                        node = AetNode(point[temp][0], (point[index2][0] -
                            point[temp][0]) / (point[index2][1] - point[
                            temp][1]), point[index2][1])
                    lists.l.append(node)
            if len(lists.l) != 0:
                # Edges sorted left-to-right; buckets with multiple
                # starting vertices drop one from the rising count.
                lists.l.sort(key=AetNode.op)
                if len(templist) > 1:
                    lists.numy -= 1
                newEdgeList.append(lists)
        y += 1
    printNewEegeList(newEdgeList)
    return newEdgeList, Y
def draw(x1, y1, x, y):
    # Draw one horizontal fill span: lift the pen, jump to (x1, y1),
    # then stroke to (x, y) with the pen down.
    turtle.penup()
    turtle.goto(x1, y1)
    turtle.pendown()
    turtle.goto(x, y)
def run():
    """Read six polygon vertices, outline the hexagon with turtle, then
    fill it with horizontal spans via scanline edge-table traversal."""
    turtle.screensize(1920, 1080)
    turtle.penup()
    turtle.hideturtle()
    # Collect the six vertices from the Tk entry widgets x11/y1 ... x6/y6.
    point = []
    temp = [float(x11.get()), float(y1.get())]
    point.append(temp)
    temp = [float(x2.get()), float(y2.get())]
    point.append(temp)
    temp = [float(x3.get()), float(y3.get())]
    point.append(temp)
    temp = [float(x4.get()), float(y4.get())]
    point.append(temp)
    temp = [float(x5.get()), float(y5.get())]
    point.append(temp)
    temp = [float(x6.get()), float(y6.get())]
    point.append(temp)
    # NOTE(review): this hard-coded list overrides every value just read
    # from the UI -- looks like a leftover debug fixture; confirm intent.
    point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]
    # Outline the polygon, closing back to the first vertex.
    for i in point:
        turtle.goto(i[0], i[1])
        turtle.pendown()
    turtle.goto(point[0][0], point[0][1])
    newEdgeTable, Y = createNewEdgeTable(point)
    miny, maxy = findRange(point)
    y = miny
    # Active-edge table: edges currently crossed by the scanline.
    acativeList = []
    while y <= maxy:
        ynum = 0
        if y in Y:
            # Merge in the edges that start at this scanline.
            for i in newEdgeTable:
                if i.y == y:
                    for j in i.l:
                        acativeList.append(j)
                    ynum = i.numy
                    break
        acativeList.sort(key=AetNode.op)
        # Debug trace of the active-edge table.
        for j in acativeList:
            print((j.x, j.tx, j.my))
        print('****************')
        # Walk intersections pairwise; `flag` alternates inside/outside,
        # and vertex hits (temp in point) consume the ynum allowance so
        # parity is preserved across shared vertices.
        i = 0
        flag = True
        while i < len(acativeList) - 1:
            x1 = acativeList[i].x
            temp = [acativeList[i + 1].x, y]
            if temp in point and ynum >= 1:
                ynum -= 1
            else:
                i += 1
            if flag:
                draw(x1, y, temp[0], y)
            flag = not flag
        # Advance surviving edges by one scanline; retire edges whose
        # upper endpoint has been reached.
        newacativeList = []
        for i in acativeList:
            if i.my > y:
                i.x += i.tx
                newacativeList.append(i)
        acativeList = newacativeList
        y += 1
    turtle.mainloop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AetNode(object):
def __init__(self, x, tx, my):
self.x = x
self.tx = tx
self.my = my
def op(self):
return self.x
class AetList(object):
def __init__(self, y):
self.y = y
self.numy = 0
self.l = []
pass
def findRange(point):
maxy = point[0][1]
miny = point[0][1]
for i in point:
if maxy < i[1]:
maxy = i[1]
if miny > i[1]:
miny = i[1]
return miny, maxy
def printNewEegeList(newEdgeTable):
print('新边表是:')
for i in newEdgeTable:
print(i.y)
for j in i.l:
print((j.x, j.tx, j.my))
print('__________________________________')
def createNewEdgeTable(point):
miny, maxy = findRange(point)
Y = []
for i in point:
Y.append(i[1])
Y = set(Y)
Y = list(Y)
newEdgeList = []
y = miny
while y <= maxy:
if y in Y:
print(y)
templist = []
for i in range(0, 6):
if point[i][1] == y:
templist.append(i)
print(templist)
print('一次创建新边表')
lists = AetList(y)
for temp in templist:
index1 = (temp + 7) % 6
index2 = (temp + 5) % 6
print(point[temp][0], point[temp][1])
print(point[index1][0], point[index1][1])
print(point[index2][0], point[index2][1])
print('+++++++++++++++++++++')
if point[index1][1] > y:
lists.numy += 1
if point[index1][1] - point[temp][1] == 0:
node = AetNode(point[temp][0], 0, point[index1][1])
else:
node = AetNode(point[temp][0], (point[index1][0] -
point[temp][0]) / (point[index1][1] - point[
temp][1]), point[index1][1])
lists.l.append(node)
if point[index2][1] > y:
lists.numy += 1
if point[index2][1] - point[temp][1] == 0:
node = AetNode(point[temp][0], 0, point[index2][1])
else:
node = AetNode(point[temp][0], (point[index2][0] -
point[temp][0]) / (point[index2][1] - point[
temp][1]), point[index2][1])
lists.l.append(node)
if len(lists.l) != 0:
lists.l.sort(key=AetNode.op)
if len(templist) > 1:
lists.numy -= 1
newEdgeList.append(lists)
y += 1
printNewEegeList(newEdgeList)
return newEdgeList, Y
def draw(x1, y1, x, y):
turtle.penup()
turtle.goto(x1, y1)
turtle.pendown()
turtle.goto(x, y)
def run():
turtle.screensize(1920, 1080)
turtle.penup()
turtle.hideturtle()
point = []
temp = [float(x11.get()), float(y1.get())]
point.append(temp)
temp = [float(x2.get()), float(y2.get())]
point.append(temp)
temp = [float(x3.get()), float(y3.get())]
point.append(temp)
temp = [float(x4.get()), float(y4.get())]
point.append(temp)
temp = [float(x5.get()), float(y5.get())]
point.append(temp)
temp = [float(x6.get()), float(y6.get())]
point.append(temp)
point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]
for i in point:
turtle.goto(i[0], i[1])
turtle.pendown()
turtle.goto(point[0][0], point[0][1])
newEdgeTable, Y = createNewEdgeTable(point)
miny, maxy = findRange(point)
y = miny
acativeList = []
while y <= maxy:
ynum = 0
if y in Y:
for i in newEdgeTable:
if i.y == y:
for j in i.l:
acativeList.append(j)
ynum = i.numy
break
acativeList.sort(key=AetNode.op)
for j in acativeList:
print((j.x, j.tx, j.my))
print('****************')
i = 0
flag = True
while i < len(acativeList) - 1:
x1 = acativeList[i].x
temp = [acativeList[i + 1].x, y]
if temp in point and ynum >= 1:
ynum -= 1
else:
i += 1
if flag:
draw(x1, y, temp[0], y)
flag = not flag
newacativeList = []
for i in acativeList:
if i.my > y:
i.x += i.tx
newacativeList.append(i)
acativeList = newacativeList
y += 1
turtle.mainloop()
<|reserved_special_token_0|>
tk.title('扫描填充算法:by 高谦')
Label(tk, text='输入顶点:').grid(row=0)
Label(tk, text='1:').grid(row=1)
Label(tk, text='2:').grid(row=2)
Label(tk, text='3:').grid(row=3)
Label(tk, text='4:').grid(row=4)
Label(tk, text='5:').grid(row=5)
Label(tk, text='6:').grid(row=6)
Label(tk, text='例:\n\n').grid(row=9)
Label(tk, text="""(20,20),(50,10)
(110,30),(110,80)
(50,50),(20,70)""").grid(
row=9, column=1)
Label(tk, text="""(-10,-10),(10,-10)
(15,0),(10,10)
(-10,10),(-15,0)""").grid(
row=9, column=2)
<|reserved_special_token_0|>
x11.grid(row=1, column=1)
x2.grid(row=2, column=1)
x3.grid(row=3, column=1)
x4.grid(row=4, column=1)
x5.grid(row=5, column=1)
x6.grid(row=6, column=1)
<|reserved_special_token_0|>
y1.grid(row=1, column=2, padx=5, pady=5)
y2.grid(row=2, column=2, padx=5, pady=5)
y3.grid(row=3, column=2, padx=5, pady=5)
y4.grid(row=4, column=2, padx=5, pady=5)
y5.grid(row=5, column=2, padx=5, pady=5)
y6.grid(row=6, column=2, padx=5, pady=5)
Button(tk, text='扫描填充', width=10, command=run).grid(row=7, column=1)
Button(tk, text='退出程序', width=10, command=tk.quit).grid(row=7, column=2)
tk.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AetNode(object):
def __init__(self, x, tx, my):
self.x = x
self.tx = tx
self.my = my
def op(self):
return self.x
class AetList(object):
def __init__(self, y):
self.y = y
self.numy = 0
self.l = []
pass
def findRange(point):
maxy = point[0][1]
miny = point[0][1]
for i in point:
if maxy < i[1]:
maxy = i[1]
if miny > i[1]:
miny = i[1]
return miny, maxy
def printNewEegeList(newEdgeTable):
print('新边表是:')
for i in newEdgeTable:
print(i.y)
for j in i.l:
print((j.x, j.tx, j.my))
print('__________________________________')
def createNewEdgeTable(point):
miny, maxy = findRange(point)
Y = []
for i in point:
Y.append(i[1])
Y = set(Y)
Y = list(Y)
newEdgeList = []
y = miny
while y <= maxy:
if y in Y:
print(y)
templist = []
for i in range(0, 6):
if point[i][1] == y:
templist.append(i)
print(templist)
print('一次创建新边表')
lists = AetList(y)
for temp in templist:
index1 = (temp + 7) % 6
index2 = (temp + 5) % 6
print(point[temp][0], point[temp][1])
print(point[index1][0], point[index1][1])
print(point[index2][0], point[index2][1])
print('+++++++++++++++++++++')
if point[index1][1] > y:
lists.numy += 1
if point[index1][1] - point[temp][1] == 0:
node = AetNode(point[temp][0], 0, point[index1][1])
else:
node = AetNode(point[temp][0], (point[index1][0] -
point[temp][0]) / (point[index1][1] - point[
temp][1]), point[index1][1])
lists.l.append(node)
if point[index2][1] > y:
lists.numy += 1
if point[index2][1] - point[temp][1] == 0:
node = AetNode(point[temp][0], 0, point[index2][1])
else:
node = AetNode(point[temp][0], (point[index2][0] -
point[temp][0]) / (point[index2][1] - point[
temp][1]), point[index2][1])
lists.l.append(node)
if len(lists.l) != 0:
lists.l.sort(key=AetNode.op)
if len(templist) > 1:
lists.numy -= 1
newEdgeList.append(lists)
y += 1
printNewEegeList(newEdgeList)
return newEdgeList, Y
def draw(x1, y1, x, y):
turtle.penup()
turtle.goto(x1, y1)
turtle.pendown()
turtle.goto(x, y)
def run():
turtle.screensize(1920, 1080)
turtle.penup()
turtle.hideturtle()
point = []
temp = [float(x11.get()), float(y1.get())]
point.append(temp)
temp = [float(x2.get()), float(y2.get())]
point.append(temp)
temp = [float(x3.get()), float(y3.get())]
point.append(temp)
temp = [float(x4.get()), float(y4.get())]
point.append(temp)
temp = [float(x5.get()), float(y5.get())]
point.append(temp)
temp = [float(x6.get()), float(y6.get())]
point.append(temp)
point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]
for i in point:
turtle.goto(i[0], i[1])
turtle.pendown()
turtle.goto(point[0][0], point[0][1])
newEdgeTable, Y = createNewEdgeTable(point)
miny, maxy = findRange(point)
y = miny
acativeList = []
while y <= maxy:
ynum = 0
if y in Y:
for i in newEdgeTable:
if i.y == y:
for j in i.l:
acativeList.append(j)
ynum = i.numy
break
acativeList.sort(key=AetNode.op)
for j in acativeList:
print((j.x, j.tx, j.my))
print('****************')
i = 0
flag = True
while i < len(acativeList) - 1:
x1 = acativeList[i].x
temp = [acativeList[i + 1].x, y]
if temp in point and ynum >= 1:
ynum -= 1
else:
i += 1
if flag:
draw(x1, y, temp[0], y)
flag = not flag
newacativeList = []
for i in acativeList:
if i.my > y:
i.x += i.tx
newacativeList.append(i)
acativeList = newacativeList
y += 1
turtle.mainloop()
tk = Tk()
tk.title('扫描填充算法:by 高谦')
Label(tk, text='输入顶点:').grid(row=0)
Label(tk, text='1:').grid(row=1)
Label(tk, text='2:').grid(row=2)
Label(tk, text='3:').grid(row=3)
Label(tk, text='4:').grid(row=4)
Label(tk, text='5:').grid(row=5)
Label(tk, text='6:').grid(row=6)
Label(tk, text='例:\n\n').grid(row=9)
Label(tk, text="""(20,20),(50,10)
(110,30),(110,80)
(50,50),(20,70)""").grid(
row=9, column=1)
Label(tk, text="""(-10,-10),(10,-10)
(15,0),(10,10)
(-10,10),(-15,0)""").grid(
row=9, column=2)
x11 = Entry(tk)
x2 = Entry(tk)
x3 = Entry(tk)
x4 = Entry(tk)
x5 = Entry(tk)
x6 = Entry(tk)
x11.grid(row=1, column=1)
x2.grid(row=2, column=1)
x3.grid(row=3, column=1)
x4.grid(row=4, column=1)
x5.grid(row=5, column=1)
x6.grid(row=6, column=1)
y1 = Entry(tk)
y2 = Entry(tk)
y3 = Entry(tk)
y4 = Entry(tk)
y5 = Entry(tk)
y6 = Entry(tk)
y1.grid(row=1, column=2, padx=5, pady=5)
y2.grid(row=2, column=2, padx=5, pady=5)
y3.grid(row=3, column=2, padx=5, pady=5)
y4.grid(row=4, column=2, padx=5, pady=5)
y5.grid(row=5, column=2, padx=5, pady=5)
y6.grid(row=6, column=2, padx=5, pady=5)
Button(tk, text='扫描填充', width=10, command=run).grid(row=7, column=1)
Button(tk, text='退出程序', width=10, command=tk.quit).grid(row=7, column=2)
tk.mainloop()
<|reserved_special_token_1|>
import turtle
import math
from tkinter import *
#活性边表节点:
class AetNode(object):
    """One edge record in the active-edge table.

    Attributes:
        x:  current x intersection of the edge with the scan line
        tx: x increment (dx/dy) applied each time the scan line moves up one unit
        my: the edge's maximum y; the edge is dropped once the scan line passes it
    """

    def __init__(self, x, tx, my):
        self.x = x
        self.tx = tx
        self.my = my

    def op(self):
        """Sort key: orders active edges left-to-right by current x."""
        return self.x
class AetList(object):
    """Bucket of new edges that first appear at scan line *y* (one NET entry).

    Attributes:
        y:    the scan line this bucket belongs to
        numy: count of edges here that continue upward past y (consumed during
              filling when an intersection lands exactly on a vertex)
        l:    the AetNode edges starting at this scan line
    """

    def __init__(self, y):
        self.y = y
        self.numy = 0
        self.l = []
def findRange(point):
    """Return ``(miny, maxy)``: the vertical extent of the polygon's vertices.

    point: non-empty sequence of [x, y] vertex pairs.
    """
    # Built-in min/max over the y coordinates replaces the manual scan loop.
    ys = [p[1] for p in point]
    return (min(ys), max(ys))
def printNewEegeList(newEdgeTable):
    """Debug dump of the new-edge table: each bucket's y, then its edges as (x, tx, my)."""
    print("新边表是:")
    for bucket in newEdgeTable:
        print(bucket.y)
        for edge in bucket.l:
            print((edge.x, edge.tx, edge.my))
    print("__________________________________")
def createNewEdgeTable(point):
    """Build the new edge table (NET) for scan-line polygon filling.

    point: sequence of [x, y] polygon vertices in boundary order.
    Returns ``(newEdgeList, Y)`` where newEdgeList holds one AetList bucket per
    vertex scan line (edges keyed by their lower endpoint's y) and Y is the
    list of distinct vertex y values.

    Generalized from the original hard-coded 6 vertices to ``len(point)``;
    for a 6-vertex polygon the behavior is unchanged
    ((temp + 7) % 6 == (temp + 1) % 6 and (temp + 5) % 6 == (temp - 1) % 6).
    """
    miny, maxy = findRange(point)
    n = len(point)
    # Collect the distinct y values of all vertices:
    Y = []
    for i in point:
        Y.append(i[1])
    Y = set(Y)
    Y = list(Y)
    # Build the new edge table, one bucket per vertex scan line:
    newEdgeList = []
    y = miny
    while y <= maxy:
        if y in Y:
            # Find every vertex lying on this scan line:
            print(y)
            templist = []
            for i in range(0, n):
                if point[i][1] == y:
                    templist.append(i)
            print(templist)
            print("一次创建新边表")
            lists = AetList(y)
            for temp in templist:
                # Neighbouring vertices around the polygon boundary:
                index1 = (temp + 1) % n
                index2 = (temp - 1) % n
                print(point[temp][0], point[temp][1])
                print(point[index1][0], point[index1][1])
                print(point[index2][0], point[index2][1])
                print("+++++++++++++++++++++")
                # Only edges rising above this scan line enter the table;
                # tx is dx/dy (0 for a horizontal edge to avoid dividing by zero).
                if point[index1][1] > y:
                    lists.numy += 1
                    if point[index1][1] - point[temp][1] == 0:
                        node = AetNode(point[temp][0], 0, point[index1][1])
                    else:
                        node = AetNode(point[temp][0],
                                       (point[index1][0] - point[temp][0]) / (point[index1][1] - point[temp][1]),
                                       point[index1][1])
                    lists.l.append(node)
                if point[index2][1] > y:
                    lists.numy += 1
                    if point[index2][1] - point[temp][1] == 0:
                        node = AetNode(point[temp][0], 0, point[index2][1])
                    else:
                        node = AetNode(point[temp][0],
                                       (point[index2][0] - point[temp][0]) / (point[index2][1] - point[temp][1]),
                                       point[index2][1])
                    lists.l.append(node)
            if len(lists.l) != 0:
                lists.l.sort(key=AetNode.op)
                # Multiple vertices on one scan line: reduce the vertex-crossing count by one.
                if len(templist) > 1:
                    lists.numy -= 1
                newEdgeList.append(lists)
        y += 1
    printNewEegeList(newEdgeList)
    return (newEdgeList, Y)
def draw(x1,y1,x,y):
    # Draw one fill span: lift the pen, jump to (x1, y1), then draw to (x, y).
    turtle.penup()
    turtle.goto(x1,y1)
    turtle.pendown()
    turtle.goto(x,y)
def run():
    """Read six vertices from the entry widgets and scan-line fill the polygon with turtle."""
    turtle.screensize(1920,1080)
    turtle.penup()
    turtle.hideturtle()
    point=[]
    # point=[[20,20],[50,10],[110,30],[110,80],[50,50],[20,70]]
    # point=[[-10,-10],[10,-10],[15,0],[10,10],[-10,10],[-15,0]]
    # Collect the six (x, y) vertices from the Tk entry fields:
    temp = [float(x11.get()), float(y1.get())]
    point.append(temp)
    temp = [float(x2.get()), float(y2.get())]
    point.append(temp)
    temp = [float(x3.get()), float(y3.get())]
    point.append(temp)
    temp = [float(x4.get()), float(y4.get())]
    point.append(temp)
    temp = [float(x5.get()), float(y5.get())]
    point.append(temp)
    temp = [float(x6.get()), float(y6.get())]
    point.append(temp)
    # NOTE(review): this line discards the user's input and always draws the
    # hard-coded hexagon — looks like leftover debug code; confirm intent.
    point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]
    # Draw the polygon outline:
    for i in point:
        turtle.goto(i[0],i[1])
        turtle.pendown()
    turtle.goto(point[0][0],point[0][1])
    # Build the new edge table:
    newEdgeTable,Y=createNewEdgeTable(point)
    miny,maxy=findRange(point)
    y=miny
    acativeList=[]
    while y<=maxy:
        # Merge this scan line's new edges into the active-edge table:
        ynum=0
        if y in Y:
            for i in newEdgeTable:
                if i.y==y:
                    for j in i.l:
                        acativeList.append(j)
                    ynum=i.numy
                    break
        acativeList.sort(key=AetNode.op)
        for j in acativeList:
            print((j.x,j.tx,j.my))
        print("****************")
        # Fill: walk the sorted intersections, drawing every other span (flag toggles):
        i=0
        flag=True
        while i<len(acativeList)-1:
            x1=acativeList[i].x
            temp=[acativeList[i+1].x,y]
            # If the next intersection is exactly a polygon vertex, consume one
            # vertex-crossing credit (ynum) instead of advancing the pair index:
            if temp in point and ynum>=1:
                ynum-=1
            else:
                i+=1
            if flag:
                draw(x1,y,temp[0],y)
            flag=not flag
        # Update the active-edge table: keep edges extending above y, step x by tx:
        newacativeList=[]
        for i in acativeList:
            if i.my>y:
                i.x+=i.tx
                newacativeList.append(i)
        acativeList=newacativeList
        y+=1
    turtle.mainloop()
# --- Tk UI: six (x, y) vertex entry rows, two example hints, run/quit buttons ---
tk=Tk()
tk.title("扫描填充算法:by 高谦")
# Row labels for the six vertex inputs:
Label(tk,text="输入顶点:").grid(row=0)
Label(tk,text="1:").grid(row=1)
Label(tk,text="2:").grid(row=2)
Label(tk,text="3:").grid(row=3)
Label(tk,text="4:").grid(row=4)
Label(tk,text="5:").grid(row=5)
Label(tk,text="6:").grid(row=6)
# Two sample vertex sets displayed as usage hints:
Label(tk,text="例:\n\n").grid(row=9)
Label(tk,text="(20,20),(50,10)\n(110,30),(110,80)\n(50,50),(20,70)").grid(row=9,column=1)
Label(tk,text="(-10,-10),(10,-10)\n(15,0),(10,10)\n(-10,10),(-15,0)").grid(row=9,column=2)
# Entry widgets for the x coordinates (column 1); read by run() via .get():
x11=Entry(tk)
x2=Entry(tk)
x3=Entry(tk)
x4=Entry(tk)
x5=Entry(tk)
x6=Entry(tk)
x11.grid(row=1,column=1)
x2.grid(row=2,column=1)
x3.grid(row=3,column=1)
x4.grid(row=4,column=1)
x5.grid(row=5,column=1)
x6.grid(row=6,column=1)
# Entry widgets for the y coordinates (column 2):
y1=Entry(tk)
y2=Entry(tk)
y3=Entry(tk)
y4=Entry(tk)
y5=Entry(tk)
y6=Entry(tk)
y1.grid(row=1,column=2,padx=5,pady=5)
y2.grid(row=2,column=2,padx=5,pady=5)
y3.grid(row=3,column=2,padx=5,pady=5)
y4.grid(row=4,column=2,padx=5,pady=5)
y5.grid(row=5,column=2,padx=5,pady=5)
y6.grid(row=6,column=2,padx=5,pady=5)
# Fill button triggers run(); quit button closes the Tk main loop:
Button(tk,text="扫描填充",width=10,command=run).grid(row=7,column=1)
Button(tk,text="退出程序",width=10,command=tk.quit).grid(row=7,column=2)
tk.mainloop()
|
flexible
|
{
"blob_id": "0a7a95755924fd264169286cc5b5b7587d7ee8e4",
"index": 4608,
"step-1": "<mask token>\n\n\nclass AetNode(object):\n\n def __init__(self, x, tx, my):\n self.x = x\n self.tx = tx\n self.my = my\n\n def op(self):\n return self.x\n\n\nclass AetList(object):\n\n def __init__(self, y):\n self.y = y\n self.numy = 0\n self.l = []\n pass\n\n\n<mask token>\n\n\ndef createNewEdgeTable(point):\n miny, maxy = findRange(point)\n Y = []\n for i in point:\n Y.append(i[1])\n Y = set(Y)\n Y = list(Y)\n newEdgeList = []\n y = miny\n while y <= maxy:\n if y in Y:\n print(y)\n templist = []\n for i in range(0, 6):\n if point[i][1] == y:\n templist.append(i)\n print(templist)\n print('一次创建新边表')\n lists = AetList(y)\n for temp in templist:\n index1 = (temp + 7) % 6\n index2 = (temp + 5) % 6\n print(point[temp][0], point[temp][1])\n print(point[index1][0], point[index1][1])\n print(point[index2][0], point[index2][1])\n print('+++++++++++++++++++++')\n if point[index1][1] > y:\n lists.numy += 1\n if point[index1][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index1][1])\n else:\n node = AetNode(point[temp][0], (point[index1][0] -\n point[temp][0]) / (point[index1][1] - point[\n temp][1]), point[index1][1])\n lists.l.append(node)\n if point[index2][1] > y:\n lists.numy += 1\n if point[index2][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index2][1])\n else:\n node = AetNode(point[temp][0], (point[index2][0] -\n point[temp][0]) / (point[index2][1] - point[\n temp][1]), point[index2][1])\n lists.l.append(node)\n if len(lists.l) != 0:\n lists.l.sort(key=AetNode.op)\n if len(templist) > 1:\n lists.numy -= 1\n newEdgeList.append(lists)\n y += 1\n printNewEegeList(newEdgeList)\n return newEdgeList, Y\n\n\ndef draw(x1, y1, x, y):\n turtle.penup()\n turtle.goto(x1, y1)\n turtle.pendown()\n turtle.goto(x, y)\n\n\ndef run():\n turtle.screensize(1920, 1080)\n turtle.penup()\n turtle.hideturtle()\n point = []\n temp = [float(x11.get()), float(y1.get())]\n point.append(temp)\n temp = [float(x2.get()), float(y2.get())]\n 
point.append(temp)\n temp = [float(x3.get()), float(y3.get())]\n point.append(temp)\n temp = [float(x4.get()), float(y4.get())]\n point.append(temp)\n temp = [float(x5.get()), float(y5.get())]\n point.append(temp)\n temp = [float(x6.get()), float(y6.get())]\n point.append(temp)\n point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]\n for i in point:\n turtle.goto(i[0], i[1])\n turtle.pendown()\n turtle.goto(point[0][0], point[0][1])\n newEdgeTable, Y = createNewEdgeTable(point)\n miny, maxy = findRange(point)\n y = miny\n acativeList = []\n while y <= maxy:\n ynum = 0\n if y in Y:\n for i in newEdgeTable:\n if i.y == y:\n for j in i.l:\n acativeList.append(j)\n ynum = i.numy\n break\n acativeList.sort(key=AetNode.op)\n for j in acativeList:\n print((j.x, j.tx, j.my))\n print('****************')\n i = 0\n flag = True\n while i < len(acativeList) - 1:\n x1 = acativeList[i].x\n temp = [acativeList[i + 1].x, y]\n if temp in point and ynum >= 1:\n ynum -= 1\n else:\n i += 1\n if flag:\n draw(x1, y, temp[0], y)\n flag = not flag\n newacativeList = []\n for i in acativeList:\n if i.my > y:\n i.x += i.tx\n newacativeList.append(i)\n acativeList = newacativeList\n y += 1\n turtle.mainloop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AetNode(object):\n\n def __init__(self, x, tx, my):\n self.x = x\n self.tx = tx\n self.my = my\n\n def op(self):\n return self.x\n\n\nclass AetList(object):\n\n def __init__(self, y):\n self.y = y\n self.numy = 0\n self.l = []\n pass\n\n\ndef findRange(point):\n maxy = point[0][1]\n miny = point[0][1]\n for i in point:\n if maxy < i[1]:\n maxy = i[1]\n if miny > i[1]:\n miny = i[1]\n return miny, maxy\n\n\ndef printNewEegeList(newEdgeTable):\n print('新边表是:')\n for i in newEdgeTable:\n print(i.y)\n for j in i.l:\n print((j.x, j.tx, j.my))\n print('__________________________________')\n\n\ndef createNewEdgeTable(point):\n miny, maxy = findRange(point)\n Y = []\n for i in point:\n Y.append(i[1])\n Y = set(Y)\n Y = list(Y)\n newEdgeList = []\n y = miny\n while y <= maxy:\n if y in Y:\n print(y)\n templist = []\n for i in range(0, 6):\n if point[i][1] == y:\n templist.append(i)\n print(templist)\n print('一次创建新边表')\n lists = AetList(y)\n for temp in templist:\n index1 = (temp + 7) % 6\n index2 = (temp + 5) % 6\n print(point[temp][0], point[temp][1])\n print(point[index1][0], point[index1][1])\n print(point[index2][0], point[index2][1])\n print('+++++++++++++++++++++')\n if point[index1][1] > y:\n lists.numy += 1\n if point[index1][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index1][1])\n else:\n node = AetNode(point[temp][0], (point[index1][0] -\n point[temp][0]) / (point[index1][1] - point[\n temp][1]), point[index1][1])\n lists.l.append(node)\n if point[index2][1] > y:\n lists.numy += 1\n if point[index2][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index2][1])\n else:\n node = AetNode(point[temp][0], (point[index2][0] -\n point[temp][0]) / (point[index2][1] - point[\n temp][1]), point[index2][1])\n lists.l.append(node)\n if len(lists.l) != 0:\n lists.l.sort(key=AetNode.op)\n if len(templist) > 1:\n lists.numy -= 1\n newEdgeList.append(lists)\n y += 1\n printNewEegeList(newEdgeList)\n 
return newEdgeList, Y\n\n\ndef draw(x1, y1, x, y):\n turtle.penup()\n turtle.goto(x1, y1)\n turtle.pendown()\n turtle.goto(x, y)\n\n\ndef run():\n turtle.screensize(1920, 1080)\n turtle.penup()\n turtle.hideturtle()\n point = []\n temp = [float(x11.get()), float(y1.get())]\n point.append(temp)\n temp = [float(x2.get()), float(y2.get())]\n point.append(temp)\n temp = [float(x3.get()), float(y3.get())]\n point.append(temp)\n temp = [float(x4.get()), float(y4.get())]\n point.append(temp)\n temp = [float(x5.get()), float(y5.get())]\n point.append(temp)\n temp = [float(x6.get()), float(y6.get())]\n point.append(temp)\n point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]\n for i in point:\n turtle.goto(i[0], i[1])\n turtle.pendown()\n turtle.goto(point[0][0], point[0][1])\n newEdgeTable, Y = createNewEdgeTable(point)\n miny, maxy = findRange(point)\n y = miny\n acativeList = []\n while y <= maxy:\n ynum = 0\n if y in Y:\n for i in newEdgeTable:\n if i.y == y:\n for j in i.l:\n acativeList.append(j)\n ynum = i.numy\n break\n acativeList.sort(key=AetNode.op)\n for j in acativeList:\n print((j.x, j.tx, j.my))\n print('****************')\n i = 0\n flag = True\n while i < len(acativeList) - 1:\n x1 = acativeList[i].x\n temp = [acativeList[i + 1].x, y]\n if temp in point and ynum >= 1:\n ynum -= 1\n else:\n i += 1\n if flag:\n draw(x1, y, temp[0], y)\n flag = not flag\n newacativeList = []\n for i in acativeList:\n if i.my > y:\n i.x += i.tx\n newacativeList.append(i)\n acativeList = newacativeList\n y += 1\n turtle.mainloop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AetNode(object):\n\n def __init__(self, x, tx, my):\n self.x = x\n self.tx = tx\n self.my = my\n\n def op(self):\n return self.x\n\n\nclass AetList(object):\n\n def __init__(self, y):\n self.y = y\n self.numy = 0\n self.l = []\n pass\n\n\ndef findRange(point):\n maxy = point[0][1]\n miny = point[0][1]\n for i in point:\n if maxy < i[1]:\n maxy = i[1]\n if miny > i[1]:\n miny = i[1]\n return miny, maxy\n\n\ndef printNewEegeList(newEdgeTable):\n print('新边表是:')\n for i in newEdgeTable:\n print(i.y)\n for j in i.l:\n print((j.x, j.tx, j.my))\n print('__________________________________')\n\n\ndef createNewEdgeTable(point):\n miny, maxy = findRange(point)\n Y = []\n for i in point:\n Y.append(i[1])\n Y = set(Y)\n Y = list(Y)\n newEdgeList = []\n y = miny\n while y <= maxy:\n if y in Y:\n print(y)\n templist = []\n for i in range(0, 6):\n if point[i][1] == y:\n templist.append(i)\n print(templist)\n print('一次创建新边表')\n lists = AetList(y)\n for temp in templist:\n index1 = (temp + 7) % 6\n index2 = (temp + 5) % 6\n print(point[temp][0], point[temp][1])\n print(point[index1][0], point[index1][1])\n print(point[index2][0], point[index2][1])\n print('+++++++++++++++++++++')\n if point[index1][1] > y:\n lists.numy += 1\n if point[index1][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index1][1])\n else:\n node = AetNode(point[temp][0], (point[index1][0] -\n point[temp][0]) / (point[index1][1] - point[\n temp][1]), point[index1][1])\n lists.l.append(node)\n if point[index2][1] > y:\n lists.numy += 1\n if point[index2][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index2][1])\n else:\n node = AetNode(point[temp][0], (point[index2][0] -\n point[temp][0]) / (point[index2][1] - point[\n temp][1]), point[index2][1])\n lists.l.append(node)\n if len(lists.l) != 0:\n lists.l.sort(key=AetNode.op)\n if len(templist) > 1:\n lists.numy -= 1\n newEdgeList.append(lists)\n y += 1\n printNewEegeList(newEdgeList)\n 
return newEdgeList, Y\n\n\ndef draw(x1, y1, x, y):\n turtle.penup()\n turtle.goto(x1, y1)\n turtle.pendown()\n turtle.goto(x, y)\n\n\ndef run():\n turtle.screensize(1920, 1080)\n turtle.penup()\n turtle.hideturtle()\n point = []\n temp = [float(x11.get()), float(y1.get())]\n point.append(temp)\n temp = [float(x2.get()), float(y2.get())]\n point.append(temp)\n temp = [float(x3.get()), float(y3.get())]\n point.append(temp)\n temp = [float(x4.get()), float(y4.get())]\n point.append(temp)\n temp = [float(x5.get()), float(y5.get())]\n point.append(temp)\n temp = [float(x6.get()), float(y6.get())]\n point.append(temp)\n point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]\n for i in point:\n turtle.goto(i[0], i[1])\n turtle.pendown()\n turtle.goto(point[0][0], point[0][1])\n newEdgeTable, Y = createNewEdgeTable(point)\n miny, maxy = findRange(point)\n y = miny\n acativeList = []\n while y <= maxy:\n ynum = 0\n if y in Y:\n for i in newEdgeTable:\n if i.y == y:\n for j in i.l:\n acativeList.append(j)\n ynum = i.numy\n break\n acativeList.sort(key=AetNode.op)\n for j in acativeList:\n print((j.x, j.tx, j.my))\n print('****************')\n i = 0\n flag = True\n while i < len(acativeList) - 1:\n x1 = acativeList[i].x\n temp = [acativeList[i + 1].x, y]\n if temp in point and ynum >= 1:\n ynum -= 1\n else:\n i += 1\n if flag:\n draw(x1, y, temp[0], y)\n flag = not flag\n newacativeList = []\n for i in acativeList:\n if i.my > y:\n i.x += i.tx\n newacativeList.append(i)\n acativeList = newacativeList\n y += 1\n turtle.mainloop()\n\n\n<mask token>\ntk.title('扫描填充算法:by 高谦')\nLabel(tk, text='输入顶点:').grid(row=0)\nLabel(tk, text='1:').grid(row=1)\nLabel(tk, text='2:').grid(row=2)\nLabel(tk, text='3:').grid(row=3)\nLabel(tk, text='4:').grid(row=4)\nLabel(tk, text='5:').grid(row=5)\nLabel(tk, text='6:').grid(row=6)\nLabel(tk, text='例:\\n\\n').grid(row=9)\nLabel(tk, text=\"\"\"(20,20),(50,10)\n(110,30),(110,80)\n(50,50),(20,70)\"\"\").grid(\n row=9, 
column=1)\nLabel(tk, text=\"\"\"(-10,-10),(10,-10)\n(15,0),(10,10)\n(-10,10),(-15,0)\"\"\").grid(\n row=9, column=2)\n<mask token>\nx11.grid(row=1, column=1)\nx2.grid(row=2, column=1)\nx3.grid(row=3, column=1)\nx4.grid(row=4, column=1)\nx5.grid(row=5, column=1)\nx6.grid(row=6, column=1)\n<mask token>\ny1.grid(row=1, column=2, padx=5, pady=5)\ny2.grid(row=2, column=2, padx=5, pady=5)\ny3.grid(row=3, column=2, padx=5, pady=5)\ny4.grid(row=4, column=2, padx=5, pady=5)\ny5.grid(row=5, column=2, padx=5, pady=5)\ny6.grid(row=6, column=2, padx=5, pady=5)\nButton(tk, text='扫描填充', width=10, command=run).grid(row=7, column=1)\nButton(tk, text='退出程序', width=10, command=tk.quit).grid(row=7, column=2)\ntk.mainloop()\n",
"step-4": "<mask token>\n\n\nclass AetNode(object):\n\n def __init__(self, x, tx, my):\n self.x = x\n self.tx = tx\n self.my = my\n\n def op(self):\n return self.x\n\n\nclass AetList(object):\n\n def __init__(self, y):\n self.y = y\n self.numy = 0\n self.l = []\n pass\n\n\ndef findRange(point):\n maxy = point[0][1]\n miny = point[0][1]\n for i in point:\n if maxy < i[1]:\n maxy = i[1]\n if miny > i[1]:\n miny = i[1]\n return miny, maxy\n\n\ndef printNewEegeList(newEdgeTable):\n print('新边表是:')\n for i in newEdgeTable:\n print(i.y)\n for j in i.l:\n print((j.x, j.tx, j.my))\n print('__________________________________')\n\n\ndef createNewEdgeTable(point):\n miny, maxy = findRange(point)\n Y = []\n for i in point:\n Y.append(i[1])\n Y = set(Y)\n Y = list(Y)\n newEdgeList = []\n y = miny\n while y <= maxy:\n if y in Y:\n print(y)\n templist = []\n for i in range(0, 6):\n if point[i][1] == y:\n templist.append(i)\n print(templist)\n print('一次创建新边表')\n lists = AetList(y)\n for temp in templist:\n index1 = (temp + 7) % 6\n index2 = (temp + 5) % 6\n print(point[temp][0], point[temp][1])\n print(point[index1][0], point[index1][1])\n print(point[index2][0], point[index2][1])\n print('+++++++++++++++++++++')\n if point[index1][1] > y:\n lists.numy += 1\n if point[index1][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index1][1])\n else:\n node = AetNode(point[temp][0], (point[index1][0] -\n point[temp][0]) / (point[index1][1] - point[\n temp][1]), point[index1][1])\n lists.l.append(node)\n if point[index2][1] > y:\n lists.numy += 1\n if point[index2][1] - point[temp][1] == 0:\n node = AetNode(point[temp][0], 0, point[index2][1])\n else:\n node = AetNode(point[temp][0], (point[index2][0] -\n point[temp][0]) / (point[index2][1] - point[\n temp][1]), point[index2][1])\n lists.l.append(node)\n if len(lists.l) != 0:\n lists.l.sort(key=AetNode.op)\n if len(templist) > 1:\n lists.numy -= 1\n newEdgeList.append(lists)\n y += 1\n printNewEegeList(newEdgeList)\n 
return newEdgeList, Y\n\n\ndef draw(x1, y1, x, y):\n turtle.penup()\n turtle.goto(x1, y1)\n turtle.pendown()\n turtle.goto(x, y)\n\n\ndef run():\n turtle.screensize(1920, 1080)\n turtle.penup()\n turtle.hideturtle()\n point = []\n temp = [float(x11.get()), float(y1.get())]\n point.append(temp)\n temp = [float(x2.get()), float(y2.get())]\n point.append(temp)\n temp = [float(x3.get()), float(y3.get())]\n point.append(temp)\n temp = [float(x4.get()), float(y4.get())]\n point.append(temp)\n temp = [float(x5.get()), float(y5.get())]\n point.append(temp)\n temp = [float(x6.get()), float(y6.get())]\n point.append(temp)\n point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]\n for i in point:\n turtle.goto(i[0], i[1])\n turtle.pendown()\n turtle.goto(point[0][0], point[0][1])\n newEdgeTable, Y = createNewEdgeTable(point)\n miny, maxy = findRange(point)\n y = miny\n acativeList = []\n while y <= maxy:\n ynum = 0\n if y in Y:\n for i in newEdgeTable:\n if i.y == y:\n for j in i.l:\n acativeList.append(j)\n ynum = i.numy\n break\n acativeList.sort(key=AetNode.op)\n for j in acativeList:\n print((j.x, j.tx, j.my))\n print('****************')\n i = 0\n flag = True\n while i < len(acativeList) - 1:\n x1 = acativeList[i].x\n temp = [acativeList[i + 1].x, y]\n if temp in point and ynum >= 1:\n ynum -= 1\n else:\n i += 1\n if flag:\n draw(x1, y, temp[0], y)\n flag = not flag\n newacativeList = []\n for i in acativeList:\n if i.my > y:\n i.x += i.tx\n newacativeList.append(i)\n acativeList = newacativeList\n y += 1\n turtle.mainloop()\n\n\ntk = Tk()\ntk.title('扫描填充算法:by 高谦')\nLabel(tk, text='输入顶点:').grid(row=0)\nLabel(tk, text='1:').grid(row=1)\nLabel(tk, text='2:').grid(row=2)\nLabel(tk, text='3:').grid(row=3)\nLabel(tk, text='4:').grid(row=4)\nLabel(tk, text='5:').grid(row=5)\nLabel(tk, text='6:').grid(row=6)\nLabel(tk, text='例:\\n\\n').grid(row=9)\nLabel(tk, text=\"\"\"(20,20),(50,10)\n(110,30),(110,80)\n(50,50),(20,70)\"\"\").grid(\n row=9, column=1)\nLabel(tk, 
text=\"\"\"(-10,-10),(10,-10)\n(15,0),(10,10)\n(-10,10),(-15,0)\"\"\").grid(\n row=9, column=2)\nx11 = Entry(tk)\nx2 = Entry(tk)\nx3 = Entry(tk)\nx4 = Entry(tk)\nx5 = Entry(tk)\nx6 = Entry(tk)\nx11.grid(row=1, column=1)\nx2.grid(row=2, column=1)\nx3.grid(row=3, column=1)\nx4.grid(row=4, column=1)\nx5.grid(row=5, column=1)\nx6.grid(row=6, column=1)\ny1 = Entry(tk)\ny2 = Entry(tk)\ny3 = Entry(tk)\ny4 = Entry(tk)\ny5 = Entry(tk)\ny6 = Entry(tk)\ny1.grid(row=1, column=2, padx=5, pady=5)\ny2.grid(row=2, column=2, padx=5, pady=5)\ny3.grid(row=3, column=2, padx=5, pady=5)\ny4.grid(row=4, column=2, padx=5, pady=5)\ny5.grid(row=5, column=2, padx=5, pady=5)\ny6.grid(row=6, column=2, padx=5, pady=5)\nButton(tk, text='扫描填充', width=10, command=run).grid(row=7, column=1)\nButton(tk, text='退出程序', width=10, command=tk.quit).grid(row=7, column=2)\ntk.mainloop()\n",
"step-5": "import turtle\nimport math\nfrom tkinter import *\n#活性边表节点:\nclass AetNode(object):\n def __init__(self,x,tx,my):\n self.x=x\n self.tx=tx\n self.my=my\n def op(self):\n return self.x\nclass AetList(object):\n def __init__(self,y):\n self.y=y\n self.numy=0\n self.l=[]\n pass\ndef findRange(point):\n # 找到最大y和最小y:\n maxy = point[0][1]\n miny = point[0][1]\n for i in point:\n if maxy < i[1]:\n maxy = i[1]\n if miny > i[1]:\n miny = i[1]\n return (miny,maxy)\ndef printNewEegeList(newEdgeTable):\n print(\"新边表是:\")\n for i in newEdgeTable:\n print(i.y)\n for j in i.l:\n print((j.x,j.tx,j.my))\n print(\"__________________________________\")\ndef createNewEdgeTable(point):\n miny,maxy=findRange(point)\n # 找打所有y的顶点:\n Y = []\n for i in point:\n Y.append(i[1])\n Y = set(Y)\n Y = list(Y)\n # 创建新边表:\n newEdgeList = []\n y=miny\n while y <=maxy:\n if y in Y:\n # 找到所有的X值:\n print(y)\n templist=[]\n for i in range(0, 6):\n if point[i][1] == y:\n templist.append(i)\n print(templist)\n print(\"一次创建新边表\")\n lists = AetList(y)\n for temp in templist:\n index1 = (temp + 7) % 6\n index2 = (temp + 5) % 6\n print(point[temp][0],point[temp][1])\n print(point[index1][0],point[index1][1])\n print(point[index2][0],point[index2][1])\n print(\"+++++++++++++++++++++\")\n # lists = AetList(y)\n if point[index1][1] > y:\n lists.numy+=1\n if point[index1][1] - point[temp][1]==0:\n node = AetNode(point[temp][0],0,point[index1][1])\n else:\n node = AetNode(point[temp][0],\n ((point[index1][0] - point[temp][0]) / (point[index1][1] - point[temp][1])),\n point[index1][1])\n lists.l.append(node)\n if point[index2][1] > y:\n lists.numy+=1\n if point[index2][1] - point[temp][1]==0:\n node = AetNode(point[temp][0], 0, point[index2][1])\n else:\n node = AetNode(point[temp][0],\n ((point[index2][0] - point[temp][0]) / (point[index2][1] - point[temp][1])),\n point[index2][1])\n lists.l.append(node)\n if len(lists.l)!=0:\n lists.l.sort(key=AetNode.op)\n if len(templist)>1:\n lists.numy-=1\n 
newEdgeList.append(lists)\n y+=1\n printNewEegeList(newEdgeList)\n return (newEdgeList,Y)\ndef draw(x1,y1,x,y):\n turtle.penup()\n turtle.goto(x1,y1)\n turtle.pendown()\n turtle.goto(x,y)\ndef run():\n turtle.screensize(1920,1080)\n turtle.penup()\n turtle.hideturtle()\n point=[]\n # point=[[20,20],[50,10],[110,30],[110,80],[50,50],[20,70]]\n # point=[[-10,-10],[10,-10],[15,0],[10,10],[-10,10],[-15,0]]\n temp = [float(x11.get()), float(y1.get())]\n point.append(temp)\n temp = [float(x2.get()), float(y2.get())]\n point.append(temp)\n temp = [float(x3.get()), float(y3.get())]\n point.append(temp)\n temp = [float(x4.get()), float(y4.get())]\n point.append(temp)\n temp = [float(x5.get()), float(y5.get())]\n point.append(temp)\n temp = [float(x6.get()), float(y6.get())]\n point.append(temp)\n point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]\n #画出原图:\n for i in point:\n turtle.goto(i[0],i[1])\n turtle.pendown()\n turtle.goto(point[0][0],point[0][1])\n #创建新边表:\n newEdgeTable,Y=createNewEdgeTable(point)\n miny,maxy=findRange(point)\n y=miny\n acativeList=[]\n while y<=maxy:\n #把新边表加进来:\n ynum=0\n if y in Y:\n for i in newEdgeTable:\n if i.y==y:\n for j in i.l:\n acativeList.append(j)\n ynum=i.numy\n break\n acativeList.sort(key=AetNode.op)\n for j in acativeList:\n print((j.x,j.tx,j.my))\n print(\"****************\")\n #进行填色:\n i=0\n flag=True\n while i<len(acativeList)-1:\n x1=acativeList[i].x\n temp=[acativeList[i+1].x,y]\n if temp in point and ynum>=1:\n ynum-=1\n else:\n i+=1\n if flag:\n draw(x1,y,temp[0],y)\n flag=not flag\n #更新活性边表:\n newacativeList=[]\n for i in acativeList:\n if i.my>y:\n i.x+=i.tx\n newacativeList.append(i)\n acativeList=newacativeList\n y+=1\n turtle.mainloop()\ntk=Tk()\ntk.title(\"扫描填充算法:by 
高谦\")\nLabel(tk,text=\"输入顶点:\").grid(row=0)\nLabel(tk,text=\"1:\").grid(row=1)\nLabel(tk,text=\"2:\").grid(row=2)\nLabel(tk,text=\"3:\").grid(row=3)\nLabel(tk,text=\"4:\").grid(row=4)\nLabel(tk,text=\"5:\").grid(row=5)\nLabel(tk,text=\"6:\").grid(row=6)\nLabel(tk,text=\"例:\\n\\n\").grid(row=9)\nLabel(tk,text=\"(20,20),(50,10)\\n(110,30),(110,80)\\n(50,50),(20,70)\").grid(row=9,column=1)\nLabel(tk,text=\"(-10,-10),(10,-10)\\n(15,0),(10,10)\\n(-10,10),(-15,0)\").grid(row=9,column=2)\nx11=Entry(tk)\nx2=Entry(tk)\nx3=Entry(tk)\nx4=Entry(tk)\nx5=Entry(tk)\nx6=Entry(tk)\nx11.grid(row=1,column=1)\nx2.grid(row=2,column=1)\nx3.grid(row=3,column=1)\nx4.grid(row=4,column=1)\nx5.grid(row=5,column=1)\nx6.grid(row=6,column=1)\n\ny1=Entry(tk)\ny2=Entry(tk)\ny3=Entry(tk)\ny4=Entry(tk)\ny5=Entry(tk)\ny6=Entry(tk)\ny1.grid(row=1,column=2,padx=5,pady=5)\ny2.grid(row=2,column=2,padx=5,pady=5)\ny3.grid(row=3,column=2,padx=5,pady=5)\ny4.grid(row=4,column=2,padx=5,pady=5)\ny5.grid(row=5,column=2,padx=5,pady=5)\ny6.grid(row=6,column=2,padx=5,pady=5)\nButton(tk,text=\"扫描填充\",width=10,command=run).grid(row=7,column=1)\nButton(tk,text=\"退出程序\",width=10,command=tk.quit).grid(row=7,column=2)\ntk.mainloop()",
"step-ids": [
8,
10,
11,
12,
14
]
}
|
[
8,
10,
11,
12,
14
] |
import json
import math
import pandas as pd
import datetime
record_file = r"D:\Doc\data\BBOS.log"
all_records = []
with open(record_file, "r") as f:
all_line = f.readlines()
for line in all_line:
record_time = line[line.index("[") + 1: line.index("]")]
record_order = json.loads(line[line.index("{"):])
for item in record_order["data"]["otherPositionRetList"]:
item["time"] = datetime.datetime.fromtimestamp(math.floor(float(record_time)/1000)).strftime(r"%Y-%m-%d %H:%M:%S")
all_records.append(item)
record_frame = pd.DataFrame(all_records)
print(record_frame.columns)
print(record_frame.sort_values(by=['symbol', 'time'] , ascending=[1,1]))
|
normal
|
{
"blob_id": "fbbadb5cbd2b324686fc5faa0b1bc6236fc8d87b",
"index": 9218,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(record_file, 'r') as f:\n all_line = f.readlines()\n for line in all_line:\n record_time = line[line.index('[') + 1:line.index(']')]\n record_order = json.loads(line[line.index('{'):])\n for item in record_order['data']['otherPositionRetList']:\n item['time'] = datetime.datetime.fromtimestamp(math.floor(float\n (record_time) / 1000)).strftime('%Y-%m-%d %H:%M:%S')\n all_records.append(item)\n<mask token>\nprint(record_frame.columns)\nprint(record_frame.sort_values(by=['symbol', 'time'], ascending=[1, 1]))\n",
"step-3": "<mask token>\nrecord_file = 'D:\\\\Doc\\\\data\\\\BBOS.log'\nall_records = []\nwith open(record_file, 'r') as f:\n all_line = f.readlines()\n for line in all_line:\n record_time = line[line.index('[') + 1:line.index(']')]\n record_order = json.loads(line[line.index('{'):])\n for item in record_order['data']['otherPositionRetList']:\n item['time'] = datetime.datetime.fromtimestamp(math.floor(float\n (record_time) / 1000)).strftime('%Y-%m-%d %H:%M:%S')\n all_records.append(item)\nrecord_frame = pd.DataFrame(all_records)\nprint(record_frame.columns)\nprint(record_frame.sort_values(by=['symbol', 'time'], ascending=[1, 1]))\n",
"step-4": "import json\nimport math\nimport pandas as pd\nimport datetime\nrecord_file = 'D:\\\\Doc\\\\data\\\\BBOS.log'\nall_records = []\nwith open(record_file, 'r') as f:\n all_line = f.readlines()\n for line in all_line:\n record_time = line[line.index('[') + 1:line.index(']')]\n record_order = json.loads(line[line.index('{'):])\n for item in record_order['data']['otherPositionRetList']:\n item['time'] = datetime.datetime.fromtimestamp(math.floor(float\n (record_time) / 1000)).strftime('%Y-%m-%d %H:%M:%S')\n all_records.append(item)\nrecord_frame = pd.DataFrame(all_records)\nprint(record_frame.columns)\nprint(record_frame.sort_values(by=['symbol', 'time'], ascending=[1, 1]))\n",
"step-5": "import json\nimport math\nimport pandas as pd\nimport datetime\n\nrecord_file = r\"D:\\Doc\\data\\BBOS.log\"\nall_records = []\n\nwith open(record_file, \"r\") as f:\n all_line = f.readlines()\n for line in all_line:\n record_time = line[line.index(\"[\") + 1: line.index(\"]\")]\n \n record_order = json.loads(line[line.index(\"{\"):])\n for item in record_order[\"data\"][\"otherPositionRetList\"]:\n item[\"time\"] = datetime.datetime.fromtimestamp(math.floor(float(record_time)/1000)).strftime(r\"%Y-%m-%d %H:%M:%S\")\n all_records.append(item)\n\nrecord_frame = pd.DataFrame(all_records)\nprint(record_frame.columns)\nprint(record_frame.sort_values(by=['symbol', 'time'] , ascending=[1,1]))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in file:
for j in i:
if j != '\n':
inp += j
else:
inp += ' '
inp += ' '
file.close()
<|reserved_special_token_0|>
for i in inp:
if i != ' ':
tmp += i
else:
tmp = int(tmp)
if counter == 0:
mas = [[(0) for id1 in range(2)] for id2 in range(tmp)]
svNumber = tmp
counter = tmp
elif flag:
mas[svNumber - counter][0] = tmp
flag = False
else:
mas[svNumber - counter][1] = tmp
flag = True
counter -= 1
tmp = ''
if counter == 0:
uzNumber = max(max(i) for i in mas) + 1
rez = [[(0) for id1 in range(uzNumber)] for id2 in range(uzNumber)]
for i in mas:
for j in mas:
if i == j[::-1]:
rez[i[0]][i[0]] = -1
rez[i[1]][i[1]] = -1
rez[i[0]][i[1]] = -1
rez[i[1]][i[0]] = -1
"""
for i in rez:
for j in i:
print(j, end = ' ')
print()
print()
"""
flag2 = True
while flag2:
flag2 = False
for i in mas:
for j in range(uzNumber):
for k in range(uzNumber):
if rez[j][k] == -1 and i[1] == j and rez[i[0]][k
] != -1:
rez[i[0]][k] = -1
flag2 = True
if rez[i[0]][i[0]] == -1 and i[0] == j and i[1
] == k and rez[j][k] != -1:
rez[j][k] = -1
flag2 = True
"""
for i in rez:
for j in i:
print(j, end = ' ')
print()
print()
"""
for k in range(uzNumber):
frontier = q.Queue()
frontier.put(k)
while not frontier.empty():
current = frontier.get()
for i in range(len(mas)):
if mas[i][0] == current:
if rez[k][mas[i][1]] != -1:
frontier.put(mas[i][1])
rez[k][mas[i][1]] += 1
print('matrix for city', cityCounter)
for i in rez:
for j in i:
print(j, end=' ')
print()
print()
cityCounter += 1
mas = []
<|reserved_special_token_1|>
<|reserved_special_token_0|>
file = open('input.txt', 'r')
inp = ''
for i in file:
for j in i:
if j != '\n':
inp += j
else:
inp += ' '
inp += ' '
file.close()
tmp = ''
svNumber = 0
counter = 0
cityCounter = 0
flag = True
for i in inp:
if i != ' ':
tmp += i
else:
tmp = int(tmp)
if counter == 0:
mas = [[(0) for id1 in range(2)] for id2 in range(tmp)]
svNumber = tmp
counter = tmp
elif flag:
mas[svNumber - counter][0] = tmp
flag = False
else:
mas[svNumber - counter][1] = tmp
flag = True
counter -= 1
tmp = ''
if counter == 0:
uzNumber = max(max(i) for i in mas) + 1
rez = [[(0) for id1 in range(uzNumber)] for id2 in range(uzNumber)]
for i in mas:
for j in mas:
if i == j[::-1]:
rez[i[0]][i[0]] = -1
rez[i[1]][i[1]] = -1
rez[i[0]][i[1]] = -1
rez[i[1]][i[0]] = -1
"""
for i in rez:
for j in i:
print(j, end = ' ')
print()
print()
"""
flag2 = True
while flag2:
flag2 = False
for i in mas:
for j in range(uzNumber):
for k in range(uzNumber):
if rez[j][k] == -1 and i[1] == j and rez[i[0]][k
] != -1:
rez[i[0]][k] = -1
flag2 = True
if rez[i[0]][i[0]] == -1 and i[0] == j and i[1
] == k and rez[j][k] != -1:
rez[j][k] = -1
flag2 = True
"""
for i in rez:
for j in i:
print(j, end = ' ')
print()
print()
"""
for k in range(uzNumber):
frontier = q.Queue()
frontier.put(k)
while not frontier.empty():
current = frontier.get()
for i in range(len(mas)):
if mas[i][0] == current:
if rez[k][mas[i][1]] != -1:
frontier.put(mas[i][1])
rez[k][mas[i][1]] += 1
print('matrix for city', cityCounter)
for i in rez:
for j in i:
print(j, end=' ')
print()
print()
cityCounter += 1
mas = []
<|reserved_special_token_1|>
import queue as q
file = open('input.txt', 'r')
inp = ''
for i in file:
for j in i:
if j != '\n':
inp += j
else:
inp += ' '
inp += ' '
file.close()
tmp = ''
svNumber = 0
counter = 0
cityCounter = 0
flag = True
for i in inp:
if i != ' ':
tmp += i
else:
tmp = int(tmp)
if counter == 0:
mas = [[(0) for id1 in range(2)] for id2 in range(tmp)]
svNumber = tmp
counter = tmp
elif flag:
mas[svNumber - counter][0] = tmp
flag = False
else:
mas[svNumber - counter][1] = tmp
flag = True
counter -= 1
tmp = ''
if counter == 0:
uzNumber = max(max(i) for i in mas) + 1
rez = [[(0) for id1 in range(uzNumber)] for id2 in range(uzNumber)]
for i in mas:
for j in mas:
if i == j[::-1]:
rez[i[0]][i[0]] = -1
rez[i[1]][i[1]] = -1
rez[i[0]][i[1]] = -1
rez[i[1]][i[0]] = -1
"""
for i in rez:
for j in i:
print(j, end = ' ')
print()
print()
"""
flag2 = True
while flag2:
flag2 = False
for i in mas:
for j in range(uzNumber):
for k in range(uzNumber):
if rez[j][k] == -1 and i[1] == j and rez[i[0]][k
] != -1:
rez[i[0]][k] = -1
flag2 = True
if rez[i[0]][i[0]] == -1 and i[0] == j and i[1
] == k and rez[j][k] != -1:
rez[j][k] = -1
flag2 = True
"""
for i in rez:
for j in i:
print(j, end = ' ')
print()
print()
"""
for k in range(uzNumber):
frontier = q.Queue()
frontier.put(k)
while not frontier.empty():
current = frontier.get()
for i in range(len(mas)):
if mas[i][0] == current:
if rez[k][mas[i][1]] != -1:
frontier.put(mas[i][1])
rez[k][mas[i][1]] += 1
print('matrix for city', cityCounter)
for i in rez:
for j in i:
print(j, end=' ')
print()
print()
cityCounter += 1
mas = []
<|reserved_special_token_1|>
#import os
import queue as q
#Считываем ввод
file = open('input.txt', 'r')
inp = ''
for i in file:
for j in i:
if (j != '\n'):
inp += j
else:
inp += ' '
inp += ' '
#print(inp)
file.close()
#Записываем все пути в двумерный массив
tmp = '' #Переменная для хранения текущего числа из ввода
svNumber = 0 #Число связей между узлами (число улиц в городе)
counter = 0 #Cчётчик улиц оставшихся для определения
cityCounter = 0 #Счётчик гродов
flag = True #Флаг отвечающий за отличии того что мы считываем (номер начального/конечного узла)
for i in inp:
if (i != ' '):
tmp += i
else:
tmp = int(tmp)
#print(tmp)
if (counter == 0): #Тот случай когда в tmp лежит кол-во связей
mas = [[0 for id1 in range(2)] for id2 in range(tmp)]
svNumber = tmp
counter = tmp
else:
if (flag): #Запись номера начального узла текущей связи
mas[svNumber - counter][0] = tmp
flag = False
else: #Запись номера конечного узла текущей связи
mas[svNumber - counter][1] = tmp
flag = True
counter -= 1
#print(mas)
tmp = ''
if (counter == 0):
#print(mas)
uzNumber = max(max(i) for i in mas) + 1 # Считаем колличество узлов
#print(uzNumber)
#Находим результат (двумерный массив кол-ва путей)
rez = [[0 for id1 in range(uzNumber)] for id2 in range(uzNumber)]
#Заранее заполняем результат минус единицами в нужных местах
for i in mas:
for j in mas:
if (i == j[::-1]):
rez[i[0]][i[0]] = -1
rez[i[1]][i[1]] = -1
rez[i[0]][i[1]] = -1
rez[i[1]][i[0]] = -1
'''
for i in rez:
for j in i:
print(j, end = ' ')
print()
print()
'''
flag2 = True #Флаг отвечающий за то что мы не закончили начальную обработку результата
while (flag2):
flag2 = False
for i in mas:
for j in range(uzNumber):
for k in range(uzNumber):
#print (j, k, i[0], i[1])
if (rez[j][k] == -1 and i[1] == j and rez[i[0]][k] != -1):
rez[i[0]][k] = -1
flag2 =True
#print('here')
if (rez[i[0]][i[0]] == -1 and i[0] == j and i[1] == k and rez[j][k] != -1):
rez[j][k] = -1
flag2 =True
#print('here2')
'''
for i in rez:
for j in i:
print(j, end = ' ')
print()
print()
'''
#Заполняем результат построчно с помощью алгоритма A*
for k in range(uzNumber):
#if (rez[k][0] != -1): #Проверка на то что из узла k нельзя попасть в цикл
frontier = q.Queue() #Создаём очередь типа FIFO для хранения текущих узлов
frontier.put(k) #Начинам с узла, номер которого соответствует номеру текущей строки в результате
while (not frontier.empty()):
current = frontier.get()
#print(current)
for i in range (len(mas)):
if (mas[i][0] == current):
if (rez[k][mas[i][1]] != -1):
frontier.put(mas[i][1]) #Добавляем в очередь соседние узлы
rez[k][mas[i][1]] += 1 #Считаем результат
#Выводим результат
print('matrix for city', cityCounter)
for i in rez:
for j in i:
print(j, end = ' ')
print()
print()
cityCounter += 1
mas = []
#os.system('PAUSE')
|
flexible
|
{
"blob_id": "bb847480e7e4508fbfb5e7873c4ed390943e2fcf",
"index": 3589,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in file:\n for j in i:\n if j != '\\n':\n inp += j\n else:\n inp += ' '\ninp += ' '\nfile.close()\n<mask token>\nfor i in inp:\n if i != ' ':\n tmp += i\n else:\n tmp = int(tmp)\n if counter == 0:\n mas = [[(0) for id1 in range(2)] for id2 in range(tmp)]\n svNumber = tmp\n counter = tmp\n elif flag:\n mas[svNumber - counter][0] = tmp\n flag = False\n else:\n mas[svNumber - counter][1] = tmp\n flag = True\n counter -= 1\n tmp = ''\n if counter == 0:\n uzNumber = max(max(i) for i in mas) + 1\n rez = [[(0) for id1 in range(uzNumber)] for id2 in range(uzNumber)]\n for i in mas:\n for j in mas:\n if i == j[::-1]:\n rez[i[0]][i[0]] = -1\n rez[i[1]][i[1]] = -1\n rez[i[0]][i[1]] = -1\n rez[i[1]][i[0]] = -1\n \"\"\"\n for i in rez:\n for j in i:\n print(j, end = ' ')\n print()\n print()\n \"\"\"\n flag2 = True\n while flag2:\n flag2 = False\n for i in mas:\n for j in range(uzNumber):\n for k in range(uzNumber):\n if rez[j][k] == -1 and i[1] == j and rez[i[0]][k\n ] != -1:\n rez[i[0]][k] = -1\n flag2 = True\n if rez[i[0]][i[0]] == -1 and i[0] == j and i[1\n ] == k and rez[j][k] != -1:\n rez[j][k] = -1\n flag2 = True\n \"\"\" \n for i in rez:\n for j in i:\n print(j, end = ' ')\n print()\n print()\n \"\"\"\n for k in range(uzNumber):\n frontier = q.Queue()\n frontier.put(k)\n while not frontier.empty():\n current = frontier.get()\n for i in range(len(mas)):\n if mas[i][0] == current:\n if rez[k][mas[i][1]] != -1:\n frontier.put(mas[i][1])\n rez[k][mas[i][1]] += 1\n print('matrix for city', cityCounter)\n for i in rez:\n for j in i:\n print(j, end=' ')\n print()\n print()\n cityCounter += 1\n mas = []\n",
"step-3": "<mask token>\nfile = open('input.txt', 'r')\ninp = ''\nfor i in file:\n for j in i:\n if j != '\\n':\n inp += j\n else:\n inp += ' '\ninp += ' '\nfile.close()\ntmp = ''\nsvNumber = 0\ncounter = 0\ncityCounter = 0\nflag = True\nfor i in inp:\n if i != ' ':\n tmp += i\n else:\n tmp = int(tmp)\n if counter == 0:\n mas = [[(0) for id1 in range(2)] for id2 in range(tmp)]\n svNumber = tmp\n counter = tmp\n elif flag:\n mas[svNumber - counter][0] = tmp\n flag = False\n else:\n mas[svNumber - counter][1] = tmp\n flag = True\n counter -= 1\n tmp = ''\n if counter == 0:\n uzNumber = max(max(i) for i in mas) + 1\n rez = [[(0) for id1 in range(uzNumber)] for id2 in range(uzNumber)]\n for i in mas:\n for j in mas:\n if i == j[::-1]:\n rez[i[0]][i[0]] = -1\n rez[i[1]][i[1]] = -1\n rez[i[0]][i[1]] = -1\n rez[i[1]][i[0]] = -1\n \"\"\"\n for i in rez:\n for j in i:\n print(j, end = ' ')\n print()\n print()\n \"\"\"\n flag2 = True\n while flag2:\n flag2 = False\n for i in mas:\n for j in range(uzNumber):\n for k in range(uzNumber):\n if rez[j][k] == -1 and i[1] == j and rez[i[0]][k\n ] != -1:\n rez[i[0]][k] = -1\n flag2 = True\n if rez[i[0]][i[0]] == -1 and i[0] == j and i[1\n ] == k and rez[j][k] != -1:\n rez[j][k] = -1\n flag2 = True\n \"\"\" \n for i in rez:\n for j in i:\n print(j, end = ' ')\n print()\n print()\n \"\"\"\n for k in range(uzNumber):\n frontier = q.Queue()\n frontier.put(k)\n while not frontier.empty():\n current = frontier.get()\n for i in range(len(mas)):\n if mas[i][0] == current:\n if rez[k][mas[i][1]] != -1:\n frontier.put(mas[i][1])\n rez[k][mas[i][1]] += 1\n print('matrix for city', cityCounter)\n for i in rez:\n for j in i:\n print(j, end=' ')\n print()\n print()\n cityCounter += 1\n mas = []\n",
"step-4": "import queue as q\nfile = open('input.txt', 'r')\ninp = ''\nfor i in file:\n for j in i:\n if j != '\\n':\n inp += j\n else:\n inp += ' '\ninp += ' '\nfile.close()\ntmp = ''\nsvNumber = 0\ncounter = 0\ncityCounter = 0\nflag = True\nfor i in inp:\n if i != ' ':\n tmp += i\n else:\n tmp = int(tmp)\n if counter == 0:\n mas = [[(0) for id1 in range(2)] for id2 in range(tmp)]\n svNumber = tmp\n counter = tmp\n elif flag:\n mas[svNumber - counter][0] = tmp\n flag = False\n else:\n mas[svNumber - counter][1] = tmp\n flag = True\n counter -= 1\n tmp = ''\n if counter == 0:\n uzNumber = max(max(i) for i in mas) + 1\n rez = [[(0) for id1 in range(uzNumber)] for id2 in range(uzNumber)]\n for i in mas:\n for j in mas:\n if i == j[::-1]:\n rez[i[0]][i[0]] = -1\n rez[i[1]][i[1]] = -1\n rez[i[0]][i[1]] = -1\n rez[i[1]][i[0]] = -1\n \"\"\"\n for i in rez:\n for j in i:\n print(j, end = ' ')\n print()\n print()\n \"\"\"\n flag2 = True\n while flag2:\n flag2 = False\n for i in mas:\n for j in range(uzNumber):\n for k in range(uzNumber):\n if rez[j][k] == -1 and i[1] == j and rez[i[0]][k\n ] != -1:\n rez[i[0]][k] = -1\n flag2 = True\n if rez[i[0]][i[0]] == -1 and i[0] == j and i[1\n ] == k and rez[j][k] != -1:\n rez[j][k] = -1\n flag2 = True\n \"\"\" \n for i in rez:\n for j in i:\n print(j, end = ' ')\n print()\n print()\n \"\"\"\n for k in range(uzNumber):\n frontier = q.Queue()\n frontier.put(k)\n while not frontier.empty():\n current = frontier.get()\n for i in range(len(mas)):\n if mas[i][0] == current:\n if rez[k][mas[i][1]] != -1:\n frontier.put(mas[i][1])\n rez[k][mas[i][1]] += 1\n print('matrix for city', cityCounter)\n for i in rez:\n for j in i:\n print(j, end=' ')\n print()\n print()\n cityCounter += 1\n mas = []\n",
"step-5": "#import os\nimport queue as q\n\n#Считываем ввод\nfile = open('input.txt', 'r')\n\ninp = ''\nfor i in file:\n for j in i:\n if (j != '\\n'):\n inp += j\n else:\n inp += ' '\ninp += ' '\n#print(inp)\n\nfile.close()\n\n#Записываем все пути в двумерный массив\ntmp = '' #Переменная для хранения текущего числа из ввода\nsvNumber = 0 #Число связей между узлами (число улиц в городе)\ncounter = 0 #Cчётчик улиц оставшихся для определения\ncityCounter = 0 #Счётчик гродов\nflag = True #Флаг отвечающий за отличии того что мы считываем (номер начального/конечного узла)\nfor i in inp:\n if (i != ' '):\n tmp += i\n else:\n tmp = int(tmp)\n #print(tmp)\n if (counter == 0): #Тот случай когда в tmp лежит кол-во связей\n mas = [[0 for id1 in range(2)] for id2 in range(tmp)]\n svNumber = tmp\n counter = tmp\n else:\n if (flag): #Запись номера начального узла текущей связи\n mas[svNumber - counter][0] = tmp\n flag = False\n else: #Запись номера конечного узла текущей связи\n mas[svNumber - counter][1] = tmp\n flag = True\n counter -= 1\n #print(mas)\n tmp = ''\n \n if (counter == 0):\n #print(mas)\n \n uzNumber = max(max(i) for i in mas) + 1 # Считаем колличество узлов\n #print(uzNumber)\n\n #Находим результат (двумерный массив кол-ва путей)\n rez = [[0 for id1 in range(uzNumber)] for id2 in range(uzNumber)]\n\n #Заранее заполняем результат минус единицами в нужных местах\n for i in mas:\n for j in mas:\n if (i == j[::-1]):\n rez[i[0]][i[0]] = -1\n rez[i[1]][i[1]] = -1\n rez[i[0]][i[1]] = -1\n rez[i[1]][i[0]] = -1\n '''\n for i in rez:\n for j in i:\n print(j, end = ' ')\n print()\n print()\n '''\n \n flag2 = True #Флаг отвечающий за то что мы не закончили начальную обработку результата\n while (flag2):\n flag2 = False\n for i in mas:\n for j in range(uzNumber):\n for k in range(uzNumber):\n #print (j, k, i[0], i[1])\n if (rez[j][k] == -1 and i[1] == j and rez[i[0]][k] != -1):\n rez[i[0]][k] = -1\n flag2 =True\n #print('here')\n \n if (rez[i[0]][i[0]] == -1 and i[0] == j and 
i[1] == k and rez[j][k] != -1):\n rez[j][k] = -1\n flag2 =True\n #print('here2')\n ''' \n for i in rez:\n for j in i:\n print(j, end = ' ')\n print()\n print()\n '''\n \n #Заполняем результат построчно с помощью алгоритма A*\n for k in range(uzNumber):\n #if (rez[k][0] != -1): #Проверка на то что из узла k нельзя попасть в цикл\n frontier = q.Queue() #Создаём очередь типа FIFO для хранения текущих узлов\n frontier.put(k) #Начинам с узла, номер которого соответствует номеру текущей строки в результате\n\n while (not frontier.empty()):\n current = frontier.get()\n #print(current)\n \n for i in range (len(mas)):\n if (mas[i][0] == current):\n if (rez[k][mas[i][1]] != -1):\n frontier.put(mas[i][1]) #Добавляем в очередь соседние узлы\n rez[k][mas[i][1]] += 1 #Считаем результат\n\n #Выводим результат \n print('matrix for city', cityCounter)\n for i in rez:\n for j in i:\n print(j, end = ' ')\n print()\n print()\n\n cityCounter += 1\n mas = []\n \n#os.system('PAUSE')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):
cell = table.add_row().cells
cell[0].text = str(word)
cell[1].text = str(frequency)
doc.save('results.docx')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
plain_text = docx2txt.process('kashmiri.docx')
list_of_words = plain_text.split()
counter_list_of_words = Counter(list_of_words)
elements = counter_list_of_words.items()
doc = Document()
table = doc.add_table(rows=1, cols=2)
cell1 = table.cell(0, 0)
cell1.text = 'Word'
cell2 = table.cell(0, 1)
cell2.text = 'Frequency'
for word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):
cell = table.add_row().cells
cell[0].text = str(word)
cell[1].text = str(frequency)
doc.save('results.docx')
<|reserved_special_token_1|>
from collections import Counter
from docx import Document
import docx2txt
plain_text = docx2txt.process('kashmiri.docx')
list_of_words = plain_text.split()
counter_list_of_words = Counter(list_of_words)
elements = counter_list_of_words.items()
doc = Document()
table = doc.add_table(rows=1, cols=2)
cell1 = table.cell(0, 0)
cell1.text = 'Word'
cell2 = table.cell(0, 1)
cell2.text = 'Frequency'
for word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):
cell = table.add_row().cells
cell[0].text = str(word)
cell[1].text = str(frequency)
doc.save('results.docx')
<|reserved_special_token_1|>
from collections import Counter
from docx import Document
import docx2txt
plain_text = docx2txt.process("kashmiri.docx")
list_of_words = plain_text.split()
#print(Counter(list_of_words))
counter_list_of_words = Counter(list_of_words)
elements = counter_list_of_words.items()
# for a, b in sorted(elements, key=lambda x: x[1], reverse=True):
# print(a)
# print(b)
doc = Document()
# Create and Name Table Heading
table = doc.add_table(rows=1, cols=2)
cell1 = table.cell(0, 0)
cell1.text = 'Word'
cell2 = table.cell(0, 1)
cell2.text = 'Frequency'
#Iterate over collection elements and append to table craeted
for word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):
cell = table.add_row().cells
cell[0].text = str(word)
cell[1].text = str(frequency)
doc.save("results.docx")
|
flexible
|
{
"blob_id": "9ad36f157abae849a1550cb96e650746d57f491d",
"index": 9732,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):\n cell = table.add_row().cells\n cell[0].text = str(word)\n cell[1].text = str(frequency)\ndoc.save('results.docx')\n",
"step-3": "<mask token>\nplain_text = docx2txt.process('kashmiri.docx')\nlist_of_words = plain_text.split()\ncounter_list_of_words = Counter(list_of_words)\nelements = counter_list_of_words.items()\ndoc = Document()\ntable = doc.add_table(rows=1, cols=2)\ncell1 = table.cell(0, 0)\ncell1.text = 'Word'\ncell2 = table.cell(0, 1)\ncell2.text = 'Frequency'\nfor word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):\n cell = table.add_row().cells\n cell[0].text = str(word)\n cell[1].text = str(frequency)\ndoc.save('results.docx')\n",
"step-4": "from collections import Counter\nfrom docx import Document\nimport docx2txt\nplain_text = docx2txt.process('kashmiri.docx')\nlist_of_words = plain_text.split()\ncounter_list_of_words = Counter(list_of_words)\nelements = counter_list_of_words.items()\ndoc = Document()\ntable = doc.add_table(rows=1, cols=2)\ncell1 = table.cell(0, 0)\ncell1.text = 'Word'\ncell2 = table.cell(0, 1)\ncell2.text = 'Frequency'\nfor word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):\n cell = table.add_row().cells\n cell[0].text = str(word)\n cell[1].text = str(frequency)\ndoc.save('results.docx')\n",
"step-5": "from collections import Counter\nfrom docx import Document\nimport docx2txt\n\nplain_text = docx2txt.process(\"kashmiri.docx\")\nlist_of_words = plain_text.split()\n#print(Counter(list_of_words))\ncounter_list_of_words = Counter(list_of_words)\nelements = counter_list_of_words.items()\n# for a, b in sorted(elements, key=lambda x: x[1], reverse=True):\n# print(a)\n# print(b)\n\ndoc = Document()\n# Create and Name Table Heading\ntable = doc.add_table(rows=1, cols=2)\ncell1 = table.cell(0, 0)\ncell1.text = 'Word'\ncell2 = table.cell(0, 1)\ncell2.text = 'Frequency'\n\n#Iterate over collection elements and append to table craeted\nfor word, frequency in sorted(elements, key=lambda x: x[1], reverse=True):\n cell = table.add_row().cells\n cell[0].text = str(word)\n cell[1].text = str(frequency)\ndoc.save(\"results.docx\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api_view(['GET'])
def get_status(request):
if request.method == 'GET':
return HttpResponse(content='Service is OK!')
<|reserved_special_token_1|>
from django.http import HttpResponse
from rest_framework.decorators import api_view
@api_view(['GET'])
def get_status(request):
if request.method == 'GET':
return HttpResponse(content='Service is OK!')
|
flexible
|
{
"blob_id": "f021940c16b7ed7fdf1088f2137d3ef724719c80",
"index": 1726,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@api_view(['GET'])\ndef get_status(request):\n if request.method == 'GET':\n return HttpResponse(content='Service is OK!')\n",
"step-3": "from django.http import HttpResponse\nfrom rest_framework.decorators import api_view\n\n\n@api_view(['GET'])\ndef get_status(request):\n if request.method == 'GET':\n return HttpResponse(content='Service is OK!')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.conf.urls import url, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
app_name = 'stock_main'
urlpatterns = [
url(r'^$', views.Stock_main.as_view(), name='stock_main'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
normal
|
{
"blob_id": "16302f23edf16e201c3f3e9800dc4a9290ddc29e",
"index": 7038,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-3": "<mask token>\napp_name = 'stock_main'\nurlpatterns = [url('^$', views.Stock_main.as_view(), name='stock_main')]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-4": "from django.conf.urls import url, include\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\napp_name = 'stock_main'\nurlpatterns = [url('^$', views.Stock_main.as_view(), name='stock_main')]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-5": "from django.conf.urls import url, include\nfrom . import views\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\napp_name = 'stock_main'\n\nurlpatterns = [\n url(r'^$', views.Stock_main.as_view(), name='stock_main'),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class LocNet:
def __init__(self, scope, buttom_layer):
self.scope = scope
with tf.variable_scope(scope) as scope:
self.build_graph(buttom_layer)
self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),
name='gt_loc')
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LocNet:
def __init__(self, scope, buttom_layer):
self.scope = scope
with tf.variable_scope(scope) as scope:
self.build_graph(buttom_layer)
self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),
name='gt_loc')
<|reserved_special_token_0|>
def loss(self):
with tf.name_scope(self.scope) as scope:
beta = tf.constant(0.05, name='beta')
loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc,
self.logit))
loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.
kernel_weights]
loss_wd = beta * tf.add_n(loss_wd)
total_loss = loss_rms + loss_wd
return total_loss
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class LocNet:
def __init__(self, scope, buttom_layer):
self.scope = scope
with tf.variable_scope(scope) as scope:
self.build_graph(buttom_layer)
self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),
name='gt_loc')
def build_graph(self, buttom_layer):
self.variables = []
self.kernel_weights = []
pool = tf.nn.max_pool(buttom_layer, ksize=[1, 2, 2, 1], strides=[1,
2, 2, 1], padding='SAME', name='pool')
drop = tf.nn.dropout(pool, 0.3)
with tf.name_scope('fc1') as scope:
shape = int(np.prod(drop.get_shape()[1:]))
fc1w = tf.Variable(tf.truncated_normal([shape, 3000], dtype=tf.
float32, stddev=0.1), name='weights')
fc1b = tf.Variable(tf.constant(1.0, shape=[3000], dtype=tf.
float32), trainable=True, name='biases')
pool_flat = tf.reshape(drop, [-1, shape])
fc1l = tf.nn.bias_add(tf.matmul(pool_flat, fc1w), fc1b)
fc1 = tf.nn.relu(fc1l)
self.kernel_weights += [fc1w]
self.variables += [fc1w, fc1b]
with tf.name_scope('fc2') as scope:
fc2w = tf.Variable(tf.truncated_normal([3000, 4], dtype=tf.
float32, stddev=0.1), name='weights')
fc2b = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32
), trainable=True, name='biases')
self.logit = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)
self.kernel_weights += [fc2w]
self.variables += [fc2w, fc2b]
def loss(self):
with tf.name_scope(self.scope) as scope:
beta = tf.constant(0.05, name='beta')
loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc,
self.logit))
loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.
kernel_weights]
loss_wd = beta * tf.add_n(loss_wd)
total_loss = loss_rms + loss_wd
return total_loss
<|reserved_special_token_1|>
import numpy as np
import tensorflow as tf
class LocNet:
def __init__(self, scope, buttom_layer):
self.scope = scope
with tf.variable_scope(scope) as scope:
self.build_graph(buttom_layer)
self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),
name='gt_loc')
def build_graph(self, buttom_layer):
self.variables = []
self.kernel_weights = []
pool = tf.nn.max_pool(buttom_layer, ksize=[1, 2, 2, 1], strides=[1,
2, 2, 1], padding='SAME', name='pool')
drop = tf.nn.dropout(pool, 0.3)
with tf.name_scope('fc1') as scope:
shape = int(np.prod(drop.get_shape()[1:]))
fc1w = tf.Variable(tf.truncated_normal([shape, 3000], dtype=tf.
float32, stddev=0.1), name='weights')
fc1b = tf.Variable(tf.constant(1.0, shape=[3000], dtype=tf.
float32), trainable=True, name='biases')
pool_flat = tf.reshape(drop, [-1, shape])
fc1l = tf.nn.bias_add(tf.matmul(pool_flat, fc1w), fc1b)
fc1 = tf.nn.relu(fc1l)
self.kernel_weights += [fc1w]
self.variables += [fc1w, fc1b]
with tf.name_scope('fc2') as scope:
fc2w = tf.Variable(tf.truncated_normal([3000, 4], dtype=tf.
float32, stddev=0.1), name='weights')
fc2b = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32
), trainable=True, name='biases')
self.logit = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)
self.kernel_weights += [fc2w]
self.variables += [fc2w, fc2b]
def loss(self):
with tf.name_scope(self.scope) as scope:
beta = tf.constant(0.05, name='beta')
loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc,
self.logit))
loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.
kernel_weights]
loss_wd = beta * tf.add_n(loss_wd)
total_loss = loss_rms + loss_wd
return total_loss
<|reserved_special_token_1|>
import numpy as np
import tensorflow as tf
class LocNet:
def __init__(self, scope, buttom_layer):
self.scope = scope
with tf.variable_scope(scope) as scope:
self.build_graph(buttom_layer)
self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None,4),name='gt_loc')
def build_graph(self, buttom_layer):
self.variables = []
self.kernel_weights = []
pool = tf.nn.max_pool(buttom_layer,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME',
name='pool')
drop = tf.nn.dropout(pool, 0.3)
with tf.name_scope('fc1') as scope:
shape = int(np.prod(drop.get_shape()[1:]))
fc1w = tf.Variable(tf.truncated_normal([shape, 3000],
dtype=tf.float32,
stddev=1e-1), name='weights')
fc1b = tf.Variable(tf.constant(1.0, shape=[3000], dtype=tf.float32),
trainable=True, name='biases')
pool_flat = tf.reshape(drop, [-1, shape])
fc1l = tf.nn.bias_add(tf.matmul(pool_flat, fc1w), fc1b)
fc1 = tf.nn.relu(fc1l)
self.kernel_weights += [fc1w]
self.variables += [fc1w, fc1b]
with tf.name_scope('fc2') as scope:
fc2w = tf.Variable(tf.truncated_normal([3000, 4],
dtype=tf.float32,
stddev=1e-1), name='weights')
fc2b = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32),
trainable=True, name='biases')
self.logit = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)
self.kernel_weights += [fc2w]
self.variables += [fc2w, fc2b]
def loss(self):
with tf.name_scope(self.scope) as scope:
beta = tf.constant(0.05, name='beta')
loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc, self.logit))
loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.kernel_weights]
loss_wd = beta * tf.add_n(loss_wd)
total_loss = loss_rms + loss_wd
return total_loss
|
flexible
|
{
"blob_id": "dd4dc1c4a0dc47711d1d0512ef3f6b7908735766",
"index": 3149,
"step-1": "<mask token>\n\n\nclass LocNet:\n\n def __init__(self, scope, buttom_layer):\n self.scope = scope\n with tf.variable_scope(scope) as scope:\n self.build_graph(buttom_layer)\n self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),\n name='gt_loc')\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass LocNet:\n\n def __init__(self, scope, buttom_layer):\n self.scope = scope\n with tf.variable_scope(scope) as scope:\n self.build_graph(buttom_layer)\n self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),\n name='gt_loc')\n <mask token>\n\n def loss(self):\n with tf.name_scope(self.scope) as scope:\n beta = tf.constant(0.05, name='beta')\n loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc,\n self.logit))\n loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.\n kernel_weights]\n loss_wd = beta * tf.add_n(loss_wd)\n total_loss = loss_rms + loss_wd\n return total_loss\n",
"step-3": "<mask token>\n\n\nclass LocNet:\n\n def __init__(self, scope, buttom_layer):\n self.scope = scope\n with tf.variable_scope(scope) as scope:\n self.build_graph(buttom_layer)\n self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),\n name='gt_loc')\n\n def build_graph(self, buttom_layer):\n self.variables = []\n self.kernel_weights = []\n pool = tf.nn.max_pool(buttom_layer, ksize=[1, 2, 2, 1], strides=[1,\n 2, 2, 1], padding='SAME', name='pool')\n drop = tf.nn.dropout(pool, 0.3)\n with tf.name_scope('fc1') as scope:\n shape = int(np.prod(drop.get_shape()[1:]))\n fc1w = tf.Variable(tf.truncated_normal([shape, 3000], dtype=tf.\n float32, stddev=0.1), name='weights')\n fc1b = tf.Variable(tf.constant(1.0, shape=[3000], dtype=tf.\n float32), trainable=True, name='biases')\n pool_flat = tf.reshape(drop, [-1, shape])\n fc1l = tf.nn.bias_add(tf.matmul(pool_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(fc1l)\n self.kernel_weights += [fc1w]\n self.variables += [fc1w, fc1b]\n with tf.name_scope('fc2') as scope:\n fc2w = tf.Variable(tf.truncated_normal([3000, 4], dtype=tf.\n float32, stddev=0.1), name='weights')\n fc2b = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32\n ), trainable=True, name='biases')\n self.logit = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)\n self.kernel_weights += [fc2w]\n self.variables += [fc2w, fc2b]\n\n def loss(self):\n with tf.name_scope(self.scope) as scope:\n beta = tf.constant(0.05, name='beta')\n loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc,\n self.logit))\n loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.\n kernel_weights]\n loss_wd = beta * tf.add_n(loss_wd)\n total_loss = loss_rms + loss_wd\n return total_loss\n",
"step-4": "import numpy as np\nimport tensorflow as tf\n\n\nclass LocNet:\n\n def __init__(self, scope, buttom_layer):\n self.scope = scope\n with tf.variable_scope(scope) as scope:\n self.build_graph(buttom_layer)\n self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None, 4),\n name='gt_loc')\n\n def build_graph(self, buttom_layer):\n self.variables = []\n self.kernel_weights = []\n pool = tf.nn.max_pool(buttom_layer, ksize=[1, 2, 2, 1], strides=[1,\n 2, 2, 1], padding='SAME', name='pool')\n drop = tf.nn.dropout(pool, 0.3)\n with tf.name_scope('fc1') as scope:\n shape = int(np.prod(drop.get_shape()[1:]))\n fc1w = tf.Variable(tf.truncated_normal([shape, 3000], dtype=tf.\n float32, stddev=0.1), name='weights')\n fc1b = tf.Variable(tf.constant(1.0, shape=[3000], dtype=tf.\n float32), trainable=True, name='biases')\n pool_flat = tf.reshape(drop, [-1, shape])\n fc1l = tf.nn.bias_add(tf.matmul(pool_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(fc1l)\n self.kernel_weights += [fc1w]\n self.variables += [fc1w, fc1b]\n with tf.name_scope('fc2') as scope:\n fc2w = tf.Variable(tf.truncated_normal([3000, 4], dtype=tf.\n float32, stddev=0.1), name='weights')\n fc2b = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32\n ), trainable=True, name='biases')\n self.logit = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)\n self.kernel_weights += [fc2w]\n self.variables += [fc2w, fc2b]\n\n def loss(self):\n with tf.name_scope(self.scope) as scope:\n beta = tf.constant(0.05, name='beta')\n loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc,\n self.logit))\n loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.\n kernel_weights]\n loss_wd = beta * tf.add_n(loss_wd)\n total_loss = loss_rms + loss_wd\n return total_loss\n",
"step-5": "\n\nimport numpy as np \nimport tensorflow as tf\n\n\nclass LocNet: \n def __init__(self, scope, buttom_layer):\n self.scope = scope \n with tf.variable_scope(scope) as scope:\n self.build_graph(buttom_layer)\n self.gt_loc = tf.placeholder(dtype=tf.float32, shape=(None,4),name='gt_loc')\n \n def build_graph(self, buttom_layer):\n self.variables = []\n self.kernel_weights = []\n pool = tf.nn.max_pool(buttom_layer,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME',\n name='pool')\n \n drop = tf.nn.dropout(pool, 0.3)\n with tf.name_scope('fc1') as scope:\n shape = int(np.prod(drop.get_shape()[1:]))\n fc1w = tf.Variable(tf.truncated_normal([shape, 3000],\n dtype=tf.float32,\n stddev=1e-1), name='weights')\n fc1b = tf.Variable(tf.constant(1.0, shape=[3000], dtype=tf.float32),\n trainable=True, name='biases')\n pool_flat = tf.reshape(drop, [-1, shape])\n fc1l = tf.nn.bias_add(tf.matmul(pool_flat, fc1w), fc1b)\n fc1 = tf.nn.relu(fc1l)\n self.kernel_weights += [fc1w]\n self.variables += [fc1w, fc1b]\n \n\n with tf.name_scope('fc2') as scope:\n fc2w = tf.Variable(tf.truncated_normal([3000, 4],\n dtype=tf.float32,\n stddev=1e-1), name='weights')\n fc2b = tf.Variable(tf.constant(1.0, shape=[4], dtype=tf.float32),\n trainable=True, name='biases')\n self.logit = tf.nn.bias_add(tf.matmul(fc1, fc2w), fc2b)\n self.kernel_weights += [fc2w]\n self.variables += [fc2w, fc2b]\n \n def loss(self):\n with tf.name_scope(self.scope) as scope:\n beta = tf.constant(0.05, name='beta')\n loss_rms = tf.reduce_max(tf.squared_difference(self.gt_loc, self.logit))\n loss_wd = [tf.reduce_mean(tf.square(w)) for w in self.kernel_weights]\n loss_wd = beta * tf.add_n(loss_wd)\n total_loss = loss_rms + loss_wd\n return total_loss\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from django.db import models
from django.template.defaultfilters import slugify
# Create your models here.
class SlugStampMixin(object):
'''
An Worflow is an ordered collection of a Protocols
'''
def save(self, *args, **kwargs):
super(SlugStampMixin, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name.
new_slug = self.generate_slug()
if not new_slug == self.slug: # Triggered when its a clone method
self.slug = new_slug
super(SlugStampMixin, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name.
def generate_slug(self):
slug = slugify(self.name)
if self.pk:
return "%d-%s" % (self.pk, slug)
else:
return slug
|
normal
|
{
"blob_id": "c30f11e9bac54771df5198971c312624f68d0a33",
"index": 4259,
"step-1": "<mask token>\n\n\nclass SlugStampMixin(object):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SlugStampMixin(object):\n <mask token>\n\n def save(self, *args, **kwargs):\n super(SlugStampMixin, self).save(*args, **kwargs)\n new_slug = self.generate_slug()\n if not new_slug == self.slug:\n self.slug = new_slug\n super(SlugStampMixin, self).save(*args, **kwargs)\n\n def generate_slug(self):\n slug = slugify(self.name)\n if self.pk:\n return '%d-%s' % (self.pk, slug)\n else:\n return slug\n",
"step-3": "<mask token>\n\n\nclass SlugStampMixin(object):\n \"\"\"\n An Worflow is an ordered collection of a Protocols\n \"\"\"\n\n def save(self, *args, **kwargs):\n super(SlugStampMixin, self).save(*args, **kwargs)\n new_slug = self.generate_slug()\n if not new_slug == self.slug:\n self.slug = new_slug\n super(SlugStampMixin, self).save(*args, **kwargs)\n\n def generate_slug(self):\n slug = slugify(self.name)\n if self.pk:\n return '%d-%s' % (self.pk, slug)\n else:\n return slug\n",
"step-4": "from django.db import models\nfrom django.template.defaultfilters import slugify\n\n\nclass SlugStampMixin(object):\n \"\"\"\n An Worflow is an ordered collection of a Protocols\n \"\"\"\n\n def save(self, *args, **kwargs):\n super(SlugStampMixin, self).save(*args, **kwargs)\n new_slug = self.generate_slug()\n if not new_slug == self.slug:\n self.slug = new_slug\n super(SlugStampMixin, self).save(*args, **kwargs)\n\n def generate_slug(self):\n slug = slugify(self.name)\n if self.pk:\n return '%d-%s' % (self.pk, slug)\n else:\n return slug\n",
"step-5": "from django.db import models\nfrom django.template.defaultfilters import slugify\n\n# Create your models here.\n\nclass SlugStampMixin(object):\n '''\n An Worflow is an ordered collection of a Protocols\n '''\n\n def save(self, *args, **kwargs):\n super(SlugStampMixin, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name.\n \n new_slug = self.generate_slug()\n\n if not new_slug == self.slug: # Triggered when its a clone method\n self.slug = new_slug\n super(SlugStampMixin, self).save(*args, **kwargs) # Method may need to be changed to handle giving it a new name.\n\n\n def generate_slug(self):\n slug = slugify(self.name)\n\n if self.pk:\n return \"%d-%s\" % (self.pk, slug)\n else:\n return slug\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def sendmail(subject, template, to, context):
template_str = 'app/' + template + '.html'
html_msg = render_to_string(template_str, {'data': context})
plain_msg = strip_tags(html_msg)
from_email = 'ridham.shah.aditi@gmail.com'
send_mail(subject, plain_msg, from_email, to, html_message=html_msg)
<|reserved_special_token_1|>
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from datetime import datetime, timedelta
def sendmail(subject, template, to, context):
template_str = 'app/' + template + '.html'
html_msg = render_to_string(template_str, {'data': context})
plain_msg = strip_tags(html_msg)
from_email = 'ridham.shah.aditi@gmail.com'
send_mail(subject, plain_msg, from_email, to, html_message=html_msg)
|
flexible
|
{
"blob_id": "0349a8a4841b024afd77d20ae18810645fad41cd",
"index": 4883,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef sendmail(subject, template, to, context):\n template_str = 'app/' + template + '.html'\n html_msg = render_to_string(template_str, {'data': context})\n plain_msg = strip_tags(html_msg)\n from_email = 'ridham.shah.aditi@gmail.com'\n send_mail(subject, plain_msg, from_email, to, html_message=html_msg)\n",
"step-3": "from django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\nfrom datetime import datetime, timedelta\n\n\ndef sendmail(subject, template, to, context):\n template_str = 'app/' + template + '.html'\n html_msg = render_to_string(template_str, {'data': context})\n plain_msg = strip_tags(html_msg)\n from_email = 'ridham.shah.aditi@gmail.com'\n send_mail(subject, plain_msg, from_email, to, html_message=html_msg)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution2(n):
result_list = [1, 2]
for i in range(3, n + 1):
max_mult = max(list(map(lambda x: result_list[x] * (i - x - 1),
range(i - 1))))
result_list.append(max_mult)
print(result_list, i)
return max_mult
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution1(n):
if n <= 4:
return n
else:
return max(map(lambda x: solution1(x) * solution1(n - x), range(1,
n // 2 + 1)))
def solution2(n):
result_list = [1, 2]
for i in range(3, n + 1):
max_mult = max(list(map(lambda x: result_list[x] * (i - x - 1),
range(i - 1))))
result_list.append(max_mult)
print(result_list, i)
return max_mult
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution1(n):
if n <= 4:
return n
else:
return max(map(lambda x: solution1(x) * solution1(n - x), range(1,
n // 2 + 1)))
def solution2(n):
result_list = [1, 2]
for i in range(3, n + 1):
max_mult = max(list(map(lambda x: result_list[x] * (i - x - 1),
range(i - 1))))
result_list.append(max_mult)
print(result_list, i)
return max_mult
if __name__ == '__main__':
result = solution1(8)
print(result)
result = solution2(8)
print(result)
<|reserved_special_token_1|>
'''
Seperate a number into several, maximize their product
'''
# recursive
def solution1(n):
if n <= 4:
return n
else:
return max(map(lambda x: solution1(x)*solution1(n-x), range(1, n//2 + 1)))
# dp
def solution2(n):
result_list = [1,2]
for i in range(3, n+1):
max_mult = max(list(map(lambda x: result_list[x] * (i-x-1), range(i-1))))
result_list.append(max_mult)
print(result_list, i)
return max_mult
if __name__ == '__main__':
result = solution1(8)
print(result)
result = solution2(8)
print(result)
|
flexible
|
{
"blob_id": "76db5955b29696ca03ab22ef14ac018e0618e9e3",
"index": 2729,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution2(n):\n result_list = [1, 2]\n for i in range(3, n + 1):\n max_mult = max(list(map(lambda x: result_list[x] * (i - x - 1),\n range(i - 1))))\n result_list.append(max_mult)\n print(result_list, i)\n return max_mult\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef solution1(n):\n if n <= 4:\n return n\n else:\n return max(map(lambda x: solution1(x) * solution1(n - x), range(1, \n n // 2 + 1)))\n\n\ndef solution2(n):\n result_list = [1, 2]\n for i in range(3, n + 1):\n max_mult = max(list(map(lambda x: result_list[x] * (i - x - 1),\n range(i - 1))))\n result_list.append(max_mult)\n print(result_list, i)\n return max_mult\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef solution1(n):\n if n <= 4:\n return n\n else:\n return max(map(lambda x: solution1(x) * solution1(n - x), range(1, \n n // 2 + 1)))\n\n\ndef solution2(n):\n result_list = [1, 2]\n for i in range(3, n + 1):\n max_mult = max(list(map(lambda x: result_list[x] * (i - x - 1),\n range(i - 1))))\n result_list.append(max_mult)\n print(result_list, i)\n return max_mult\n\n\nif __name__ == '__main__':\n result = solution1(8)\n print(result)\n result = solution2(8)\n print(result)\n",
"step-5": "'''\nSeperate a number into several, maximize their product\n'''\n\n# recursive\ndef solution1(n):\n if n <= 4:\n return n\n else:\n return max(map(lambda x: solution1(x)*solution1(n-x), range(1, n//2 + 1)))\n\n# dp\ndef solution2(n):\n result_list = [1,2]\n\n for i in range(3, n+1):\n max_mult = max(list(map(lambda x: result_list[x] * (i-x-1), range(i-1))))\n result_list.append(max_mult)\n print(result_list, i)\n\n return max_mult\n\nif __name__ == '__main__':\n result = solution1(8)\n print(result)\n result = solution2(8)\n print(result)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Any object containing execute(self) method is considered to be IDE App
# this is Duck typing concept
class PyCharm:
def execute(self):
print("pycharm ide runnig")
class MyIde:
def execute(self):
print("MyIde running")
class Laptop:
def code(self,ide):
ide.execute()
ide=MyIde()
obj=Laptop()
obj.code(ide)
|
normal
|
{
"blob_id": "9ab3dd87f17ac75a3831e9ec1f0746ad81fad70d",
"index": 501,
"step-1": "<mask token>\n\n\nclass MyIde:\n <mask token>\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<mask token>\n",
"step-2": "class PyCharm:\n <mask token>\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<mask token>\n",
"step-3": "class PyCharm:\n\n def execute(self):\n print('pycharm ide runnig')\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<mask token>\nobj.code(ide)\n",
"step-4": "class PyCharm:\n\n def execute(self):\n print('pycharm ide runnig')\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\nide = MyIde()\nobj = Laptop()\nobj.code(ide)\n",
"step-5": "\r\n# Any object containing execute(self) method is considered to be IDE App\r\n# this is Duck typing concept\r\n\r\nclass PyCharm:\r\n def execute(self):\r\n print(\"pycharm ide runnig\")\r\n\r\nclass MyIde:\r\n def execute(self):\r\n print(\"MyIde running\")\r\n\r\nclass Laptop:\r\n\r\n def code(self,ide):\r\n ide.execute()\r\n\r\nide=MyIde()\r\n\r\nobj=Laptop()\r\n\r\nobj.code(ide)\r\n",
"step-ids": [
3,
5,
7,
8,
9
]
}
|
[
3,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[
'GET'])
def get_all_places(city_id):
""" gets all places in a city """
city = storage.get('City', city_id)
if not city:
abort(404)
return jsonify([place.to_dict() for place in city.places]), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])
def get_place(place_id):
"""Gets a place by place id"""
place = storage.get('Place', place_id)
if not place:
abort(404)
return jsonify(place.to_dict()), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']
)
def delete_place(place_id):
""" deletes places"""
place = storage.get('Place', place_id)
if not place:
abort(404)
storage.delete(place)
storage.save()
return jsonify({}), 200
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[
'GET'])
def get_all_places(city_id):
""" gets all places in a city """
city = storage.get('City', city_id)
if not city:
abort(404)
return jsonify([place.to_dict() for place in city.places]), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])
def get_place(place_id):
"""Gets a place by place id"""
place = storage.get('Place', place_id)
if not place:
abort(404)
return jsonify(place.to_dict()), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']
)
def delete_place(place_id):
""" deletes places"""
place = storage.get('Place', place_id)
if not place:
abort(404)
storage.delete(place)
storage.save()
return jsonify({}), 200
<|reserved_special_token_0|>
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])
def update_place(place_id):
""" updates place """
params = request.get_json()
if not params:
abort(400, 'Not a JSON')
place = storage.get('Place', place_id)
if not place:
abort(404)
for k, v in params.items():
if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:
setattr(place, k, v)
storage.save()
return jsonify(place.to_dict()), 200
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[
'GET'])
def get_all_places(city_id):
""" gets all places in a city """
city = storage.get('City', city_id)
if not city:
abort(404)
return jsonify([place.to_dict() for place in city.places]), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])
def get_place(place_id):
"""Gets a place by place id"""
place = storage.get('Place', place_id)
if not place:
abort(404)
return jsonify(place.to_dict()), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']
)
def delete_place(place_id):
""" deletes places"""
place = storage.get('Place', place_id)
if not place:
abort(404)
storage.delete(place)
storage.save()
return jsonify({}), 200
@app_views.route('/cities/<city_id>/places', strict_slashes=False, methods=
['POST'])
def post_place(city_id):
"""posts a new place to city"""
kwargs = request.get_json()
if not kwargs:
abort(400, 'Not a JSON')
elif 'name' not in kwargs:
abort(400, 'Missing name')
elif 'user_id' not in kwargs:
abort(400, 'Missing user_id')
else:
city = storage.get('City', city_id)
user = storage.get('User', kwargs['user_id'])
if not city:
abort(404)
if not user:
abort(404)
kwargs['city_id'] = city_id
kwargs['user_id'] = user.id
new_place = Place(**kwargs)
storage.save()
return jsonify(new_place.to_dict()), 201
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])
def update_place(place_id):
""" updates place """
params = request.get_json()
if not params:
abort(400, 'Not a JSON')
place = storage.get('Place', place_id)
if not place:
abort(404)
for k, v in params.items():
if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:
setattr(place, k, v)
storage.save()
return jsonify(place.to_dict()), 200
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from flask import jsonify, request, Response, abort
from api.v1.views import app_views
from models import storage
from models.place import Place
@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[
'GET'])
def get_all_places(city_id):
""" gets all places in a city """
city = storage.get('City', city_id)
if not city:
abort(404)
return jsonify([place.to_dict() for place in city.places]), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])
def get_place(place_id):
"""Gets a place by place id"""
place = storage.get('Place', place_id)
if not place:
abort(404)
return jsonify(place.to_dict()), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']
)
def delete_place(place_id):
""" deletes places"""
place = storage.get('Place', place_id)
if not place:
abort(404)
storage.delete(place)
storage.save()
return jsonify({}), 200
@app_views.route('/cities/<city_id>/places', strict_slashes=False, methods=
['POST'])
def post_place(city_id):
"""posts a new place to city"""
kwargs = request.get_json()
if not kwargs:
abort(400, 'Not a JSON')
elif 'name' not in kwargs:
abort(400, 'Missing name')
elif 'user_id' not in kwargs:
abort(400, 'Missing user_id')
else:
city = storage.get('City', city_id)
user = storage.get('User', kwargs['user_id'])
if not city:
abort(404)
if not user:
abort(404)
kwargs['city_id'] = city_id
kwargs['user_id'] = user.id
new_place = Place(**kwargs)
storage.save()
return jsonify(new_place.to_dict()), 201
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])
def update_place(place_id):
""" updates place """
params = request.get_json()
if not params:
abort(400, 'Not a JSON')
place = storage.get('Place', place_id)
if not place:
abort(404)
for k, v in params.items():
if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:
setattr(place, k, v)
storage.save()
return jsonify(place.to_dict()), 200
<|reserved_special_token_1|>
#!/usr/bin/python3
"Places module"
from flask import jsonify, request, Response, abort
from api.v1.views import app_views
from models import storage
from models.place import Place
@app_views.route('cities/<city_id>/places', strict_slashes=False,
methods=['GET'])
def get_all_places(city_id):
''' gets all places in a city '''
city = storage.get("City", city_id)
if not city:
abort(404)
return jsonify([place.to_dict() for place in city.places]), 200
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])
def get_place(place_id):
"Gets a place by place id"
place = storage.get("Place", place_id)
if not place:
abort(404)
return jsonify(place.to_dict()), 200
@app_views.route('/places/<place_id>', strict_slashes=False,
methods=['DELETE'])
def delete_place(place_id):
''' deletes places'''
place = storage.get("Place", place_id)
if not place:
abort(404)
storage.delete(place)
storage.save()
return jsonify({}), 200
@app_views.route('/cities/<city_id>/places', strict_slashes=False,
methods=['POST'])
def post_place(city_id):
'''posts a new place to city'''
kwargs = request.get_json()
if not kwargs:
abort(400, 'Not a JSON')
elif 'name' not in kwargs:
abort(400, 'Missing name')
elif 'user_id' not in kwargs:
abort(400, 'Missing user_id')
else:
city = storage.get("City", city_id)
user = storage.get("User", kwargs['user_id'])
if not city:
abort(404)
if not user:
abort(404)
# overwrites or adds w/ valid state_id in case they provide in post
kwargs['city_id'] = city_id
kwargs['user_id'] = user.id
new_place = Place(**kwargs)
storage.save()
return jsonify(new_place.to_dict()), 201
@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])
def update_place(place_id):
''' updates place '''
params = request.get_json()
if not params:
abort(400, 'Not a JSON')
place = storage.get('Place', place_id)
if not place:
abort(404)
for k, v in params.items():
if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:
setattr(place, k, v)
storage.save()
return jsonify(place.to_dict()), 200
|
flexible
|
{
"blob_id": "d67a2eca4e2fde443b99f5133c2657cdf4ac00de",
"index": 4173,
"step-1": "<mask token>\n\n\n@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[\n 'GET'])\ndef get_all_places(city_id):\n \"\"\" gets all places in a city \"\"\"\n city = storage.get('City', city_id)\n if not city:\n abort(404)\n return jsonify([place.to_dict() for place in city.places]), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])\ndef get_place(place_id):\n \"\"\"Gets a place by place id\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']\n )\ndef delete_place(place_id):\n \"\"\" deletes places\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[\n 'GET'])\ndef get_all_places(city_id):\n \"\"\" gets all places in a city \"\"\"\n city = storage.get('City', city_id)\n if not city:\n abort(404)\n return jsonify([place.to_dict() for place in city.places]), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])\ndef get_place(place_id):\n \"\"\"Gets a place by place id\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']\n )\ndef delete_place(place_id):\n \"\"\" deletes places\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n<mask token>\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])\ndef update_place(place_id):\n \"\"\" updates place \"\"\"\n params = request.get_json()\n if not params:\n abort(400, 'Not a JSON')\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n for k, v in params.items():\n if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:\n setattr(place, k, v)\n storage.save()\n return jsonify(place.to_dict()), 200\n",
"step-3": "<mask token>\n\n\n@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[\n 'GET'])\ndef get_all_places(city_id):\n \"\"\" gets all places in a city \"\"\"\n city = storage.get('City', city_id)\n if not city:\n abort(404)\n return jsonify([place.to_dict() for place in city.places]), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])\ndef get_place(place_id):\n \"\"\"Gets a place by place id\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']\n )\ndef delete_place(place_id):\n \"\"\" deletes places\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/cities/<city_id>/places', strict_slashes=False, methods=\n ['POST'])\ndef post_place(city_id):\n \"\"\"posts a new place to city\"\"\"\n kwargs = request.get_json()\n if not kwargs:\n abort(400, 'Not a JSON')\n elif 'name' not in kwargs:\n abort(400, 'Missing name')\n elif 'user_id' not in kwargs:\n abort(400, 'Missing user_id')\n else:\n city = storage.get('City', city_id)\n user = storage.get('User', kwargs['user_id'])\n if not city:\n abort(404)\n if not user:\n abort(404)\n kwargs['city_id'] = city_id\n kwargs['user_id'] = user.id\n new_place = Place(**kwargs)\n storage.save()\n return jsonify(new_place.to_dict()), 201\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])\ndef update_place(place_id):\n \"\"\" updates place \"\"\"\n params = request.get_json()\n if not params:\n abort(400, 'Not a JSON')\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n for k, v in params.items():\n if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:\n setattr(place, k, v)\n storage.save()\n return jsonify(place.to_dict()), 200\n",
"step-4": "<mask token>\nfrom flask import jsonify, request, Response, abort\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.place import Place\n\n\n@app_views.route('cities/<city_id>/places', strict_slashes=False, methods=[\n 'GET'])\ndef get_all_places(city_id):\n \"\"\" gets all places in a city \"\"\"\n city = storage.get('City', city_id)\n if not city:\n abort(404)\n return jsonify([place.to_dict() for place in city.places]), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])\ndef get_place(place_id):\n \"\"\"Gets a place by place id\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['DELETE']\n )\ndef delete_place(place_id):\n \"\"\" deletes places\"\"\"\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/cities/<city_id>/places', strict_slashes=False, methods=\n ['POST'])\ndef post_place(city_id):\n \"\"\"posts a new place to city\"\"\"\n kwargs = request.get_json()\n if not kwargs:\n abort(400, 'Not a JSON')\n elif 'name' not in kwargs:\n abort(400, 'Missing name')\n elif 'user_id' not in kwargs:\n abort(400, 'Missing user_id')\n else:\n city = storage.get('City', city_id)\n user = storage.get('User', kwargs['user_id'])\n if not city:\n abort(404)\n if not user:\n abort(404)\n kwargs['city_id'] = city_id\n kwargs['user_id'] = user.id\n new_place = Place(**kwargs)\n storage.save()\n return jsonify(new_place.to_dict()), 201\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])\ndef update_place(place_id):\n \"\"\" updates place \"\"\"\n params = request.get_json()\n if not params:\n abort(400, 'Not a JSON')\n place = storage.get('Place', place_id)\n if not place:\n abort(404)\n for k, v in params.items():\n if k not in 
['id', 'user_id', 'city_id', 'create_at', 'updated_at']:\n setattr(place, k, v)\n storage.save()\n return jsonify(place.to_dict()), 200\n",
"step-5": "#!/usr/bin/python3\n\"Places module\"\nfrom flask import jsonify, request, Response, abort\nfrom api.v1.views import app_views\nfrom models import storage\nfrom models.place import Place\n\n\n@app_views.route('cities/<city_id>/places', strict_slashes=False,\n methods=['GET'])\ndef get_all_places(city_id):\n ''' gets all places in a city '''\n city = storage.get(\"City\", city_id)\n if not city:\n abort(404)\n\n return jsonify([place.to_dict() for place in city.places]), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['GET'])\ndef get_place(place_id):\n \"Gets a place by place id\"\n place = storage.get(\"Place\", place_id)\n if not place:\n abort(404)\n return jsonify(place.to_dict()), 200\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False,\n methods=['DELETE'])\ndef delete_place(place_id):\n ''' deletes places'''\n place = storage.get(\"Place\", place_id)\n if not place:\n abort(404)\n storage.delete(place)\n storage.save()\n return jsonify({}), 200\n\n\n@app_views.route('/cities/<city_id>/places', strict_slashes=False,\n methods=['POST'])\ndef post_place(city_id):\n '''posts a new place to city'''\n kwargs = request.get_json()\n\n if not kwargs:\n abort(400, 'Not a JSON')\n elif 'name' not in kwargs:\n abort(400, 'Missing name')\n elif 'user_id' not in kwargs:\n abort(400, 'Missing user_id')\n else:\n city = storage.get(\"City\", city_id)\n user = storage.get(\"User\", kwargs['user_id'])\n if not city:\n abort(404)\n if not user:\n abort(404)\n\n # overwrites or adds w/ valid state_id in case they provide in post\n kwargs['city_id'] = city_id\n kwargs['user_id'] = user.id\n\n new_place = Place(**kwargs)\n storage.save()\n\n return jsonify(new_place.to_dict()), 201\n\n\n@app_views.route('/places/<place_id>', strict_slashes=False, methods=['PUT'])\ndef update_place(place_id):\n ''' updates place '''\n params = request.get_json()\n if not params:\n abort(400, 'Not a JSON')\n place = storage.get('Place', 
place_id)\n if not place:\n abort(404)\n for k, v in params.items():\n if k not in ['id', 'user_id', 'city_id', 'create_at', 'updated_at']:\n setattr(place, k, v)\n storage.save()\n return jsonify(place.to_dict()), 200\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#coding=utf-8
'初始化Package,加载url,生成app对象'
import web
from myapp.urls import urls
app = web.application(urls, globals())
|
normal
|
{
"blob_id": "4480b305a6f71ff64022f2b890998326bf402bf0",
"index": 1669,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp = web.application(urls, globals())\n",
"step-3": "<mask token>\nimport web\nfrom myapp.urls import urls\napp = web.application(urls, globals())\n",
"step-4": "#coding=utf-8\r\n'初始化Package,加载url,生成app对象'\r\nimport web\r\nfrom myapp.urls import urls\r\n\r\napp = web.application(urls, globals())\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open('data/topojson/topojson2.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
feat = lyr.GetNextFeature()
assert feat['id'] == 'foo'
assert feat['name'] == 'line'
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
ds = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open('data/topojson/topojson2.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
feat = lyr.GetNextFeature()
assert feat['id'] == 'foo'
assert feat['name'] == 'line'
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
ds = None
def test_ogr_toposjon_no_transform():
ds = ogr.Open('data/topojson/topojson3.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
ds = None
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_ogr_toposjon_objects_is_array():
ds = ogr.Open('data/topojson/topojson1.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
expected_results = [('foo', None, 'POINT EMPTY'), (None, None,
'POINT EMPTY'), (None, None, 'POINT EMPTY'), (None, None,
'POINT (100 1010)'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, '0', 'LINESTRING EMPTY'), (None, 'foo',
'LINESTRING EMPTY'), ('1', None,
'LINESTRING (100 1000,110 1000,110 1100)'), ('2', None,
'LINESTRING (110 1100,110 1000,100 1000)'), (None, None,
'POLYGON EMPTY'), (None, None, 'POLYGON EMPTY'), (None, None,
'POLYGON EMPTY'), (None, None,
'POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'
), (None, None,
'POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))'
), (None, None, 'MULTIPOINT EMPTY'), (None, None,
'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT EMPTY'), (None, None,
'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT (100 1010,101 1020)'),
(None, None, 'MULTIPOLYGON EMPTY'), (None, None,
'MULTIPOLYGON EMPTY'), (None, None, 'MULTIPOLYGON EMPTY'), (None,
None,
'MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))'
), (None, None, 'MULTILINESTRING EMPTY'), (None, None,
'MULTILINESTRING EMPTY'), (None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100))'), (None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))'),
(None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'
)]
assert lyr.GetFeatureCount() == len(expected_results)
for i, exp_result in enumerate(expected_results):
feat = lyr.GetNextFeature()
if feat.GetField('id') != exp_result[0] or feat.GetField('name'
) != exp_result[1] or feat.GetGeometryRef().ExportToWkt(
) != exp_result[2]:
feat.DumpReadable()
print(exp_result)
print(feat.GetField('name'))
pytest.fail('failure at feat index %d' % i)
ds = None
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open('data/topojson/topojson2.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
feat = lyr.GetNextFeature()
assert feat['id'] == 'foo'
assert feat['name'] == 'line'
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
ds = None
def test_ogr_toposjon_no_transform():
ds = ogr.Open('data/topojson/topojson3.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
ds = None
<|reserved_special_token_1|>
import ogrtest
import pytest
from osgeo import ogr
def test_ogr_toposjon_objects_is_array():
ds = ogr.Open('data/topojson/topojson1.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
expected_results = [('foo', None, 'POINT EMPTY'), (None, None,
'POINT EMPTY'), (None, None, 'POINT EMPTY'), (None, None,
'POINT (100 1010)'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,
'LINESTRING EMPTY'), (None, '0', 'LINESTRING EMPTY'), (None, 'foo',
'LINESTRING EMPTY'), ('1', None,
'LINESTRING (100 1000,110 1000,110 1100)'), ('2', None,
'LINESTRING (110 1100,110 1000,100 1000)'), (None, None,
'POLYGON EMPTY'), (None, None, 'POLYGON EMPTY'), (None, None,
'POLYGON EMPTY'), (None, None,
'POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'
), (None, None,
'POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))'
), (None, None, 'MULTIPOINT EMPTY'), (None, None,
'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT EMPTY'), (None, None,
'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT (100 1010,101 1020)'),
(None, None, 'MULTIPOLYGON EMPTY'), (None, None,
'MULTIPOLYGON EMPTY'), (None, None, 'MULTIPOLYGON EMPTY'), (None,
None,
'MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))'
), (None, None, 'MULTILINESTRING EMPTY'), (None, None,
'MULTILINESTRING EMPTY'), (None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100))'), (None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))'),
(None, None,
'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'
)]
assert lyr.GetFeatureCount() == len(expected_results)
for i, exp_result in enumerate(expected_results):
feat = lyr.GetNextFeature()
if feat.GetField('id') != exp_result[0] or feat.GetField('name'
) != exp_result[1] or feat.GetGeometryRef().ExportToWkt(
) != exp_result[2]:
feat.DumpReadable()
print(exp_result)
print(feat.GetField('name'))
pytest.fail('failure at feat index %d' % i)
ds = None
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open('data/topojson/topojson2.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'
feat = lyr.GetNextFeature()
assert feat['id'] == 'foo'
assert feat['name'] == 'line'
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat,
'LINESTRING (100 1000,110 1000,110 1100)')
ds = None
def test_ogr_toposjon_no_transform():
ds = ogr.Open('data/topojson/topojson3.topojson')
lyr = ds.GetLayer(0)
assert lyr.GetName() == 'a_layer'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
lyr = ds.GetLayer(1)
assert lyr.GetName() == 'TopoJSON'
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')
ds = None
<|reserved_special_token_1|>
#!/usr/bin/env pytest
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: TopJSON driver test suite.
# Author: Even Rouault
#
###############################################################################
# Copyright (c) 2020, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import ogrtest
import pytest
from osgeo import ogr
###############################################################################
# Test TopoJSON
def test_ogr_toposjon_objects_is_array():
ds = ogr.Open("data/topojson/topojson1.topojson")
lyr = ds.GetLayer(0)
assert lyr.GetName() == "a_layer"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")
lyr = ds.GetLayer(1)
assert lyr.GetName() == "TopoJSON"
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == "id"
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == "name"
expected_results = [
("foo", None, "POINT EMPTY"),
(None, None, "POINT EMPTY"),
(None, None, "POINT EMPTY"),
(None, None, "POINT (100 1010)"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, None, "LINESTRING EMPTY"),
(None, "0", "LINESTRING EMPTY"),
(None, "foo", "LINESTRING EMPTY"),
("1", None, "LINESTRING (100 1000,110 1000,110 1100)"),
("2", None, "LINESTRING (110 1100,110 1000,100 1000)"),
(None, None, "POLYGON EMPTY"),
(None, None, "POLYGON EMPTY"),
(None, None, "POLYGON EMPTY"),
(
None,
None,
"POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))",
),
(
None,
None,
"POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))",
),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT EMPTY"),
(None, None, "MULTIPOINT (100 1010,101 1020)"),
(None, None, "MULTIPOLYGON EMPTY"),
(None, None, "MULTIPOLYGON EMPTY"),
(None, None, "MULTIPOLYGON EMPTY"),
(
None,
None,
"MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))",
),
(None, None, "MULTILINESTRING EMPTY"),
(None, None, "MULTILINESTRING EMPTY"),
(None, None, "MULTILINESTRING ((100 1000,110 1000,110 1100))"),
(
None,
None,
"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))",
),
(
None,
None,
"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))",
),
]
assert lyr.GetFeatureCount() == len(expected_results)
for i, exp_result in enumerate(expected_results):
feat = lyr.GetNextFeature()
if (
feat.GetField("id") != exp_result[0]
or feat.GetField("name") != exp_result[1]
or feat.GetGeometryRef().ExportToWkt() != exp_result[2]
):
feat.DumpReadable()
print(exp_result)
print(feat.GetField("name"))
pytest.fail("failure at feat index %d" % i)
ds = None
def test_ogr_toposjon_objects_is_dict():
ds = ogr.Open("data/topojson/topojson2.topojson")
lyr = ds.GetLayer(0)
assert lyr.GetName() == "a_layer"
assert lyr.GetLayerDefn().GetFieldCount() == 2
assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == "id"
assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == "name"
feat = lyr.GetNextFeature()
assert feat["id"] == "foo"
assert feat["name"] == "line"
ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")
lyr = ds.GetLayer(1)
assert lyr.GetName() == "TopoJSON"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (100 1000,110 1000,110 1100)")
ds = None
def test_ogr_toposjon_no_transform():
ds = ogr.Open("data/topojson/topojson3.topojson")
lyr = ds.GetLayer(0)
assert lyr.GetName() == "a_layer"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (0 0,10 0,0 10,10 0,0 0)")
lyr = ds.GetLayer(1)
assert lyr.GetName() == "TopoJSON"
feat = lyr.GetNextFeature()
ogrtest.check_feature_geometry(feat, "LINESTRING (0 0,10 0,0 10,10 0,0 0)")
ds = None
|
flexible
|
{
"blob_id": "270dba92af583e37c35ed5365f764adfdc2f947d",
"index": 2112,
"step-1": "<mask token>\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n ds = ogr.Open('data/topojson/topojson3.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n ds = None\n",
"step-3": "<mask token>\n\n\ndef test_ogr_toposjon_objects_is_array():\n ds = ogr.Open('data/topojson/topojson1.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n expected_results = [('foo', None, 'POINT EMPTY'), (None, None,\n 'POINT EMPTY'), (None, None, 'POINT EMPTY'), (None, None,\n 'POINT (100 1010)'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, '0', 'LINESTRING EMPTY'), (None, 'foo',\n 'LINESTRING EMPTY'), ('1', None,\n 'LINESTRING (100 1000,110 1000,110 1100)'), ('2', None,\n 'LINESTRING (110 1100,110 1000,100 1000)'), (None, None,\n 'POLYGON EMPTY'), (None, None, 'POLYGON EMPTY'), (None, None,\n 'POLYGON EMPTY'), (None, None,\n 'POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n ), (None, None,\n 'POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))'\n ), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT (100 1010,101 1020)'),\n (None, None, 'MULTIPOLYGON EMPTY'), (None, None,\n 'MULTIPOLYGON EMPTY'), (None, None, 'MULTIPOLYGON EMPTY'), (None,\n None,\n 'MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))'\n ), (None, None, 'MULTILINESTRING EMPTY'), (None, None,\n 
'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100))'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))'),\n (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n )]\n assert lyr.GetFeatureCount() == len(expected_results)\n for i, exp_result in enumerate(expected_results):\n feat = lyr.GetNextFeature()\n if feat.GetField('id') != exp_result[0] or feat.GetField('name'\n ) != exp_result[1] or feat.GetGeometryRef().ExportToWkt(\n ) != exp_result[2]:\n feat.DumpReadable()\n print(exp_result)\n print(feat.GetField('name'))\n pytest.fail('failure at feat index %d' % i)\n ds = None\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n ds = ogr.Open('data/topojson/topojson3.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n ds = None\n",
"step-4": "import ogrtest\nimport pytest\nfrom osgeo import ogr\n\n\ndef test_ogr_toposjon_objects_is_array():\n ds = ogr.Open('data/topojson/topojson1.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n expected_results = [('foo', None, 'POINT EMPTY'), (None, None,\n 'POINT EMPTY'), (None, None, 'POINT EMPTY'), (None, None,\n 'POINT (100 1010)'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, None, 'LINESTRING EMPTY'), (None, None,\n 'LINESTRING EMPTY'), (None, '0', 'LINESTRING EMPTY'), (None, 'foo',\n 'LINESTRING EMPTY'), ('1', None,\n 'LINESTRING (100 1000,110 1000,110 1100)'), ('2', None,\n 'LINESTRING (110 1100,110 1000,100 1000)'), (None, None,\n 'POLYGON EMPTY'), (None, None, 'POLYGON EMPTY'), (None, None,\n 'POLYGON EMPTY'), (None, None,\n 'POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n ), (None, None,\n 'POLYGON ((110 1100,110 1000,100 1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))'\n ), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT EMPTY'), (None, None,\n 'MULTIPOINT EMPTY'), (None, None, 'MULTIPOINT (100 1010,101 1020)'),\n (None, None, 'MULTIPOLYGON EMPTY'), (None, None,\n 'MULTIPOLYGON EMPTY'), (None, None, 'MULTIPOLYGON EMPTY'), (None,\n None,\n 'MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))'\n ), (None, None, 
'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING EMPTY'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100))'), (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))'),\n (None, None,\n 'MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))'\n )]\n assert lyr.GetFeatureCount() == len(expected_results)\n for i, exp_result in enumerate(expected_results):\n feat = lyr.GetNextFeature()\n if feat.GetField('id') != exp_result[0] or feat.GetField('name'\n ) != exp_result[1] or feat.GetGeometryRef().ExportToWkt(\n ) != exp_result[2]:\n feat.DumpReadable()\n print(exp_result)\n print(feat.GetField('name'))\n pytest.fail('failure at feat index %d' % i)\n ds = None\n\n\ndef test_ogr_toposjon_objects_is_dict():\n ds = ogr.Open('data/topojson/topojson2.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == 'id'\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == 'name'\n feat = lyr.GetNextFeature()\n assert feat['id'] == 'foo'\n assert feat['name'] == 'line'\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat,\n 'LINESTRING (100 1000,110 1000,110 1100)')\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n ds = ogr.Open('data/topojson/topojson3.topojson')\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == 'a_layer'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == 'TopoJSON'\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, 'LINESTRING (0 0,10 0,0 10,10 0,0 0)')\n ds = None\n",
"step-5": "#!/usr/bin/env pytest\n# -*- coding: utf-8 -*-\n###############################################################################\n# $Id$\n#\n# Project: GDAL/OGR Test Suite\n# Purpose: TopJSON driver test suite.\n# Author: Even Rouault\n#\n###############################################################################\n# Copyright (c) 2020, Even Rouault <even dot rouault at spatialys.com>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n###############################################################################\n\nimport ogrtest\nimport pytest\n\nfrom osgeo import ogr\n\n###############################################################################\n# Test TopoJSON\n\n\ndef test_ogr_toposjon_objects_is_array():\n\n ds = ogr.Open(\"data/topojson/topojson1.topojson\")\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == \"a_layer\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (100 1000,110 1000,110 1100)\")\n\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == \"TopoJSON\"\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == \"id\"\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == \"name\"\n expected_results = [\n (\"foo\", None, \"POINT EMPTY\"),\n (None, None, \"POINT EMPTY\"),\n (None, None, \"POINT EMPTY\"),\n (None, None, \"POINT (100 1010)\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, None, \"LINESTRING EMPTY\"),\n (None, \"0\", \"LINESTRING EMPTY\"),\n (None, \"foo\", \"LINESTRING EMPTY\"),\n (\"1\", None, \"LINESTRING (100 1000,110 1000,110 1100)\"),\n (\"2\", None, \"LINESTRING (110 1100,110 1000,100 1000)\"),\n (None, None, \"POLYGON EMPTY\"),\n (None, None, \"POLYGON EMPTY\"),\n (None, None, \"POLYGON EMPTY\"),\n (\n None,\n None,\n \"POLYGON ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))\",\n ),\n (\n None,\n None,\n \"POLYGON ((110 1100,110 1000,100 
1000,100 1100,110 1100),(101 1010,109 1010,109 1090,101 1090,101 1010))\",\n ),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT EMPTY\"),\n (None, None, \"MULTIPOINT (100 1010,101 1020)\"),\n (None, None, \"MULTIPOLYGON EMPTY\"),\n (None, None, \"MULTIPOLYGON EMPTY\"),\n (None, None, \"MULTIPOLYGON EMPTY\"),\n (\n None,\n None,\n \"MULTIPOLYGON (((110 1100,110 1000,100 1000,100 1100,110 1100)),((101 1010,109 1010,109 1090,101 1090,101 1010)))\",\n ),\n (None, None, \"MULTILINESTRING EMPTY\"),\n (None, None, \"MULTILINESTRING EMPTY\"),\n (None, None, \"MULTILINESTRING ((100 1000,110 1000,110 1100))\"),\n (\n None,\n None,\n \"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000))\",\n ),\n (\n None,\n None,\n \"MULTILINESTRING ((100 1000,110 1000,110 1100,100 1100,100 1000),(101 1010,101 1090,109 1090,109 1010,101 1010))\",\n ),\n ]\n assert lyr.GetFeatureCount() == len(expected_results)\n for i, exp_result in enumerate(expected_results):\n feat = lyr.GetNextFeature()\n if (\n feat.GetField(\"id\") != exp_result[0]\n or feat.GetField(\"name\") != exp_result[1]\n or feat.GetGeometryRef().ExportToWkt() != exp_result[2]\n ):\n feat.DumpReadable()\n print(exp_result)\n print(feat.GetField(\"name\"))\n pytest.fail(\"failure at feat index %d\" % i)\n ds = None\n\n\ndef test_ogr_toposjon_objects_is_dict():\n\n ds = ogr.Open(\"data/topojson/topojson2.topojson\")\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == \"a_layer\"\n assert lyr.GetLayerDefn().GetFieldCount() == 2\n assert lyr.GetLayerDefn().GetFieldDefn(0).GetName() == \"id\"\n assert lyr.GetLayerDefn().GetFieldDefn(1).GetName() == \"name\"\n feat = lyr.GetNextFeature()\n assert feat[\"id\"] == \"foo\"\n assert feat[\"name\"] == \"line\"\n ogrtest.check_feature_geometry(feat, \"LINESTRING (100 1000,110 1000,110 1100)\")\n\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == \"TopoJSON\"\n feat = lyr.GetNextFeature()\n 
ogrtest.check_feature_geometry(feat, \"LINESTRING (100 1000,110 1000,110 1100)\")\n\n ds = None\n\n\ndef test_ogr_toposjon_no_transform():\n\n ds = ogr.Open(\"data/topojson/topojson3.topojson\")\n lyr = ds.GetLayer(0)\n assert lyr.GetName() == \"a_layer\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (0 0,10 0,0 10,10 0,0 0)\")\n\n lyr = ds.GetLayer(1)\n assert lyr.GetName() == \"TopoJSON\"\n feat = lyr.GetNextFeature()\n ogrtest.check_feature_geometry(feat, \"LINESTRING (0 0,10 0,0 10,10 0,0 0)\")\n ds = None\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def noOfStepsDP(n, k):
dp = [0] * max(n + 1, 3)
dp[0] = 1
dp[1] = 1
dp[2] = 2
for i in range(3, n + 1):
dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]
return dp[n]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def noOfSteps(n, k):
if n < 0:
return 0
if n == 0:
return 1
t_steps = 0
for i in range(1, k + 1):
t_steps += noOfSteps(n - i, k)
return t_steps
def noOfStepsDP(n, k):
dp = [0] * max(n + 1, 3)
dp[0] = 1
dp[1] = 1
dp[2] = 2
for i in range(3, n + 1):
dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]
return dp[n]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def noOfSteps(n, k):
if n < 0:
return 0
if n == 0:
return 1
t_steps = 0
for i in range(1, k + 1):
t_steps += noOfSteps(n - i, k)
return t_steps
def noOfStepsDP(n, k):
dp = [0] * max(n + 1, 3)
dp[0] = 1
dp[1] = 1
dp[2] = 2
for i in range(3, n + 1):
dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]
return dp[n]
<|reserved_special_token_0|>
noOfSteps(n, 3), noOfStepsDP(n, 3)
<|reserved_special_token_1|>
def noOfSteps(n, k):
if n < 0:
return 0
if n == 0:
return 1
t_steps = 0
for i in range(1, k + 1):
t_steps += noOfSteps(n - i, k)
return t_steps
def noOfStepsDP(n, k):
dp = [0] * max(n + 1, 3)
dp[0] = 1
dp[1] = 1
dp[2] = 2
for i in range(3, n + 1):
dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]
return dp[n]
n = 10
noOfSteps(n, 3), noOfStepsDP(n, 3)
<|reserved_special_token_1|>
# Q. In How many ways N stair can be climb if allowesd steps are 1, 2 or 3.
# triple Sort
def noOfSteps(n, k):
if n<0: return 0
if n == 0: return 1
t_steps = 0
for i in range(1, k+1):
t_steps += noOfSteps(n-i, k)
return t_steps
def noOfStepsDP(n,k):
dp = [0]*max((n+1),3)
dp[0] = 1
dp[1] = 1
dp[2] = 2
for i in range(3, n+1):
dp[i] = dp[i-1]+dp[i-2]+dp[i-3]
return dp[n]
n = 10
noOfSteps(n,3), noOfStepsDP(n,3)
|
flexible
|
{
"blob_id": "6c2699ff8e739595a2648d53745dc3c788536d7b",
"index": 1907,
"step-1": "<mask token>\n\n\ndef noOfStepsDP(n, k):\n dp = [0] * max(n + 1, 3)\n dp[0] = 1\n dp[1] = 1\n dp[2] = 2\n for i in range(3, n + 1):\n dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]\n return dp[n]\n\n\n<mask token>\n",
"step-2": "def noOfSteps(n, k):\n if n < 0:\n return 0\n if n == 0:\n return 1\n t_steps = 0\n for i in range(1, k + 1):\n t_steps += noOfSteps(n - i, k)\n return t_steps\n\n\ndef noOfStepsDP(n, k):\n dp = [0] * max(n + 1, 3)\n dp[0] = 1\n dp[1] = 1\n dp[2] = 2\n for i in range(3, n + 1):\n dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]\n return dp[n]\n\n\n<mask token>\n",
"step-3": "def noOfSteps(n, k):\n if n < 0:\n return 0\n if n == 0:\n return 1\n t_steps = 0\n for i in range(1, k + 1):\n t_steps += noOfSteps(n - i, k)\n return t_steps\n\n\ndef noOfStepsDP(n, k):\n dp = [0] * max(n + 1, 3)\n dp[0] = 1\n dp[1] = 1\n dp[2] = 2\n for i in range(3, n + 1):\n dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]\n return dp[n]\n\n\n<mask token>\nnoOfSteps(n, 3), noOfStepsDP(n, 3)\n",
"step-4": "def noOfSteps(n, k):\n if n < 0:\n return 0\n if n == 0:\n return 1\n t_steps = 0\n for i in range(1, k + 1):\n t_steps += noOfSteps(n - i, k)\n return t_steps\n\n\ndef noOfStepsDP(n, k):\n dp = [0] * max(n + 1, 3)\n dp[0] = 1\n dp[1] = 1\n dp[2] = 2\n for i in range(3, n + 1):\n dp[i] = dp[i - 1] + dp[i - 2] + dp[i - 3]\n return dp[n]\n\n\nn = 10\nnoOfSteps(n, 3), noOfStepsDP(n, 3)\n",
"step-5": "# Q. In How many ways N stair can be climb if allowesd steps are 1, 2 or 3.\r\n# triple Sort\r\n\r\ndef noOfSteps(n, k):\r\n if n<0: return 0\r\n if n == 0: return 1\r\n\r\n t_steps = 0\r\n for i in range(1, k+1):\r\n t_steps += noOfSteps(n-i, k)\r\n \r\n return t_steps\r\n\r\ndef noOfStepsDP(n,k):\r\n\r\n dp = [0]*max((n+1),3)\r\n\r\n dp[0] = 1\r\n dp[1] = 1\r\n dp[2] = 2\r\n\r\n for i in range(3, n+1):\r\n dp[i] = dp[i-1]+dp[i-2]+dp[i-3]\r\n \r\n return dp[n]\r\n\r\nn = 10\r\nnoOfSteps(n,3), noOfStepsDP(n,3)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#给定一个非负整数数组 A,返回一个数组,在该数组中, A 的所有偶数元素之后跟着所有奇数元素。你可以返回满足此条件的任何数组作为答案
class Solution:
def sortArrayByParity(self, A: List[int]) -> List[int]:
l=[]
r=[]
for x in A:
if(x%2==0):
l.append(x)
else:
r.append(x)
ans=l+r
return ans
|
normal
|
{
"blob_id": "ae4d12ff88cf08b2e19b212c80549adc0a0d47dc",
"index": 2030,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def sortArrayByParity(self, A: List[int]) ->List[int]:\n l = []\n r = []\n for x in A:\n if x % 2 == 0:\n l.append(x)\n else:\n r.append(x)\n ans = l + r\n return ans\n",
"step-4": "#给定一个非负整数数组 A,返回一个数组,在该数组中, A 的所有偶数元素之后跟着所有奇数元素。你可以返回满足此条件的任何数组作为答案\nclass Solution:\n def sortArrayByParity(self, A: List[int]) -> List[int]:\n l=[]\n r=[]\n for x in A:\n if(x%2==0):\n l.append(x)\n else:\n r.append(x)\n ans=l+r\n return ans\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Float32():
return tf.float32
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def Float32():
return tf.float32
def Float16():
return tf.float16
<|reserved_special_token_1|>
import tensorflow as tf
def Float32():
return tf.float32
def Float16():
return tf.float16
|
flexible
|
{
"blob_id": "c60b8eec57d845c73ee3e00432747d23748c1706",
"index": 9537,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef Float32():\n return tf.float32\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef Float32():\n return tf.float32\n\n\ndef Float16():\n return tf.float16\n",
"step-4": "import tensorflow as tf\n\n\ndef Float32():\n return tf.float32\n\n\ndef Float16():\n return tf.float16\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import division, print_function, absolute_import
"""
The dataset is stored in a CSV file, so we can use the TFLearn load_csv() function to
load the data from the CSV file into a python list.
We specify the 'target_column' argument to indicate that our labels (survived or not)
are located in the first column (id: 0). The function will return a tuple: (data, labels).
"""
import numpy as np
import tflearn
#DownLoad the Titanic dataset
from tflearn.datasets import titanic
titanic.download_dataset('titanic_dataset.csv')
#loadCSVfile,indicate that the first column represent labels
from tflearn.data_utils import load_csv
data, labels = load_csv('titanic_dataset.csv',target_column=0,
categorical_labels=True,n_classes=2)
'''
Preprocessing Data
Data are given 'as is' and need some preprocessing to be ready for use in our deep neural network classifier.
First, we will discard the fields that are not likely to help in our analysis.
For example, we make the assumption that the 'name' field will not be very useful in our task,
since a passenger's name and his or her chance of surviving are probably not correlated.
With such thinking, we can go ahead and discard the 'name' and 'ticket' fields.
Then, we need to convert all our data to numerical values,
because a neural network model can only perform operations over numbers.
However, our dataset contains some non-numerical values, such as 'name' and 'sex'. Because 'name' is discarded,
we just need to handle the 'sex' field. In this simple case, we will just assign '0' to males and '1' to females.
example:
survived pclass name sex age sibsp parch ticket fare
1 1 Aubart, Mme. Leontine Pauline female 24 0 0 PC 17477 69.3000
'''
# Here is the preprocessing function:
#Preprocessing function
def preprocess(passengers,columns_to_delete):
#Sort by descending is and delete column
for column_to_delete in sorted(columns_to_delete,reverse = True):
[passenger.pop(column_to_delete) for passenger in passengers]
# print(type(passengers[0]))
for i in range(len(passengers)):
# Converting 'sex' field to float (id is 1 after removing labels column)
passengers[i][1] = 1. if passengers[i][1] == 'female' else 0.
print(np.array(passengers,dtype=np.float32))
return np.array(passengers,dtype=np.float32)
# Ignore 'name' and 'ticket' columns (id 1 & 6 of data array)
to_ignore = [1,6]
#Preprocess data
data = preprocess(data,to_ignore)
'''
Build a Deep Neural Network
We are building a 3-layer neural network using TFLearn. First, we need to specify the shape of our input data.
In our case, each sample has a total of 6 features, and we will process samples per batch to save memory.
So our data input shape is [None, 6] ('None' stands for an unknown dimension, so we can change the total
number of samples that are processed in a batch).
'''
# Build neural network
net = tflearn.input_data(shape=[None,6])
net = tflearn.fully_connected(net,32)
net = tflearn.fully_connected(net,32)
net = tflearn.fully_connected(net,2,activation='softmax')
net =tflearn.regression(net)
'''
Training
TFLearn provides a model wrapper ('DNN') that automatically performs neural network classifier tasks,
such as training, prediction, save/restore, and more. We will run it for 10 epochs
(i.e., the network will see all data 10 times) with a batch size of 16.
'''
#Define model
model = tflearn.DNN(net)
# Start training (apply gradient descent algorithm)
model.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)
'''
Try the Model
It's time to try out our model.
For fun, let's take Titanic movie protagonists
(DiCaprio and Winslet) and calculate their chance of surviving (class 1).
'''
# Let's create some data for DiCaprio and Winslet
dicaprio = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0000]
winslet = [1, 'Rose DeWitt Bukater', 'female', 17, 1, 2, 'N/A', 100.0000]
# Preprocess data
dicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)
# Predict surviving chances (class 1 results)
pred = model.predict([dicaprio, winslet])
print("DiCaprio Surviving Rate:", pred[0][1])
print("Winslet Surviving Rate:", pred[1][1])
|
normal
|
{
"blob_id": "87e9c1d264523d02b287dedb44472fc08b488908",
"index": 9630,
"step-1": "<mask token>\n\n\ndef preprocess(passengers, columns_to_delete):\n for column_to_delete in sorted(columns_to_delete, reverse=True):\n [passenger.pop(column_to_delete) for passenger in passengers]\n for i in range(len(passengers)):\n passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0\n print(np.array(passengers, dtype=np.float32))\n return np.array(passengers, dtype=np.float32)\n\n\n<mask token>\n",
"step-2": "<mask token>\ntitanic.download_dataset('titanic_dataset.csv')\n<mask token>\n\n\ndef preprocess(passengers, columns_to_delete):\n for column_to_delete in sorted(columns_to_delete, reverse=True):\n [passenger.pop(column_to_delete) for passenger in passengers]\n for i in range(len(passengers)):\n passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0\n print(np.array(passengers, dtype=np.float32))\n return np.array(passengers, dtype=np.float32)\n\n\n<mask token>\nmodel.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)\n<mask token>\nprint('DiCaprio Surviving Rate:', pred[0][1])\nprint('Winslet Surviving Rate:', pred[1][1])\n",
"step-3": "<mask token>\ntitanic.download_dataset('titanic_dataset.csv')\n<mask token>\ndata, labels = load_csv('titanic_dataset.csv', target_column=0,\n categorical_labels=True, n_classes=2)\n<mask token>\n\n\ndef preprocess(passengers, columns_to_delete):\n for column_to_delete in sorted(columns_to_delete, reverse=True):\n [passenger.pop(column_to_delete) for passenger in passengers]\n for i in range(len(passengers)):\n passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0\n print(np.array(passengers, dtype=np.float32))\n return np.array(passengers, dtype=np.float32)\n\n\nto_ignore = [1, 6]\ndata = preprocess(data, to_ignore)\n<mask token>\nnet = tflearn.input_data(shape=[None, 6])\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, 2, activation='softmax')\nnet = tflearn.regression(net)\n<mask token>\nmodel = tflearn.DNN(net)\nmodel.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)\n<mask token>\ndicaprio = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0]\nwinslet = [1, 'Rose DeWitt Bukater', 'female', 17, 1, 2, 'N/A', 100.0]\ndicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)\npred = model.predict([dicaprio, winslet])\nprint('DiCaprio Surviving Rate:', pred[0][1])\nprint('Winslet Surviving Rate:', pred[1][1])\n",
"step-4": "from __future__ import division, print_function, absolute_import\n<mask token>\nimport numpy as np\nimport tflearn\nfrom tflearn.datasets import titanic\ntitanic.download_dataset('titanic_dataset.csv')\nfrom tflearn.data_utils import load_csv\ndata, labels = load_csv('titanic_dataset.csv', target_column=0,\n categorical_labels=True, n_classes=2)\n<mask token>\n\n\ndef preprocess(passengers, columns_to_delete):\n for column_to_delete in sorted(columns_to_delete, reverse=True):\n [passenger.pop(column_to_delete) for passenger in passengers]\n for i in range(len(passengers)):\n passengers[i][1] = 1.0 if passengers[i][1] == 'female' else 0.0\n print(np.array(passengers, dtype=np.float32))\n return np.array(passengers, dtype=np.float32)\n\n\nto_ignore = [1, 6]\ndata = preprocess(data, to_ignore)\n<mask token>\nnet = tflearn.input_data(shape=[None, 6])\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, 32)\nnet = tflearn.fully_connected(net, 2, activation='softmax')\nnet = tflearn.regression(net)\n<mask token>\nmodel = tflearn.DNN(net)\nmodel.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)\n<mask token>\ndicaprio = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0]\nwinslet = [1, 'Rose DeWitt Bukater', 'female', 17, 1, 2, 'N/A', 100.0]\ndicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)\npred = model.predict([dicaprio, winslet])\nprint('DiCaprio Surviving Rate:', pred[0][1])\nprint('Winslet Surviving Rate:', pred[1][1])\n",
"step-5": "from __future__ import division, print_function, absolute_import\n\"\"\"\nThe dataset is stored in a CSV file, so we can use the TFLearn load_csv() function to\n load the data from the CSV file into a python list.\n We specify the 'target_column' argument to indicate that our labels (survived or not)\n are located in the first column (id: 0). The function will return a tuple: (data, labels).\n\"\"\"\nimport numpy as np\nimport tflearn\n\n#DownLoad the Titanic dataset\nfrom tflearn.datasets import titanic\ntitanic.download_dataset('titanic_dataset.csv')\n\n#loadCSVfile,indicate that the first column represent labels\nfrom tflearn.data_utils import load_csv\ndata, labels = load_csv('titanic_dataset.csv',target_column=0,\n\t\t\t\t\t\tcategorical_labels=True,n_classes=2)\n\n'''\nPreprocessing Data\n\nData are given 'as is' and need some preprocessing to be ready for use in our deep neural network classifier.\nFirst, we will discard the fields that are not likely to help in our analysis.\nFor example, we make the assumption that the 'name' field will not be very useful in our task,\nsince a passenger's name and his or her chance of surviving are probably not correlated.\nWith such thinking, we can go ahead and discard the 'name' and 'ticket' fields.\nThen, we need to convert all our data to numerical values,\nbecause a neural network model can only perform operations over numbers.\nHowever, our dataset contains some non-numerical values, such as 'name' and 'sex'. Because 'name' is discarded,\nwe just need to handle the 'sex' field. In this simple case, we will just assign '0' to males and '1' to females.\n\nexample:\nsurvived\tpclass\tname\t\t\t\t\t\t\tsex\t\tage\t\tsibsp\tparch\tticket\t\tfare\n1\t\t\t1\t\tAubart, Mme. 
Leontine Pauline\tfemale\t24\t\t0\t\t0\t\tPC 17477\t69.3000\n'''\n# Here is the preprocessing function:\n#Preprocessing function\ndef preprocess(passengers,columns_to_delete):\n\t#Sort by descending is and delete column\n\tfor column_to_delete in sorted(columns_to_delete,reverse = True):\n\t\t[passenger.pop(column_to_delete) for passenger in passengers]\n\t# print(type(passengers[0]))\n\tfor i in range(len(passengers)):\n\t\t# Converting 'sex' field to float (id is 1 after removing labels column)\n\t\tpassengers[i][1] = 1. if passengers[i][1] == 'female' else 0.\n\tprint(np.array(passengers,dtype=np.float32))\n\treturn np.array(passengers,dtype=np.float32)\n\n# Ignore 'name' and 'ticket' columns (id 1 & 6 of data array)\nto_ignore = [1,6]\n#Preprocess data\ndata = preprocess(data,to_ignore)\n\n'''\nBuild a Deep Neural Network\n\nWe are building a 3-layer neural network using TFLearn. First, we need to specify the shape of our input data.\nIn our case, each sample has a total of 6 features, and we will process samples per batch to save memory.\nSo our data input shape is [None, 6] ('None' stands for an unknown dimension, so we can change the total\nnumber of samples that are processed in a batch).\n'''\n# Build neural network\nnet = tflearn.input_data(shape=[None,6])\nnet = tflearn.fully_connected(net,32)\nnet = tflearn.fully_connected(net,32)\nnet = tflearn.fully_connected(net,2,activation='softmax')\nnet =tflearn.regression(net)\n\n'''\nTraining\n\nTFLearn provides a model wrapper ('DNN') that automatically performs neural network classifier tasks,\nsuch as training, prediction, save/restore, and more. 
We will run it for 10 epochs\n(i.e., the network will see all data 10 times) with a batch size of 16.\n'''\n\n#Define model\nmodel = tflearn.DNN(net)\n# Start training (apply gradient descent algorithm)\nmodel.fit(data, labels, n_epoch=10, batch_size=16, show_metric=True)\n\n'''\nTry the Model\nIt's time to try out our model.\nFor fun, let's take Titanic movie protagonists\n(DiCaprio and Winslet) and calculate their chance of surviving (class 1).\n'''\n\n# Let's create some data for DiCaprio and Winslet\ndicaprio = [3, 'Jack Dawson', 'male', 19, 0, 0, 'N/A', 5.0000]\nwinslet = [1, 'Rose DeWitt Bukater', 'female', 17, 1, 2, 'N/A', 100.0000]\n# Preprocess data\ndicaprio, winslet = preprocess([dicaprio, winslet], to_ignore)\n# Predict surviving chances (class 1 results)\npred = model.predict([dicaprio, winslet])\nprint(\"DiCaprio Surviving Rate:\", pred[0][1])\nprint(\"Winslet Surviving Rate:\", pred[1][1])\n\n\n\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app_name = 'stock_main'
urlpatterns = [url('^$', views.Stock_main.as_view(), name='stock_main')]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<|reserved_special_token_1|>
from django.conf.urls import url, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
app_name = 'stock_main'
urlpatterns = [url('^$', views.Stock_main.as_view(), name='stock_main')]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<|reserved_special_token_1|>
from django.conf.urls import url, include
from . import views
from django.conf import settings
from django.conf.urls.static import static
app_name = 'stock_main'
urlpatterns = [
url(r'^$', views.Stock_main.as_view(), name='stock_main'),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
flexible
|
{
"blob_id": "16302f23edf16e201c3f3e9800dc4a9290ddc29e",
"index": 7038,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-3": "<mask token>\napp_name = 'stock_main'\nurlpatterns = [url('^$', views.Stock_main.as_view(), name='stock_main')]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-4": "from django.conf.urls import url, include\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\napp_name = 'stock_main'\nurlpatterns = [url('^$', views.Stock_main.as_view(), name='stock_main')]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"step-5": "from django.conf.urls import url, include\nfrom . import views\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\napp_name = 'stock_main'\n\nurlpatterns = [\n url(r'^$', views.Stock_main.as_view(), name='stock_main'),\n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app_name = 'restuarant'
urlpatterns = [path('orderplaced/', views.orderplaced), path('restaurant/',
views.restuarent, name='restuarant'), path('login/restaurant/', views.
restLogin, name='rlogin'), path('register/restaurant/', views.
restRegister, name='rregister'), path('profile/restaurant/', views.
restaurantProfile, name='rprofile'), path('restaurant/create/', views.
createRestaurant, name='rcreate'), path('restaurant/update/<int:id>/',
views.updateRestaurant, name='rupdate'), path('restaurant/orderlist/',
views.orderlist, name='orderlist'), path('restaurant/menu/', views.
menuManipulation, name='mmenu'), path('logout/', views.Logout, name=
'logout'), path('restaurant/<int:pk>/', views.restuarantMenu, name=
'menu'), path('checkout/', views.checkout, name='checkout'), path(
'profile/change_password/', views.change_password, name='change_password')]
<|reserved_special_token_1|>
from django.urls import path
from . import views
app_name = 'restuarant'
urlpatterns = [path('orderplaced/', views.orderplaced), path('restaurant/',
views.restuarent, name='restuarant'), path('login/restaurant/', views.
restLogin, name='rlogin'), path('register/restaurant/', views.
restRegister, name='rregister'), path('profile/restaurant/', views.
restaurantProfile, name='rprofile'), path('restaurant/create/', views.
createRestaurant, name='rcreate'), path('restaurant/update/<int:id>/',
views.updateRestaurant, name='rupdate'), path('restaurant/orderlist/',
views.orderlist, name='orderlist'), path('restaurant/menu/', views.
menuManipulation, name='mmenu'), path('logout/', views.Logout, name=
'logout'), path('restaurant/<int:pk>/', views.restuarantMenu, name=
'menu'), path('checkout/', views.checkout, name='checkout'), path(
'profile/change_password/', views.change_password, name='change_password')]
|
flexible
|
{
"blob_id": "63830a3c09a2d0a267b030a336062d5e95b9a71a",
"index": 3308,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp_name = 'restuarant'\nurlpatterns = [path('orderplaced/', views.orderplaced), path('restaurant/',\n views.restuarent, name='restuarant'), path('login/restaurant/', views.\n restLogin, name='rlogin'), path('register/restaurant/', views.\n restRegister, name='rregister'), path('profile/restaurant/', views.\n restaurantProfile, name='rprofile'), path('restaurant/create/', views.\n createRestaurant, name='rcreate'), path('restaurant/update/<int:id>/',\n views.updateRestaurant, name='rupdate'), path('restaurant/orderlist/',\n views.orderlist, name='orderlist'), path('restaurant/menu/', views.\n menuManipulation, name='mmenu'), path('logout/', views.Logout, name=\n 'logout'), path('restaurant/<int:pk>/', views.restuarantMenu, name=\n 'menu'), path('checkout/', views.checkout, name='checkout'), path(\n 'profile/change_password/', views.change_password, name='change_password')]\n",
"step-3": "from django.urls import path\nfrom . import views\napp_name = 'restuarant'\nurlpatterns = [path('orderplaced/', views.orderplaced), path('restaurant/',\n views.restuarent, name='restuarant'), path('login/restaurant/', views.\n restLogin, name='rlogin'), path('register/restaurant/', views.\n restRegister, name='rregister'), path('profile/restaurant/', views.\n restaurantProfile, name='rprofile'), path('restaurant/create/', views.\n createRestaurant, name='rcreate'), path('restaurant/update/<int:id>/',\n views.updateRestaurant, name='rupdate'), path('restaurant/orderlist/',\n views.orderlist, name='orderlist'), path('restaurant/menu/', views.\n menuManipulation, name='mmenu'), path('logout/', views.Logout, name=\n 'logout'), path('restaurant/<int:pk>/', views.restuarantMenu, name=\n 'menu'), path('checkout/', views.checkout, name='checkout'), path(\n 'profile/change_password/', views.change_password, name='change_password')]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
video_compress.apply_async(['a'], queue='high')
video_compress.apply_async(['b'], queue='low')
video_upload.apply_async(['c'], queue='low')
video_upload.apply_async(['d'], queue='high')
<|reserved_special_token_1|>
from tasks import video_compress, video_upload
if __name__ == '__main__':
video_compress.apply_async(['a'], queue='high')
video_compress.apply_async(['b'], queue='low')
video_upload.apply_async(['c'], queue='low')
video_upload.apply_async(['d'], queue='high')
<|reserved_special_token_1|>
from tasks import video_compress, video_upload
if __name__ == '__main__':
video_compress.apply_async(["a"],queue='high')
video_compress.apply_async(["b"],queue='low')
video_upload.apply_async(["c"], queue='low')
video_upload.apply_async(["d"], queue='high')
|
flexible
|
{
"blob_id": "2cd7d4fe87de66e85bc0d060e2eaa68be39eed02",
"index": 9461,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n video_compress.apply_async(['a'], queue='high')\n video_compress.apply_async(['b'], queue='low')\n video_upload.apply_async(['c'], queue='low')\n video_upload.apply_async(['d'], queue='high')\n",
"step-3": "from tasks import video_compress, video_upload\nif __name__ == '__main__':\n video_compress.apply_async(['a'], queue='high')\n video_compress.apply_async(['b'], queue='low')\n video_upload.apply_async(['c'], queue='low')\n video_upload.apply_async(['d'], queue='high')\n",
"step-4": "from tasks import video_compress, video_upload\nif __name__ == '__main__':\n video_compress.apply_async([\"a\"],queue='high')\n video_compress.apply_async([\"b\"],queue='low')\n video_upload.apply_async([\"c\"], queue='low')\n video_upload.apply_async([\"d\"], queue='high')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class FrenetPath:
def __init__(self):
self.t = []
self.d = []
self.d_d = []
self.d_dd = []
self.d_ddd = []
self.s = []
self.s_d = []
self.s_dd = []
self.s_ddd = []
self.cd = 0.0
self.cv = 0.0
self.cf = 0.0
self.x = []
self.y = []
self.yaw = []
self.ds = []
self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
frenet_paths = []
for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
for Ti in np.arange(MINT, MAXT, DT):
fp = FrenetPath()
lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
fp.t = [t for t in np.arange(0.0, Ti, DT)]
fp.d = [lat_qp.calc_point(t) for t in fp.t]
fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]
for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE,
TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
tfp = copy.deepcopy(fp)
lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)
tfp.s = [lon_qp.calc_point(t) for t in fp.t]
tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]
Jp = sum(np.power(tfp.d_ddd, 2))
Js = sum(np.power(tfp.s_ddd, 2))
ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2
tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2
tfp.cv = KJ * Js + KT * Ti + KD * ds
tfp.cf = KLAT * tfp.cd + KLON * tfp.cv
frenet_paths.append(tfp)
return frenet_paths
<|reserved_special_token_0|>
def check_collision(fp, ob):
for i in range(len(ob[:, 0])):
d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in
zip(fp.x, fp.y)]
collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])
if collision:
return False
return True
def check_paths(fplist, ob):
"""
check path above max speed, max a, does collision or not
"""
okind = []
for i in range(len(fplist)):
if any([(v > MAX_SPEED) for v in fplist[i].s_d]):
continue
elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):
continue
elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):
continue
elif not check_collision(fplist[i], ob):
continue
okind.append(i)
return [fplist[i] for i in okind]
<|reserved_special_token_0|>
def generate_road_widle(x, y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) +
math.pi / 2.0)
road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +
math.pi / 2.0)
road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s
) + math.pi / 2.0)
road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s
) + math.pi / 2.0)
road_left_x.append(road_left_ix)
road_left_y.append(road_left_iy)
road_right_x.append(road_right_ix)
road_right_y.append(road_right_iy)
return road_left_x, road_left_y, road_right_x, road_right_y
def generate_target_course(x, y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(csp.calc_yaw(i_s))
rk.append(csp.calc_curvature(i_s))
return rx, ry, ryaw, rk, csp
def load_global_path():
global zero_cord_x, zero_cord_y
bet = 0.1
blank = []
white = []
yellow = []
GPS_x = []
GPS_y = []
nums, ber = np.loadtxt(
'/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'
, dtype=str, delimiter=',', unpack=True)
for i in range(len(nums)):
if not nums[i] in blank:
yellow.append(float(nums[i]))
white.append(float(ber[i]))
bx = yellow[0]
by = white[0]
for i in range(len(yellow)):
dx = yellow[i] - bx
dy = white[i] - by
dis = math.sqrt(dx ** 2 + dy ** 2)
if dis > bet:
GPS_x.append(yellow[i])
GPS_y.append(white[i])
bx = yellow[i]
by = white[i]
GPS_x = np.array(GPS_x)
GPS_y = np.array(GPS_y)
zero_cord_x = GPS_x[0]
zero_cord_y = GPS_y[0]
GPS_x = GPS_x - zero_cord_x
GPS_y = GPS_y - zero_cord_y
plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')
plt.plot()
plt.show()
return GPS_x, GPS_y
class Info(object):
def __init__(self):
self.CurrGPS_lat = float(-1)
self.CurrGPS_lon = float(-1)
self.CurrentVelocity = float(-1)
self.Target_Velocity = float(-1)
self.ImuYaw = float(-1)
self.Target_Theta = float(-1)
self.gob = np.array([])
self.ob = np.array([])
self.gobx = np.array([])
self.goby = np.array([])
rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)
sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.
FeedbackCallbackGPSIMU, queue_size=10)
rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.
RVcallback, queue_size=10)
def FeedbackCallbackGPSIMU(self, msg):
self.CurrGPS_lat = msg.latitude
self.CurrGPS_lon = msg.longitude
self.ImuYaw = (90 - msg.course_angle) * np.pi / 180
def FeedbackCallbackObs(self, msg):
global Gob_x
global Gob_y
self.gobx = msg.x
self.goby = msg.y
Gob_x.append(self.gobx)
Gob_y.append(self.goby)
self.gob = np.column_stack((Gob_x, Gob_y))
def RVcallback(self, msg):
self.CurrentVelocity = msg.Base_Vehspd
def init(self):
return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,
self.goby, self.gob, self.CurrentVelocity)
def talker(self, Target_Velocity, path_record):
self.rate = rospy.Rate(100)
self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,
queue_size=10)
self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)
self.pub_Velocity.publish(Target_Velocity)
self.path_pub.publish(path_record)
def get_transalation(curr_gps_lat, curr_gps_lon):
curr_posy = float(curr_gps_lon) - zero_cord_y
curr_posx = float(curr_gps_lat) - zero_cord_x
return curr_posx, curr_posy
def get_transformation(pt, curr_yaw, T):
c, s = np.cos(curr_yaw), np.sin(curr_yaw)
R = np.array(((c, -s), (s, c)))
pt = pt.dot(R) + T
return pt
<|reserved_special_token_0|>
def get_lateral_dist(tx, ty, curr_posx, curr_posy):
dist = []
for x in range(0, len(tx) - 1):
dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -
ty[x]))
lat_dist = min(dist)
st = dist.index(min(dist))
theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])
theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])
if lat_dist < THRESH_DIST:
lat_dist = 0
curr_posx = tx[st]
curr_posy = ty[st]
if theta2 < theta1:
lat_dist = -lat_dist
return st, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
a = 1.0 * (target - current)
return a
def main():
ptx = []
pty = []
ptx, pty = load_global_path()
tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(
ptx, pty)
c_speed = 5.0 / 3.6
c_acc = 1.0
c_d_dd = 0
c_d_d = 0
area = 25.0
start = time.time()
rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)
my_node = Info()
while not rospy.is_shutdown():
(CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity
) = my_node.init()
ob = []
if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
T = [curr_posx, curr_posy]
curr_yaw = ImuYaw
if len(gob) == 0:
ob = [[-20, -20]]
else:
ob = gob
ob_len = len(ob) - 1
for x in xrange(0, ob_len):
ob = np.array(ob)
ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
try:
curr_posx, curr_posy = get_transalation(CurrGPS_lat,
CurrGPS_lon)
spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,
curr_posx, curr_posy)
s0 = get_arc_length(tx, ty, spt)
path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,
c_d_dd, ob)
c_speed = path.s_d[1]
c_d_d = path.d_d[1]
c_d_dd = path.d_dd[1]
if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
print('Goal')
c_speed = 0.0
break
if show_animation:
plt.cla()
plt.plot(tx, ty, '-.k')
plt.plot(road_left_x, road_left_y, '-k')
plt.plot(road_right_x, road_right_y, '-k')
plt.plot(ob[:, 0], ob[:, 1], 'ob')
plt.plot(path.x[1:], path.y[1:], '-or')
plt.plot(path.x[1], path.y[1], 'vc')
plt.xlim(path.x[1] - area, path.x[1] + area)
plt.ylim(path.y[1] - area, path.y[1] + area)
plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),
math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,
head_length=1.0)
plt.title('v[km/h]:' + str(c_speed)[0:4])
plt.xlabel(u'x/m', fontsize=14)
plt.ylabel(u'y/m', fontsize=14)
plt.pause(0.0001)
PathFail_flag = 0
except:
PathFail_flag = 1
print("Don't find optimal path")
global Gob_x
global Gob_y
Gob_x *= 0
Gob_y *= 0
try:
"""
acc = proportional_control(6, CurrentVelocity)
temp1=path.yaw[1] `
temp2=curr_yaw
if temp1<0:
temp1=6.28+temp1
if temp2<0:
temp2=6.28+temp2
val = temp1-temp2
if val > 3.14:
val = val - 6.28
if val < -3.14:
val = val + 6.28
val = math.degrees(val)
if val > 50:
val = 50
if val < -50:
val = -50
my_node.talker(acc,val)
"""
path_record = localPath()
for i in range(len(path.x[1:])):
path_record.path_x.append(path.x[i])
path_record.path_y.append(path.y[i])
if len(path_record.path_x) > 10000:
path_record.path_x.pop(0)
path_record.path_y.pop(0)
my_node.talker(c_speed, path_record)
except:
print('local path send fail')
pass
print('Finish')
end = time.time()
if show_animation:
plt.grid(True)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FrenetPath:
def __init__(self):
self.t = []
self.d = []
self.d_d = []
self.d_dd = []
self.d_ddd = []
self.s = []
self.s_d = []
self.s_dd = []
self.s_ddd = []
self.cd = 0.0
self.cv = 0.0
self.cf = 0.0
self.x = []
self.y = []
self.yaw = []
self.ds = []
self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
frenet_paths = []
for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
for Ti in np.arange(MINT, MAXT, DT):
fp = FrenetPath()
lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
fp.t = [t for t in np.arange(0.0, Ti, DT)]
fp.d = [lat_qp.calc_point(t) for t in fp.t]
fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]
for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE,
TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
tfp = copy.deepcopy(fp)
lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)
tfp.s = [lon_qp.calc_point(t) for t in fp.t]
tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]
Jp = sum(np.power(tfp.d_ddd, 2))
Js = sum(np.power(tfp.s_ddd, 2))
ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2
tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2
tfp.cv = KJ * Js + KT * Ti + KD * ds
tfp.cf = KLAT * tfp.cd + KLON * tfp.cv
frenet_paths.append(tfp)
return frenet_paths
<|reserved_special_token_0|>
def check_collision(fp, ob):
for i in range(len(ob[:, 0])):
d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in
zip(fp.x, fp.y)]
collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])
if collision:
return False
return True
def check_paths(fplist, ob):
"""
check path above max speed, max a, does collision or not
"""
okind = []
for i in range(len(fplist)):
if any([(v > MAX_SPEED) for v in fplist[i].s_d]):
continue
elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):
continue
elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):
continue
elif not check_collision(fplist[i], ob):
continue
okind.append(i)
return [fplist[i] for i in okind]
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
ob = np.array(ob)
fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
fplist = calc_global_paths(fplist, csp)
fplist = check_paths(fplist, ob)
mincost = float('inf')
bestpath = None
for fp in fplist:
if mincost >= fp.cf:
mincost = fp.cf
bestpath = fp
return bestpath
def generate_road_widle(x, y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) +
math.pi / 2.0)
road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +
math.pi / 2.0)
road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s
) + math.pi / 2.0)
road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s
) + math.pi / 2.0)
road_left_x.append(road_left_ix)
road_left_y.append(road_left_iy)
road_right_x.append(road_right_ix)
road_right_y.append(road_right_iy)
return road_left_x, road_left_y, road_right_x, road_right_y
def generate_target_course(x, y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(csp.calc_yaw(i_s))
rk.append(csp.calc_curvature(i_s))
return rx, ry, ryaw, rk, csp
def load_global_path():
global zero_cord_x, zero_cord_y
bet = 0.1
blank = []
white = []
yellow = []
GPS_x = []
GPS_y = []
nums, ber = np.loadtxt(
'/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'
, dtype=str, delimiter=',', unpack=True)
for i in range(len(nums)):
if not nums[i] in blank:
yellow.append(float(nums[i]))
white.append(float(ber[i]))
bx = yellow[0]
by = white[0]
for i in range(len(yellow)):
dx = yellow[i] - bx
dy = white[i] - by
dis = math.sqrt(dx ** 2 + dy ** 2)
if dis > bet:
GPS_x.append(yellow[i])
GPS_y.append(white[i])
bx = yellow[i]
by = white[i]
GPS_x = np.array(GPS_x)
GPS_y = np.array(GPS_y)
zero_cord_x = GPS_x[0]
zero_cord_y = GPS_y[0]
GPS_x = GPS_x - zero_cord_x
GPS_y = GPS_y - zero_cord_y
plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')
plt.plot()
plt.show()
return GPS_x, GPS_y
class Info(object):
def __init__(self):
self.CurrGPS_lat = float(-1)
self.CurrGPS_lon = float(-1)
self.CurrentVelocity = float(-1)
self.Target_Velocity = float(-1)
self.ImuYaw = float(-1)
self.Target_Theta = float(-1)
self.gob = np.array([])
self.ob = np.array([])
self.gobx = np.array([])
self.goby = np.array([])
rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)
sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.
FeedbackCallbackGPSIMU, queue_size=10)
rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.
RVcallback, queue_size=10)
def FeedbackCallbackGPSIMU(self, msg):
self.CurrGPS_lat = msg.latitude
self.CurrGPS_lon = msg.longitude
self.ImuYaw = (90 - msg.course_angle) * np.pi / 180
def FeedbackCallbackObs(self, msg):
global Gob_x
global Gob_y
self.gobx = msg.x
self.goby = msg.y
Gob_x.append(self.gobx)
Gob_y.append(self.goby)
self.gob = np.column_stack((Gob_x, Gob_y))
def RVcallback(self, msg):
self.CurrentVelocity = msg.Base_Vehspd
def init(self):
return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,
self.goby, self.gob, self.CurrentVelocity)
def talker(self, Target_Velocity, path_record):
self.rate = rospy.Rate(100)
self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,
queue_size=10)
self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)
self.pub_Velocity.publish(Target_Velocity)
self.path_pub.publish(path_record)
def get_transalation(curr_gps_lat, curr_gps_lon):
curr_posy = float(curr_gps_lon) - zero_cord_y
curr_posx = float(curr_gps_lat) - zero_cord_x
return curr_posx, curr_posy
def get_transformation(pt, curr_yaw, T):
c, s = np.cos(curr_yaw), np.sin(curr_yaw)
R = np.array(((c, -s), (s, c)))
pt = pt.dot(R) + T
return pt
<|reserved_special_token_0|>
def get_lateral_dist(tx, ty, curr_posx, curr_posy):
dist = []
for x in range(0, len(tx) - 1):
dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -
ty[x]))
lat_dist = min(dist)
st = dist.index(min(dist))
theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])
theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])
if lat_dist < THRESH_DIST:
lat_dist = 0
curr_posx = tx[st]
curr_posy = ty[st]
if theta2 < theta1:
lat_dist = -lat_dist
return st, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
a = 1.0 * (target - current)
return a
def main():
ptx = []
pty = []
ptx, pty = load_global_path()
tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(
ptx, pty)
c_speed = 5.0 / 3.6
c_acc = 1.0
c_d_dd = 0
c_d_d = 0
area = 25.0
start = time.time()
rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)
my_node = Info()
while not rospy.is_shutdown():
(CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity
) = my_node.init()
ob = []
if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
T = [curr_posx, curr_posy]
curr_yaw = ImuYaw
if len(gob) == 0:
ob = [[-20, -20]]
else:
ob = gob
ob_len = len(ob) - 1
for x in xrange(0, ob_len):
ob = np.array(ob)
ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
try:
curr_posx, curr_posy = get_transalation(CurrGPS_lat,
CurrGPS_lon)
spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,
curr_posx, curr_posy)
s0 = get_arc_length(tx, ty, spt)
path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,
c_d_dd, ob)
c_speed = path.s_d[1]
c_d_d = path.d_d[1]
c_d_dd = path.d_dd[1]
if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
print('Goal')
c_speed = 0.0
break
if show_animation:
plt.cla()
plt.plot(tx, ty, '-.k')
plt.plot(road_left_x, road_left_y, '-k')
plt.plot(road_right_x, road_right_y, '-k')
plt.plot(ob[:, 0], ob[:, 1], 'ob')
plt.plot(path.x[1:], path.y[1:], '-or')
plt.plot(path.x[1], path.y[1], 'vc')
plt.xlim(path.x[1] - area, path.x[1] + area)
plt.ylim(path.y[1] - area, path.y[1] + area)
plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),
math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,
head_length=1.0)
plt.title('v[km/h]:' + str(c_speed)[0:4])
plt.xlabel(u'x/m', fontsize=14)
plt.ylabel(u'y/m', fontsize=14)
plt.pause(0.0001)
PathFail_flag = 0
except:
PathFail_flag = 1
print("Don't find optimal path")
global Gob_x
global Gob_y
Gob_x *= 0
Gob_y *= 0
try:
"""
acc = proportional_control(6, CurrentVelocity)
temp1=path.yaw[1] `
temp2=curr_yaw
if temp1<0:
temp1=6.28+temp1
if temp2<0:
temp2=6.28+temp2
val = temp1-temp2
if val > 3.14:
val = val - 6.28
if val < -3.14:
val = val + 6.28
val = math.degrees(val)
if val > 50:
val = 50
if val < -50:
val = -50
my_node.talker(acc,val)
"""
path_record = localPath()
for i in range(len(path.x[1:])):
path_record.path_x.append(path.x[i])
path_record.path_y.append(path.y[i])
if len(path_record.path_x) > 10000:
path_record.path_x.pop(0)
path_record.path_y.pop(0)
my_node.talker(c_speed, path_record)
except:
print('local path send fail')
pass
print('Finish')
end = time.time()
if show_animation:
plt.grid(True)
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class FrenetPath:
def __init__(self):
self.t = []
self.d = []
self.d_d = []
self.d_dd = []
self.d_ddd = []
self.s = []
self.s_d = []
self.s_dd = []
self.s_ddd = []
self.cd = 0.0
self.cv = 0.0
self.cf = 0.0
self.x = []
self.y = []
self.yaw = []
self.ds = []
self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
frenet_paths = []
for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
for Ti in np.arange(MINT, MAXT, DT):
fp = FrenetPath()
lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)
fp.t = [t for t in np.arange(0.0, Ti, DT)]
fp.d = [lat_qp.calc_point(t) for t in fp.t]
fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]
for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE,
TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
tfp = copy.deepcopy(fp)
lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)
tfp.s = [lon_qp.calc_point(t) for t in fp.t]
tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]
Jp = sum(np.power(tfp.d_ddd, 2))
Js = sum(np.power(tfp.s_ddd, 2))
ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2
tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2
tfp.cv = KJ * Js + KT * Ti + KD * ds
tfp.cf = KLAT * tfp.cd + KLON * tfp.cv
frenet_paths.append(tfp)
return frenet_paths
def calc_global_paths(fplist, csp):
for fp in fplist:
for i in range(len(fp.s)):
ix, iy = csp.calc_position(fp.s[i])
if ix is None:
break
iyaw = csp.calc_yaw(fp.s[i])
di = fp.d[i]
fx = ix + di * math.cos(iyaw + math.pi / 2.0)
fy = iy + di * math.sin(iyaw + math.pi / 2.0)
fp.x.append(fx)
fp.y.append(fy)
for i in range(len(fp.x) - 1):
dx = fp.x[i + 1] - fp.x[i]
dy = fp.y[i + 1] - fp.y[i]
fp.yaw.append(math.atan2(dy, dx))
fp.ds.append(math.sqrt(dx ** 2 + dy ** 2))
fp.yaw.append(fp.yaw[-1])
fp.ds.append(fp.ds[-1])
for i in range(len(fp.yaw) - 1):
fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])
return fplist
def check_collision(fp, ob):
for i in range(len(ob[:, 0])):
d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in
zip(fp.x, fp.y)]
collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])
if collision:
return False
return True
def check_paths(fplist, ob):
"""
check path above max speed, max a, does collision or not
"""
okind = []
for i in range(len(fplist)):
if any([(v > MAX_SPEED) for v in fplist[i].s_d]):
continue
elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):
continue
elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):
continue
elif not check_collision(fplist[i], ob):
continue
okind.append(i)
return [fplist[i] for i in okind]
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
ob = np.array(ob)
fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
fplist = calc_global_paths(fplist, csp)
fplist = check_paths(fplist, ob)
mincost = float('inf')
bestpath = None
for fp in fplist:
if mincost >= fp.cf:
mincost = fp.cf
bestpath = fp
return bestpath
def generate_road_widle(x, y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) +
math.pi / 2.0)
road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +
math.pi / 2.0)
road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s
) + math.pi / 2.0)
road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s
) + math.pi / 2.0)
road_left_x.append(road_left_ix)
road_left_y.append(road_left_iy)
road_right_x.append(road_right_ix)
road_right_y.append(road_right_iy)
return road_left_x, road_left_y, road_right_x, road_right_y
def generate_target_course(x, y):
csp = Spline2D(x, y)
s = np.arange(0, csp.s[-1], 0.1)
rx, ry, ryaw, rk = [], [], [], []
for i_s in s:
ix, iy = csp.calc_position(i_s)
rx.append(ix)
ry.append(iy)
ryaw.append(csp.calc_yaw(i_s))
rk.append(csp.calc_curvature(i_s))
return rx, ry, ryaw, rk, csp
def load_global_path():
global zero_cord_x, zero_cord_y
bet = 0.1
blank = []
white = []
yellow = []
GPS_x = []
GPS_y = []
nums, ber = np.loadtxt(
'/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'
, dtype=str, delimiter=',', unpack=True)
for i in range(len(nums)):
if not nums[i] in blank:
yellow.append(float(nums[i]))
white.append(float(ber[i]))
bx = yellow[0]
by = white[0]
for i in range(len(yellow)):
dx = yellow[i] - bx
dy = white[i] - by
dis = math.sqrt(dx ** 2 + dy ** 2)
if dis > bet:
GPS_x.append(yellow[i])
GPS_y.append(white[i])
bx = yellow[i]
by = white[i]
GPS_x = np.array(GPS_x)
GPS_y = np.array(GPS_y)
zero_cord_x = GPS_x[0]
zero_cord_y = GPS_y[0]
GPS_x = GPS_x - zero_cord_x
GPS_y = GPS_y - zero_cord_y
plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')
plt.plot()
plt.show()
return GPS_x, GPS_y
class Info(object):
def __init__(self):
self.CurrGPS_lat = float(-1)
self.CurrGPS_lon = float(-1)
self.CurrentVelocity = float(-1)
self.Target_Velocity = float(-1)
self.ImuYaw = float(-1)
self.Target_Theta = float(-1)
self.gob = np.array([])
self.ob = np.array([])
self.gobx = np.array([])
self.goby = np.array([])
rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)
sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.
FeedbackCallbackGPSIMU, queue_size=10)
rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.
RVcallback, queue_size=10)
def FeedbackCallbackGPSIMU(self, msg):
self.CurrGPS_lat = msg.latitude
self.CurrGPS_lon = msg.longitude
self.ImuYaw = (90 - msg.course_angle) * np.pi / 180
def FeedbackCallbackObs(self, msg):
global Gob_x
global Gob_y
self.gobx = msg.x
self.goby = msg.y
Gob_x.append(self.gobx)
Gob_y.append(self.goby)
self.gob = np.column_stack((Gob_x, Gob_y))
def RVcallback(self, msg):
self.CurrentVelocity = msg.Base_Vehspd
def init(self):
return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,
self.goby, self.gob, self.CurrentVelocity)
def talker(self, Target_Velocity, path_record):
self.rate = rospy.Rate(100)
self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,
queue_size=10)
self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)
self.pub_Velocity.publish(Target_Velocity)
self.path_pub.publish(path_record)
def get_transalation(curr_gps_lat, curr_gps_lon):
curr_posy = float(curr_gps_lon) - zero_cord_y
curr_posx = float(curr_gps_lat) - zero_cord_x
return curr_posx, curr_posy
def get_transformation(pt, curr_yaw, T):
c, s = np.cos(curr_yaw), np.sin(curr_yaw)
R = np.array(((c, -s), (s, c)))
pt = pt.dot(R) + T
return pt
def get_arc_length(tx, ty, st):
arc_length = 0
for x in range(1, st):
arc_length = arc_length + np.hypot(tx[x - 1] - tx[x], ty[x - 1] - ty[x]
)
return arc_length
def get_lateral_dist(tx, ty, curr_posx, curr_posy):
dist = []
for x in range(0, len(tx) - 1):
dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -
ty[x]))
lat_dist = min(dist)
st = dist.index(min(dist))
theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])
theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])
if lat_dist < THRESH_DIST:
lat_dist = 0
curr_posx = tx[st]
curr_posy = ty[st]
if theta2 < theta1:
lat_dist = -lat_dist
return st, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
a = 1.0 * (target - current)
return a
def main():
ptx = []
pty = []
ptx, pty = load_global_path()
tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(
ptx, pty)
c_speed = 5.0 / 3.6
c_acc = 1.0
c_d_dd = 0
c_d_d = 0
area = 25.0
start = time.time()
rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)
my_node = Info()
while not rospy.is_shutdown():
(CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity
) = my_node.init()
ob = []
if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:
curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
T = [curr_posx, curr_posy]
curr_yaw = ImuYaw
if len(gob) == 0:
ob = [[-20, -20]]
else:
ob = gob
ob_len = len(ob) - 1
for x in xrange(0, ob_len):
ob = np.array(ob)
ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
try:
curr_posx, curr_posy = get_transalation(CurrGPS_lat,
CurrGPS_lon)
spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,
curr_posx, curr_posy)
s0 = get_arc_length(tx, ty, spt)
path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,
c_d_dd, ob)
c_speed = path.s_d[1]
c_d_d = path.d_d[1]
c_d_dd = path.d_dd[1]
if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
print('Goal')
c_speed = 0.0
break
if show_animation:
plt.cla()
plt.plot(tx, ty, '-.k')
plt.plot(road_left_x, road_left_y, '-k')
plt.plot(road_right_x, road_right_y, '-k')
plt.plot(ob[:, 0], ob[:, 1], 'ob')
plt.plot(path.x[1:], path.y[1:], '-or')
plt.plot(path.x[1], path.y[1], 'vc')
plt.xlim(path.x[1] - area, path.x[1] + area)
plt.ylim(path.y[1] - area, path.y[1] + area)
plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),
math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,
head_length=1.0)
plt.title('v[km/h]:' + str(c_speed)[0:4])
plt.xlabel(u'x/m', fontsize=14)
plt.ylabel(u'y/m', fontsize=14)
plt.pause(0.0001)
PathFail_flag = 0
except:
PathFail_flag = 1
print("Don't find optimal path")
global Gob_x
global Gob_y
Gob_x *= 0
Gob_y *= 0
try:
"""
acc = proportional_control(6, CurrentVelocity)
temp1=path.yaw[1] `
temp2=curr_yaw
if temp1<0:
temp1=6.28+temp1
if temp2<0:
temp2=6.28+temp2
val = temp1-temp2
if val > 3.14:
val = val - 6.28
if val < -3.14:
val = val + 6.28
val = math.degrees(val)
if val > 50:
val = 50
if val < -50:
val = -50
my_node.talker(acc,val)
"""
path_record = localPath()
for i in range(len(path.x[1:])):
path_record.path_x.append(path.x[i])
path_record.path_y.append(path.y[i])
if len(path_record.path_x) > 10000:
path_record.path_x.pop(0)
path_record.path_y.pop(0)
my_node.talker(c_speed, path_record)
except:
print('local path send fail')
pass
print('Finish')
end = time.time()
if show_animation:
plt.grid(True)
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
MAX_SPEED = 30.0  # maximum allowed speed along a candidate path [m/s]
MAX_ACCEL = 50.0  # maximum allowed acceleration [m/s^2]
MAX_CURVATURE = 30.0  # maximum allowed curvature [1/m]
MAX_ROAD_WIDTH = 10.0  # maximum road width / lateral sampling extent [m]
D_ROAD_W = 2.0  # lateral offset sampling interval [m]
DT = 0.3  # time tick between trajectory samples [s]
MAXT = 6.0  # maximum prediction horizon [s]
MINT = 4.0  # minimum prediction horizon [s]
TARGET_SPEED = 15.0 / 3.6  # longitudinal cruise speed to keep [m/s]
D_T_S = 10.0 / 3.6  # terminal-speed sampling interval [m/s]
N_S_SAMPLE = 0.1  # terminal-speed sampling count on each side of TARGET_SPEED
# (fractional value — effectively a very narrow speed range; TODO confirm)
ROBOT_RADIUS = 2.3  # vehicle radius used for collision checking [m]
THRESH_DIST = 0.01  # snap-to-path threshold for lateral distance [m]
# Cost-function weights: KJ jerk, KT horizon time, KD terminal deviation,
# KLAT/KLON lateral vs. longitudinal mixing.
KJ = 0.8
KT = 0.1
KD = 20.0
KLAT = 0.8
KLON = 0.2
show_animation = True  # toggle the live matplotlib visualisation
Gob_x = []  # accumulated obstacle x coords (vehicle frame), filled by callbacks
Gob_y = []  # accumulated obstacle y coords (vehicle frame)
PathFail_flag = 0  # 1 when the planner failed to find a path (for decision layer)
class FrenetPath:
    """One candidate trajectory, sampled in the Frenet frame (s = arc length
    along the reference spline, d = lateral offset) and, after conversion by
    calc_global_paths(), in the global x/y frame as well.
    """

    def __init__(self):
        # Sample times [s].
        self.t = []
        # Lateral offset and its first three time derivatives.
        self.d = []
        self.d_d = []
        self.d_dd = []
        self.d_ddd = []
        # Longitudinal arc length and its first three time derivatives.
        self.s = []
        self.s_d = []
        self.s_dd = []
        self.s_ddd = []
        # Cost terms: lateral (cd), longitudinal (cv), combined total (cf).
        self.cd = 0.0
        self.cv = 0.0
        self.cf = 0.0
        # Global-frame samples, filled in by calc_global_paths().
        self.x = []
        self.y = []
        self.yaw = []
        self.ds = []
        self.c = []
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
    """Sample a family of candidate Frenet-frame trajectories.

    For every lateral offset goal and prediction horizon, fit a quintic
    polynomial for the lateral motion; then, for every sampled terminal speed
    around TARGET_SPEED, fit a quartic polynomial for the longitudinal motion
    and score the combined candidate with jerk/time/deviation costs.

    Args:
        c_speed: current longitudinal speed [m/s].
        c_d, c_d_d, c_d_dd: current lateral offset and its derivatives.
        s0: current arc length along the reference path [m].

    Returns:
        list of FrenetPath candidates (costs filled, global frame not yet).
    """
    candidates = []
    for target_d in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
        for horizon in np.arange(MINT, MAXT, DT):
            lat_path = FrenetPath()
            lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, target_d,
                                       0.0, 0.0, horizon)
            lat_path.t = list(np.arange(0.0, horizon, DT))
            lat_path.d = [lat_qp.calc_point(t) for t in lat_path.t]
            lat_path.d_d = [lat_qp.calc_first_derivative(t) for t in lat_path.t]
            lat_path.d_dd = [lat_qp.calc_second_derivative(t) for t in lat_path.t]
            lat_path.d_ddd = [lat_qp.calc_third_derivative(t) for t in lat_path.t]
            # Longitudinal speed-keeping: sample terminal speeds around target.
            speed_lo = TARGET_SPEED - D_T_S * N_S_SAMPLE
            speed_hi = TARGET_SPEED + D_T_S * N_S_SAMPLE
            for end_speed in np.arange(speed_lo, speed_hi, D_T_S):
                cand = copy.deepcopy(lat_path)
                lon_qp = QuarticPolynomial(s0, c_speed, 0.0, end_speed,
                                           0.0, horizon)
                cand.s = [lon_qp.calc_point(t) for t in cand.t]
                cand.s_d = [lon_qp.calc_first_derivative(t) for t in cand.t]
                cand.s_dd = [lon_qp.calc_second_derivative(t) for t in cand.t]
                cand.s_ddd = [lon_qp.calc_third_derivative(t) for t in cand.t]
                lat_jerk = sum(np.power(cand.d_ddd, 2))
                lon_jerk = sum(np.power(cand.s_ddd, 2))
                speed_err = (TARGET_SPEED - cand.s_d[-1]) ** 2
                cand.cd = KJ * lat_jerk + KT * horizon + KD * cand.d[-1] ** 2
                cand.cv = KJ * lon_jerk + KT * horizon + KD * speed_err
                cand.cf = KLAT * cand.cd + KLON * cand.cv
                candidates.append(cand)
    return candidates
def calc_global_paths(fplist, csp):
    """Convert each Frenet-frame candidate in *fplist* to the global frame.

    For every (s, d) sample, project onto the reference spline *csp* and
    offset by d along the spline normal; then derive per-point yaw, segment
    length (ds) and curvature (c) by finite differences.

    Bug fix: the original unconditionally executed ``fp.yaw.append(fp.yaw[-1])``
    which raises IndexError when fewer than two global points exist (e.g. when
    ``csp.calc_position`` returns None for the very first sample).  Such
    degenerate candidates are now left with empty yaw/ds/c instead of crashing.

    Returns the same list, with x/y/yaw/ds/c filled in-place.
    """
    for fp in fplist:
        # Project (s, d) samples onto the reference spline.
        for i in range(len(fp.s)):
            ix, iy = csp.calc_position(fp.s[i])
            if ix is None:
                # s ran off the end of the spline; keep the points found so far.
                break
            iyaw = csp.calc_yaw(fp.s[i])
            di = fp.d[i]
            fx = ix + di * math.cos(iyaw + math.pi / 2.0)
            fy = iy + di * math.sin(iyaw + math.pi / 2.0)
            fp.x.append(fx)
            fp.y.append(fy)
        if len(fp.x) < 2:
            # Fewer than two global points: yaw/ds/curvature are undefined.
            continue
        for i in range(len(fp.x) - 1):
            dx = fp.x[i + 1] - fp.x[i]
            dy = fp.y[i + 1] - fp.y[i]
            fp.yaw.append(math.atan2(dy, dx))
            fp.ds.append(math.sqrt(dx ** 2 + dy ** 2))
        # Duplicate the last yaw/ds so both lists have one entry per point.
        fp.yaw.append(fp.yaw[-1])
        fp.ds.append(fp.ds[-1])
        for i in range(len(fp.yaw) - 1):
            fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])
    return fplist
def check_collision(fp, ob):
    """Return False if any trajectory point of *fp* comes within ROBOT_RADIUS
    of any obstacle row in *ob* (an (N, 2) array), True otherwise."""
    radius_sq = ROBOT_RADIUS ** 2
    for ox, oy in zip(ob[:, 0], ob[:, 1]):
        for px, py in zip(fp.x, fp.y):
            if (px - ox) ** 2 + (py - oy) ** 2 <= radius_sq:
                return False
    return True
def check_paths(fplist, ob):
    """Filter candidate paths: drop any that exceed the speed, acceleration
    or curvature limits, or that collide with an obstacle."""
    valid = []
    for fp in fplist:
        if any(v > MAX_SPEED for v in fp.s_d):
            continue
        if any(abs(a) > MAX_ACCEL for a in fp.s_dd):
            continue
        if any(abs(k) > MAX_CURVATURE for k in fp.c):
            continue
        if not check_collision(fp, ob):
            continue
        valid.append(fp)
    return valid
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
    """Full planning pipeline: sample Frenet candidates, convert them to the
    global frame, filter against limits/obstacles, and return the minimum-cost
    survivor (or None when every candidate was rejected)."""
    obstacles = np.array(ob)
    candidates = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
    candidates = calc_global_paths(candidates, csp)
    candidates = check_paths(candidates, obstacles)
    best = None
    best_cost = float('inf')
    for cand in candidates:
        # <= keeps the same tie-breaking as the original (last one wins).
        if cand.cf <= best_cost:
            best_cost = cand.cf
            best = cand
    return best
def generate_road_widle(x, y):
    """Build the left/right road-boundary polylines, offset MAX_ROAD_WIDTH/2
    to each side of the spline through waypoints (x, y), sampled every 0.1 m.

    Improvement: the original called ``csp.calc_yaw(i_s)`` (and cos/sin of it)
    four times per sample; the normal direction is now computed once.

    Returns:
        (road_left_x, road_left_y, road_right_x, road_right_y) lists.
    """
    csp = Spline2D(x, y)
    s = np.arange(0, csp.s[-1], 0.1)
    road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []
    half_width = MAX_ROAD_WIDTH / 2
    for i_s in s:
        ix, iy = csp.calc_position(i_s)
        # Normal to the path: yaw rotated by +90 degrees.
        normal = csp.calc_yaw(i_s) + math.pi / 2.0
        dx = half_width * math.cos(normal)
        dy = half_width * math.sin(normal)
        road_left_x.append(ix + dx)
        road_left_y.append(iy + dy)
        road_right_x.append(ix - dx)
        road_right_y.append(iy - dy)
    return road_left_x, road_left_y, road_right_x, road_right_y
def generate_target_course(x, y):
    """Fit a 2-D cubic spline through the waypoints and sample it every 0.1 m.

    Returns:
        (rx, ry, ryaw, rk, csp): sampled positions, headings, curvatures,
        and the Spline2D object itself.
    """
    csp = Spline2D(x, y)
    stations = np.arange(0, csp.s[-1], 0.1)
    rx, ry, ryaw, rk = [], [], [], []
    for station in stations:
        px, py = csp.calc_position(station)
        rx.append(px)
        ry.append(py)
        ryaw.append(csp.calc_yaw(station))
        rk.append(csp.calc_curvature(station))
    return rx, ry, ryaw, rk, csp
def load_global_path():
    """Load the recorded GPS reference path from a fixed file, thin it out,
    and shift it so the first point becomes the local origin.

    Side effects: sets module globals zero_cord_x / zero_cord_y (used later by
    get_transalation), and shows a blocking matplotlib preview of the path.

    Returns:
        (GPS_x, GPS_y): numpy arrays of the thinned path, origin-relative.
    """
    global zero_cord_x, zero_cord_y
    bet = 0.1  # minimum spacing between kept points
    # NOTE(review): `blank` stays empty, so the membership filter below is a
    # no-op — presumably a leftover placeholder for invalid-line filtering.
    blank = []
    white = []
    yellow = []
    GPS_x = []
    GPS_y = []
    # Two comma-separated columns read as strings; path is machine-specific.
    nums, ber = np.loadtxt(
        '/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'
        , dtype=str, delimiter=',', unpack=True)
    for i in range(len(nums)):
        if not nums[i] in blank:
            yellow.append(float(nums[i]))
            white.append(float(ber[i]))
    # Thin the raw track: keep a point only when it moved > bet from the last
    # kept one.
    bx = yellow[0]
    by = white[0]
    for i in range(len(yellow)):
        dx = yellow[i] - bx
        dy = white[i] - by
        dis = math.sqrt(dx ** 2 + dy ** 2)
        if dis > bet:
            GPS_x.append(yellow[i])
            GPS_y.append(white[i])
            bx = yellow[i]
            by = white[i]
    GPS_x = np.array(GPS_x)
    GPS_y = np.array(GPS_y)
    # Anchor the local frame at the first kept point.
    zero_cord_x = GPS_x[0]
    zero_cord_y = GPS_y[0]
    GPS_x = GPS_x - zero_cord_x
    GPS_y = GPS_y - zero_cord_y
    plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')
    plt.plot()
    # NOTE(review): plt.show() blocks until the window is closed.
    plt.show()
    return GPS_x, GPS_y
class Info(object):
    """ROS interface for the planner node: subscribes to GNSS, obstacle and
    motor-feedback topics, caches the latest values, and publishes the planned
    speed and local path.
    """

    def __init__(self):
        # Sentinel -1 marks "no message received yet" for each field.
        self.CurrGPS_lat = float(-1)
        self.CurrGPS_lon = float(-1)
        self.CurrentVelocity = float(-1)
        self.Target_Velocity = float(-1)
        self.ImuYaw = float(-1)
        self.Target_Theta = float(-1)
        self.gob = np.array([])
        self.ob = np.array([])
        self.gobx = np.array([])
        self.goby = np.array([])
        rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)
        sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.
            FeedbackCallbackGPSIMU, queue_size=10)
        rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.
            RVcallback, queue_size=10)
        # Fix: create the publishers ONCE here.  The original re-created them
        # on every talker() call, which silently drops messages while each new
        # publisher's connections are still being established.
        self.rate = rospy.Rate(100)
        self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,
            queue_size=10)
        self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)

    def FeedbackCallbackGPSIMU(self, msg):
        """Cache latitude/longitude and convert the course angle (degrees)
        to yaw in radians via (90 - course_angle) * pi / 180."""
        self.CurrGPS_lat = msg.latitude
        self.CurrGPS_lon = msg.longitude
        self.ImuYaw = (90 - msg.course_angle) * np.pi / 180

    def FeedbackCallbackObs(self, msg):
        """Append the reported obstacle point to the global accumulators and
        rebuild the (N, 2) obstacle array."""
        global Gob_x
        global Gob_y
        self.gobx = msg.x
        self.goby = msg.y
        Gob_x.append(self.gobx)
        Gob_y.append(self.goby)
        self.gob = np.column_stack((Gob_x, Gob_y))

    def RVcallback(self, msg):
        """Cache the measured vehicle speed."""
        self.CurrentVelocity = msg.Base_Vehspd

    def init(self):
        """Return a snapshot of the latest cached sensor values."""
        return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,
            self.goby, self.gob, self.CurrentVelocity)

    def talker(self, Target_Velocity, path_record):
        """Publish the target speed and the planned local path."""
        self.pub_Velocity.publish(Target_Velocity)
        self.path_pub.publish(path_record)
def get_transalation(curr_gps_lat, curr_gps_lon):
    """Translate raw GPS coordinates into the local frame anchored at the
    global-path origin (module globals zero_cord_x / zero_cord_y)."""
    local_x = float(curr_gps_lat) - zero_cord_x
    local_y = float(curr_gps_lon) - zero_cord_y
    return local_x, local_y
def get_transformation(pt, curr_yaw, T):
    """Rotate point(s) *pt* by *curr_yaw* (row-vector convention, pt @ R)
    and translate by *T*; returns the transformed array."""
    cos_y = np.cos(curr_yaw)
    sin_y = np.sin(curr_yaw)
    rotation = np.array(((cos_y, -sin_y), (sin_y, cos_y)))
    return pt.dot(rotation) + T
def get_arc_length(tx, ty, st):
    """Polyline distance along (tx, ty) from the start up to index *st*.

    Returns 0 when st <= 1 (no complete segment yet).
    """
    total = 0
    for i in range(1, st):
        total += np.hypot(tx[i - 1] - tx[i], ty[i - 1] - ty[i])
    return total
def get_lateral_dist(tx, ty, curr_posx, curr_posy):
    """Find the nearest global-path point and the signed lateral offset to it.

    Returns (st, lat_dist, curr_posx, curr_posy): st is the index of the
    nearest path point (last point excluded), lat_dist the lateral distance
    (negated when the vehicle is to the right of the path direction, zeroed
    and position snapped onto the path when within THRESH_DIST).
    NOTE(review): when st == 0 the st-1 index wraps to the last path point —
    confirm this is harmless for paths that start at the vehicle position.
    """
    px = float(curr_posx)
    py = float(curr_posy)
    dist = [np.hypot(px - ix, py - iy) for ix, iy in zip(tx[:-1], ty[:-1])]
    lat_dist = min(dist)
    st = dist.index(lat_dist)
    # theta1: path segment direction; theta2: bearing from the previous path
    # point to the vehicle.  theta2 < theta1 means the vehicle is to the right.
    theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])
    theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])
    if lat_dist < THRESH_DIST:
        lat_dist = 0
        curr_posx = tx[st]
        curr_posy = ty[st]
    if theta2 < theta1:
        lat_dist = -lat_dist
    return st, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
    """P-controller: return an acceleration proportional (gain 1.0) to the
    speed error target - current."""
    return 1.0 * (target - current)
def main():
    """Planner node entry point: load the recorded global path, then loop —
    read cached sensor values, transform obstacles into the local frame, run
    the Frenet optimal planner, and publish speed + local path until shutdown
    or goal arrival.
    """
    ptx = []
    pty = []
    ptx, pty = load_global_path()
    tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
    road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(
        ptx, pty)
    # Initial planner state: ~1.4 m/s start speed, zero lateral rate/accel.
    c_speed = 5.0 / 3.6
    c_acc = 1.0
    c_d_dd = 0
    c_d_d = 0
    # Half-width of the animation viewport [m].
    area = 25.0
    start = time.time()
    rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)
    my_node = Info()
    while not rospy.is_shutdown():
        (CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity
            ) = my_node.init()
        ob = []
        # -1 sentinels mean the corresponding topic has not delivered yet.
        if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:
            curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
            T = [curr_posx, curr_posy]
            curr_yaw = ImuYaw
            if len(gob) == 0:
                # No obstacles observed yet: plant a far-away dummy obstacle.
                ob = [[-20, -20]]
            else:
                ob = gob
            # Transform obstacle points from the vehicle frame into the local
            # map frame.  NOTE(review): the loop stops at len(ob) - 1, so the
            # last obstacle row is never transformed — confirm intentional.
            ob_len = len(ob) - 1
            for x in xrange(0, ob_len):
                ob = np.array(ob)
                ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
            try:
                curr_posx, curr_posy = get_transalation(CurrGPS_lat,
                    CurrGPS_lon)
                spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,
                    curr_posx, curr_posy)
                s0 = get_arc_length(tx, ty, spt)
                path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,
                    c_d_dd, ob)
                # Feed sample [1] of the chosen path back as the next planning
                # state (sample [0] is the current state).
                c_speed = path.s_d[1]
                c_d_d = path.d_d[1]
                c_d_dd = path.d_dd[1]
                # Stop when the next pose is within 1 m of the path end.
                if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
                    print('Goal')
                    c_speed = 0.0
                    break
                if show_animation:
                    plt.cla()
                    plt.plot(tx, ty, '-.k')
                    plt.plot(road_left_x, road_left_y, '-k')
                    plt.plot(road_right_x, road_right_y, '-k')
                    plt.plot(ob[:, 0], ob[:, 1], 'ob')
                    plt.plot(path.x[1:], path.y[1:], '-or')
                    plt.plot(path.x[1], path.y[1], 'vc')
                    plt.xlim(path.x[1] - area, path.x[1] + area)
                    plt.ylim(path.y[1] - area, path.y[1] + area)
                    plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),
                        math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,
                        head_length=1.0)
                    plt.title('v[km/h]:' + str(c_speed)[0:4])
                    plt.xlabel(u'x/m', fontsize=14)
                    plt.ylabel(u'y/m', fontsize=14)
                    plt.pause(0.0001)
                # NOTE(review): PathFail_flag is assigned without a `global`
                # statement, so these writes stay local to main() and never
                # reach the module-level flag — confirm whether that matters.
                PathFail_flag = 0
            # NOTE(review): bare except — swallows every failure in the
            # planning step (including None paths) and resets the obstacle
            # accumulators for a retry.
            except:
                PathFail_flag = 1
                print("Don't find optimal path")
                global Gob_x
                global Gob_y
                Gob_x *= 0
                Gob_y *= 0
            try:
                # Dead experiment kept as a string literal (steering P-control);
                # left untouched.
                """
                acc = proportional_control(6, CurrentVelocity)
                temp1=path.yaw[1] `
                temp2=curr_yaw
                if temp1<0:
                    temp1=6.28+temp1
                if temp2<0:
                    temp2=6.28+temp2
                val = temp1-temp2
                if val > 3.14:
                    val = val - 6.28
                if val < -3.14:
                    val = val + 6.28
                val = math.degrees(val)
                if val > 50:
                    val = 50
                if val < -50:
                    val = -50
                my_node.talker(acc,val)
                """
                # Publish the freshly planned path points (bounded backlog).
                path_record = localPath()
                for i in range(len(path.x[1:])):
                    path_record.path_x.append(path.x[i])
                    path_record.path_y.append(path.y[i])
                if len(path_record.path_x) > 10000:
                    path_record.path_x.pop(0)
                    path_record.path_y.pop(0)
                my_node.talker(c_speed, path_record)
            # NOTE(review): bare except again — publishing failures (or `path`
            # being unbound after a planning failure) are only logged.
            except:
                print('local path send fail')
                pass
    print('Finish')
    end = time.time()
    if show_animation:
        plt.grid(True)
        plt.show()
if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
#!/usr/bin/python2
# -*- coding: UTF-8 -*-
# coding: utf-8
#!/usr/bin/env python
'''
发布轨迹信息
path.x; path.y; c_speed;
'''
import numpy as np
import matplotlib.pyplot as plt
import copy
import math
from cubic_spline import Spline2D
from polynomials import QuarticPolynomial, QuinticPolynomial
import time
import rospy
from std_msgs.msg import String
from std_msgs.msg import Float32
from std_msgs.msg import Int32
from geometry_msgs.msg import Point
from nav_msgs.msg import Path
from local_planner.msg import localPath
from geometry_msgs.msg import PoseStamped, Quaternion
import tf
from CAN_driver.msg import Motor_Feedback
from GNSS_driver.msg import GNSS_CAN
import sys
# Planner parameters
MAX_SPEED = 30.0  # maximum speed [m/s]
MAX_ACCEL = 50.0  # maximum acceleration [m/ss]
MAX_CURVATURE = 30.0  # maximum curvature [1/m]
MAX_ROAD_WIDTH = 10.0  # maximum road width [m]
D_ROAD_W = 2.0  # road-width sampling interval [m]
DT = 0.3  # Delta T [s]
MAXT = 6.0  # maximum prediction horizon (time) — NOTE(review): comment unit was [m], presumably [s]
MINT = 4.0  # minimum prediction horizon (time)
TARGET_SPEED = 15.0/3.6  # target speed [m/s], i.e. the longitudinal speed to keep
D_T_S = 10.0/3.6  # target-speed sampling interval [m/s]
N_S_SAMPLE = 0.1  # number of target-speed samples
ROBOT_RADIUS = 2.3  # vehicle radius [m]
THRESH_DIST=0.01
# cost-function weights
KJ = 0.8
KT = 0.1
KD = 20.0
KLAT = 0.8
KLON = 0.2
show_animation = True
Gob_x = []
Gob_y = []
# planning-failure flag (1 = failed); consumed by the decision layer
PathFail_flag = 0
class FrenetPath:
    """One candidate trajectory expressed in Frenet coordinates.

    The lateral state (d and its time derivatives), longitudinal state
    (s and its time derivatives), the three cost terms, and the derived
    global-frame geometry are stored as parallel per-sample lists.
    """

    # Names of the per-sample list attributes, grouped as:
    # time, lateral state, longitudinal state, global-frame geometry.
    _LIST_FIELDS = ("t",
                    "d", "d_d", "d_dd", "d_ddd",
                    "s", "s_d", "s_dd", "s_ddd",
                    "x", "y", "yaw", "ds", "c")

    def __init__(self):
        # Each instance gets its own fresh, empty lists.
        for field in self._LIST_FIELDS:
            setattr(self, field, [])
        # Cost terms: lateral (cd), longitudinal/velocity (cv), combined (cf).
        self.cd = 0.0
        self.cv = 0.0
        self.cf = 0.0
def calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):
    """Sample candidate Frenet trajectories from the current vehicle state.

    For every lateral offset goal ``di`` and horizon ``Ti``, a quintic
    polynomial defines the lateral profile; for every sampled terminal
    speed ``tv``, a quartic velocity-keeping polynomial defines the
    longitudinal profile. Each combination is costed and collected.

    :param c_speed: current longitudinal speed [m/s]
    :param c_d: current lateral offset from the reference line [m]
    :param c_d_d: current lateral velocity
    :param c_d_dd: current lateral acceleration
    :param s0: current arc length along the reference line [m]
    :return: list of FrenetPath candidates (costs set; global frame not yet)
    """
    frenet_paths = []

    # generate path to each offset goal
    for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):
        # Sample each lateral goal configuration and generate a trajectory.
        # Lateral motion planning
        for Ti in np.arange(MINT, MAXT, DT):
            fp = FrenetPath()
            # Quintic polynomial for the lateral goal configuration (di, Ti).
            lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)

            fp.t = [t for t in np.arange(0.0, Ti, DT)]
            fp.d = [lat_qp.calc_point(t) for t in fp.t]
            fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]
            fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]
            fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]

            # Longitudinal speed planning (velocity keeping): sample the
            # terminal speed around TARGET_SPEED.
            for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):
                tfp = copy.deepcopy(fp)
                lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)

                tfp.s = [lon_qp.calc_point(t) for t in fp.t]
                tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]
                tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]
                tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]

                ###########################################################
                # cost function for high speed
                ###########################################################
                Jp = sum(np.power(tfp.d_ddd, 2))  # square of lateral jerk
                Js = sum(np.power(tfp.s_ddd, 2))  # square of longitudinal jerk

                # square of diff from target speed
                ds = (TARGET_SPEED - tfp.s_d[-1])**2
                # lateral cost: jerk + time + terminal offset
                tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1]**2
                # longitudinal cost: jerk + time + speed deviation
                tfp.cv = KJ * Js + KT * Ti + KD * ds
                # total cost is the weighted sum of the d and s direction costs

                #########################################################
                # cost function for low speed (kept for reference)
                #########################################################
                # ltfp = copy.deepcopy(tfp)
                # ltfp.d_sss = [lat_qp.calc_third_derivative(s) for s in tfp.s]
                # Jp_s = sum(np.power(ltfp.d_sss, 2))  # square of jerk
                # Js = sum(np.power(tfp.s_ddd, 2))  # square of jerk
                # # S = s1 - s0
                # dS = tfp.s[-1] - s0
                # # lateral cost
                # tfp.cd = KJ * Jp_s + KT * dS + KD * tfp.d[-1] ** 2
                # # longitudinal cost
                # tfp.cv = KJ * Js + KT * Ti + KD * ds
                tfp.cf = KLAT * tfp.cd + KLON * tfp.cv

                frenet_paths.append(tfp)

    return frenet_paths
def calc_global_paths(fplist, csp):
    """Convert each Frenet-frame path in *fplist* to global coordinates.

    For every path: sample the reference spline at each s, offset laterally
    by d to get (x, y), then derive yaw, segment length ds and curvature c.

    :param fplist: list of FrenetPath-like objects with s/d filled in
    :param csp: reference spline exposing calc_position(s) and calc_yaw(s)
    :return: the same list with x, y, yaw, ds, c populated
    """
    for fp in fplist:
        # calc global positions
        for i in range(len(fp.s)):
            ix, iy = csp.calc_position(fp.s[i])
            if ix is None:
                # s ran past the end of the reference spline; truncate here.
                break
            iyaw = csp.calc_yaw(fp.s[i])
            di = fp.d[i]
            # Offset the spline point by d along the left normal.
            fx = ix + di * math.cos(iyaw + math.pi / 2.0)
            fy = iy + di * math.sin(iyaw + math.pi / 2.0)
            fp.x.append(fx)
            fp.y.append(fy)

        # calc yaw and ds between consecutive global points
        for i in range(len(fp.x) - 1):
            dx = fp.x[i + 1] - fp.x[i]
            dy = fp.y[i + 1] - fp.y[i]
            fp.yaw.append(math.atan2(dy, dx))
            fp.ds.append(math.sqrt(dx**2 + dy**2))

        # Fix: only duplicate the last yaw/ds sample when there is one.
        # The original read fp.yaw[-1]/fp.ds[-1] unconditionally and raised
        # IndexError whenever the spline lookup failed on the very first
        # sample (fp.x empty) or the path had a single point.
        if fp.yaw:
            fp.yaw.append(fp.yaw[-1])
            fp.ds.append(fp.ds[-1])

        # calc curvature
        # NOTE(review): assumes ds > 0 (no duplicate consecutive points),
        # otherwise this divides by zero — confirm upstream sampling.
        for i in range(len(fp.yaw) - 1):
            fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])

    return fplist
def check_collision(fp, ob):
    """Return False if any trajectory point of *fp* comes within
    ROBOT_RADIUS of any obstacle row in *ob* (Nx2 array), else True."""
    radius_sq = ROBOT_RADIUS**2
    for obs_idx in range(len(ob[:, 0])):
        ox, oy = ob[obs_idx, 0], ob[obs_idx, 1]
        for px, py in zip(fp.x, fp.y):
            if (px - ox)**2 + (py - oy)**2 <= radius_sq:
                return False
    return True
def check_paths(fplist, ob):
    """Filter candidate paths against the speed, acceleration and curvature
    limits, and reject any candidate that collides with an obstacle.

    :return: the surviving paths, in their original order
    """
    feasible = []
    for fp in fplist:
        if any(v > MAX_SPEED for v in fp.s_d):  # max speed check
            continue
        if any(abs(a) > MAX_ACCEL for a in fp.s_dd):  # max accel check
            continue
        if any(abs(c) > MAX_CURVATURE for c in fp.c):  # max curvature check
            continue
        if not check_collision(fp, ob):
            continue
        feasible.append(fp)
    return feasible
def frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):
    """Sample Frenet candidates from the current state, project them to the
    global frame, drop infeasible ones and return the cheapest path.

    :return: the minimum-cost FrenetPath, or None if all were rejected
    """
    candidates = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)
    candidates = calc_global_paths(candidates, csp)
    candidates = check_paths(candidates, np.array(ob))

    # Linear scan for the minimum total cost (ties keep the later path,
    # matching the original <= comparison).
    best = None
    best_cost = float("inf")
    for cand in candidates:
        if cand.cf <= best_cost:
            best_cost = cand.cf
            best = cand
    return best
def generate_road_widle(x, y):
    """Build the left/right road-boundary polylines at half MAX_ROAD_WIDTH
    on each side of the reference line through (x, y), sampled every 0.1 m.

    :return: (left_x, left_y, right_x, right_y) boundary point lists
    """
    csp = Spline2D(x, y)
    left_x, left_y, right_x, right_y = [], [], [], []
    for s in np.arange(0, csp.s[-1], 0.1):
        px, py = csp.calc_position(s)
        # Direction of the left normal at this point.
        normal = csp.calc_yaw(s) + math.pi / 2.0
        off_x = MAX_ROAD_WIDTH / 2 * math.cos(normal)
        off_y = MAX_ROAD_WIDTH / 2 * math.sin(normal)
        left_x.append(px + off_x)
        left_y.append(py + off_y)
        right_x.append(px - off_x)
        right_y.append(py - off_y)
    return left_x, left_y, right_x, right_y
def generate_target_course(x, y):
    """Fit a 2-D cubic spline through the waypoints (x, y) and sample it
    every 0.1 m of arc length.

    :return: (rx, ry, ryaw, rk, csp) — sampled positions, headings,
             curvatures, and the spline object itself
    """
    csp = Spline2D(x, y)
    rx, ry, ryaw, rk = [], [], [], []
    for arc in np.arange(0, csp.s[-1], 0.1):
        px, py = csp.calc_position(arc)
        rx.append(px)
        ry.append(py)
        ryaw.append(csp.calc_yaw(arc))
        rk.append(csp.calc_curvature(arc))
    return rx, ry, ryaw, rk, csp
#######################################################################################
def load_global_path():
    """Load the recorded GPS waypoints, thin them to a minimum spacing, and
    shift them into a local frame anchored at the first retained point.

    Side effects: sets the module globals ``zero_cord_x``/``zero_cord_y``
    (the anchor point) and shows a blocking matplotlib plot of the path.

    :return: (GPS_x, GPS_y) numpy arrays of local-frame waypoints
    """
    global zero_cord_x,zero_cord_y
    bet = 0.1  # minimum spacing between retained points
    blank = [] # buffer
    white = [] # buffer (y / longitude column)
    yellow = [] # buffer (x / latitude column)
    GPS_x = [] # x of the recorded waypoints
    GPS_y = [] # y of the recorded waypoints
    # read the recorded waypoints
    nums, ber = np.loadtxt("/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt", dtype=str, delimiter=',', unpack=True)
    for i in range(len(nums)):
        # NOTE(review): nothing is ever appended to `blank`, so this
        # duplicate filter is currently a no-op — confirm intent.
        if not nums[i] in blank: # drop duplicate points
            #blank.append(nums[i])
            yellow.append(float(nums[i]))
            white.append(float(ber[i]))
    bx = yellow[0] # starting point coordinates
    by = white[0]
    for i in range(len(yellow)):
        dx = yellow[i] - bx
        dy = white[i] - by
        dis = math.sqrt(dx ** 2 + dy ** 2)
        if dis > bet: # keep only points farther apart than the threshold
            GPS_x.append(yellow[i]) # so retained points all satisfy the spacing
            GPS_y.append(white[i])
            bx = yellow[i]
            by = white[i]
    GPS_x = np.array(GPS_x) # convert lists to arrays
    GPS_y = np.array(GPS_y)
    #print("cx:",cx)
    #print("cy:",cy)
    zero_cord_x = GPS_x[0]
    zero_cord_y = GPS_y[0]
    GPS_x = GPS_x - zero_cord_x
    GPS_y = GPS_y - zero_cord_y
    plt.plot(GPS_x,GPS_y, "-r", label="GPS point ")
    plt.plot()
    plt.show()
    return GPS_x, GPS_y
class Info(object):
    """ROS interface for the planner node.

    Subscribes to GNSS, obstacle and motor-feedback topics, caches the
    latest values, and publishes the commanded velocity plus the planned
    local path. Must be constructed after ``rospy.init_node``.
    """

    def __init__(self):
        # Latest sensor state; -1 marks "not yet received".
        self.CurrGPS_lat = float(-1)
        self.CurrGPS_lon = float(-1)
        self.CurrentVelocity = float(-1)
        self.Target_Velocity = float(-1)
        self.ImuYaw = float(-1)
        self.Target_Theta = float(-1)
        self.gob = np.array([])
        self.ob = np.array([])
        self.gobx = np.array([])
        self.goby = np.array([])
        # Subscribers
        rospy.Subscriber("coordinate", Point, self.FeedbackCallbackObs)
        rospy.Subscriber('gnss_message', GNSS_CAN, self.FeedbackCallbackGPSIMU, queue_size=10)  # GPS data
        rospy.Subscriber("Motor_Feedback_mssage", Motor_Feedback, self.RVcallback, queue_size=10)
        # Publishers are created once here instead of on every talker()
        # call: re-creating a rospy.Publisher per call re-registers the
        # topic each time and can drop early messages while the connection
        # is (re)established.
        self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size=10)
        self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)
        self.rate = rospy.Rate(100)  # 100 Hz

    def FeedbackCallbackGPSIMU(self, msg):
        """Cache the latest GNSS fix and convert the course angle to a
        math-convention yaw in radians (0 = east, CCW positive)."""
        self.CurrGPS_lat = msg.latitude
        self.CurrGPS_lon = msg.longitude
        self.ImuYaw = (90 - msg.course_angle) * np.pi / 180

    def FeedbackCallbackObs(self, msg):
        """Append a detected obstacle point to the module-level obstacle
        stacks and rebuild the Nx2 obstacle array."""
        global Gob_x
        global Gob_y
        self.gobx = msg.x
        self.goby = msg.y
        Gob_x.append(self.gobx)
        Gob_y.append(self.goby)
        self.gob = np.column_stack((Gob_x, Gob_y))

    def RVcallback(self, msg):
        """Cache the latest measured vehicle speed."""
        self.CurrentVelocity = msg.Base_Vehspd

    def init(self):
        """Snapshot the latest cached sensor state as a flat tuple."""
        return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,
                self.goby, self.gob, self.CurrentVelocity)

    def talker(self, Target_Velocity, path_record):
        """Publish the commanded velocity and the planned local path."""
        self.pub_Velocity.publish(Target_Velocity)
        self.path_pub.publish(path_record)
#######################################################################################
def get_transalation(curr_gps_lat, curr_gps_lon):
    """Translate a GPS fix into the local frame anchored at the first
    recorded path point (module globals zero_cord_x / zero_cord_y).

    :return: (curr_posx, curr_posy) local-frame position
    """
    local_x = float(curr_gps_lat) - zero_cord_x
    local_y = float(curr_gps_lon) - zero_cord_y
    return local_x, local_y
def get_transformation(pt, curr_yaw, T):
    """Rotate point *pt* by *curr_yaw* (right-multiplication by the 2-D
    rotation matrix) and translate it by *T*.

    :return: the transformed point
    """
    cos_y = np.cos(curr_yaw)
    sin_y = np.sin(curr_yaw)
    rot = np.array(((cos_y, -sin_y),
                    (sin_y, cos_y)))
    return pt.dot(rot) + T
def get_arc_length(tx, ty, st):
    """Sum the straight-line segment lengths of the reference path from
    index 0 up to (but not including) index *st*.

    :return: accumulated arc length (0 when st <= 1)
    """
    return sum(np.hypot(tx[i - 1] - tx[i], ty[i - 1] - ty[i])
               for i in range(1, st))
def get_lateral_dist(tx, ty, curr_posx, curr_posy):
    """Locate the reference-path point nearest to the current position.

    Returns (index of nearest point, signed lateral distance, curr_posx,
    curr_posy). The distance is negated when the vehicle lies to the right
    of the path direction; distances under THRESH_DIST snap the position
    onto the path and report zero.
    """
    dists = [np.hypot(float(curr_posx) - tx[i], float(curr_posy) - ty[i])
             for i in range(len(tx) - 1)]
    lat_dist = min(dists)
    nearest = dists.index(lat_dist)
    # Heading of the path segment into the nearest point, and the bearing
    # from the previous path point to the vehicle.
    path_heading = math.atan2(ty[nearest] - ty[nearest - 1],
                              tx[nearest] - tx[nearest - 1])
    pos_heading = math.atan2(curr_posy - ty[nearest - 1],
                             curr_posx - tx[nearest - 1])
    if lat_dist < THRESH_DIST:
        # Close enough: snap onto the path.
        lat_dist = 0
        curr_posx = tx[nearest]
        curr_posy = ty[nearest]
    if pos_heading < path_heading:
        # Vehicle is on the right-hand side of the path direction.
        lat_dist = -lat_dist
    return nearest, lat_dist, curr_posx, curr_posy
def proportional_control(target, current):
    """P-controller: return an acceleration proportional (gain 1.0) to the
    speed error *target* - *current*."""
    return 1.0 * (target - current)
def main():
    """Planner main loop.

    Loads the recorded GPS path, fits the reference spline and road
    boundaries, then — until ROS shutdown or the goal is reached —
    replans a local Frenet path from the live GNSS fix and obstacle
    stack and publishes the commanded speed plus the local path.
    """
    ptx = []
    pty = []
    ptx, pty = load_global_path()
    tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)
    #print(csp)
    road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(ptx, pty)
    # current vehicle speed and lateral state
    c_speed = 5.0/3.6
    c_acc = 1.0
    c_d_dd = 0
    c_d_d = 0
    area = 25.0  # animation area length [m]
    start = time.time()
    rospy.init_node('AvoidObstacles_PlannerOut', anonymous = False)
    my_node = Info()
    while not rospy.is_shutdown():
        CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity = my_node.init()
        #print("gob",gob)
        ob = []
        # Only plan once a first GNSS fix and heading have been received.
        if (CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1):
            #print(CurrGPS_lat,CurrGPS_lon,ImuYaw, curr_posx, curr_posy)
            #print(gobx,goby,gob)
            #path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
            #s0 = path.s[1]
            #c_d = path.d[1]
            #c_d_d = path.d_d[1]
            #c_d_dd = path.d_dd[1]
            #c_speed = path.s_d[1]
            curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
            T = [curr_posx, curr_posy]
            curr_yaw = ImuYaw #+ math.pi / 2
            # Use a far-away sentinel obstacle when none have been detected.
            if (len(gob) == 0):
                ob = [[-20, -20]]
            else:
                ob = gob
            ob_len = len(ob)-1
            # NOTE(review): xrange is Python 2 only (consistent with the
            # python2 shebang), and the len(ob)-1 bound skips transforming
            # the last obstacle row — confirm both are intentional.
            for x in xrange(0, ob_len):
                #print("ob_transformation",ob)
                ob = np.array(ob)
                #ob[x, :] = .2 * ob[x, :]
                ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)
                #print("ob_transformation",ob)
            #############################################################
            # c_d_dd = c_acc*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))+curr_yaw)
            #spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
            #curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
            try:
                # Locate the vehicle relative to the reference line, then plan.
                curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)
                spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)
                s0 = get_arc_length(tx, ty, spt)
                path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)
                c_speed = path.s_d[1]
                #c_d_d = c_speed*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))-curr_yaw)
                c_d_d = path.d_d[1]
                c_d_dd = path.d_dd[1]
                # Stop when the next path point is within 1 m of the goal.
                if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:
                    print("Goal")
                    c_speed = 0.0
                    break
                if show_animation:
                    plt.cla()
                    plt.plot(tx, ty, "-.k")
                    plt.plot(road_left_x, road_left_y, "-k")
                    plt.plot(road_right_x, road_right_y, "-k")
                    plt.plot(ob[:, 0], ob[:, 1], "ob")
                    plt.plot(path.x[1:], path.y[1:], "-or")
                    plt.plot(path.x[1], path.y[1], "vc")
                    plt.xlim(path.x[1] - area, path.x[1] + area)
                    plt.ylim(path.y[1] - area, path.y[1] + area)
                    plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw), math.sin(curr_yaw),fc="r", ec="k", head_width=0.5, head_length=1.0)
                    plt.title("v[km/h]:" + str(c_speed)[0:4])
                    plt.xlabel(u'x/m', fontsize=14)  # x-axis label and font size
                    plt.ylabel(u'y/m', fontsize=14)  # y-axis label and font size
                    plt.pause(0.0001)
                # planning succeeded
                # NOTE(review): PathFail_flag is a local variable here (no
                # `global` declaration), so the module-level flag the
                # decision layer reads is never actually updated — confirm.
                PathFail_flag = 0
            except:
                # planning failed
                # NOTE(review): bare except also hides unrelated errors.
                PathFail_flag = 1
                print("Don't find optimal path")
                # clear the accumulated obstacle stacks
                global Gob_x
                global Gob_y
                Gob_x*=0
                Gob_y*=0
            ###############################################################################
            try:
                '''
                acc = proportional_control(6, CurrentVelocity)
                temp1=path.yaw[1] `
                temp2=curr_yaw 
                
                if temp1<0:
                    temp1=6.28+temp1
                if temp2<0:
                    temp2=6.28+temp2

                val = temp1-temp2
                
                if val > 3.14:
                    val = val - 6.28
                if val < -3.14:
                    val = val + 6.28
                
                val = math.degrees(val)
                
                if val > 50:
                    val = 50
                if val < -50:
                    val = -50
                
                my_node.talker(acc,val)
                '''
                path_record = localPath()
                # assemble the path message
                for i in range(len(path.x[1:])):
                    #print("path_x",path.x[i])
                    path_record.path_x.append(path.x[i])
                    path_record.path_y.append(path.y[i])
                # bound the number of stored path points
                if len(path_record.path_x) > 10000:
                    path_record.path_x.pop(0)
                    path_record.path_y.pop(0)
                # publish the path
                my_node.talker(c_speed, path_record)
            except:
                print("local path send fail")
                pass
            #my_node.talker(c_speed, path.x[1:], path.y[1:])
            #except:
            #    pass
    print("Finish")
    end = time.time()
    #print("total time: ", end - start)
    if show_animation:
        plt.grid(True)
        plt.show()
# Script entry point: run the planner node when executed directly.
if __name__ == "__main__":
    main()
|
flexible
|
{
"blob_id": "4647a7d0996ceeef4f39cf3182ac3944d25cb349",
"index": 8197,
"step-1": "<mask token>\n\n\nclass FrenetPath:\n\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\n\ndef calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):\n frenet_paths = []\n for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):\n for Ti in np.arange(MINT, MAXT, DT):\n fp = FrenetPath()\n lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)\n fp.t = [t for t in np.arange(0.0, Ti, DT)]\n fp.d = [lat_qp.calc_point(t) for t in fp.t]\n fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]\n fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]\n fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]\n for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, \n TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):\n tfp = copy.deepcopy(fp)\n lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)\n tfp.s = [lon_qp.calc_point(t) for t in fp.t]\n tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]\n tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]\n tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]\n Jp = sum(np.power(tfp.d_ddd, 2))\n Js = sum(np.power(tfp.s_ddd, 2))\n ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2\n tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2\n tfp.cv = KJ * Js + KT * Ti + KD * ds\n tfp.cf = KLAT * tfp.cd + KLON * tfp.cv\n frenet_paths.append(tfp)\n return frenet_paths\n\n\n<mask token>\n\n\ndef check_collision(fp, ob):\n for i in range(len(ob[:, 0])):\n d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in\n zip(fp.x, fp.y)]\n collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])\n if collision:\n return False\n return True\n\n\ndef check_paths(fplist, ob):\n \"\"\"\n check path above max speed, max a, does collision or not\n \"\"\"\n okind = []\n for i 
in range(len(fplist)):\n if any([(v > MAX_SPEED) for v in fplist[i].s_d]):\n continue\n elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):\n continue\n elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):\n continue\n elif not check_collision(fplist[i], ob):\n continue\n okind.append(i)\n return [fplist[i] for i in okind]\n\n\n<mask token>\n\n\ndef generate_road_widle(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_left_x.append(road_left_ix)\n road_left_y.append(road_left_iy)\n road_right_x.append(road_right_ix)\n road_right_y.append(road_right_iy)\n return road_left_x, road_left_y, road_right_x, road_right_y\n\n\ndef generate_target_course(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n return rx, ry, ryaw, rk, csp\n\n\ndef load_global_path():\n global zero_cord_x, zero_cord_y\n bet = 0.1\n blank = []\n white = []\n yellow = []\n GPS_x = []\n GPS_y = []\n nums, ber = np.loadtxt(\n '/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'\n , dtype=str, delimiter=',', unpack=True)\n for i in range(len(nums)):\n if not nums[i] in blank:\n yellow.append(float(nums[i]))\n white.append(float(ber[i]))\n bx = yellow[0]\n by = white[0]\n for i in range(len(yellow)):\n dx = yellow[i] - bx\n dy = white[i] - by\n dis = math.sqrt(dx 
** 2 + dy ** 2)\n if dis > bet:\n GPS_x.append(yellow[i])\n GPS_y.append(white[i])\n bx = yellow[i]\n by = white[i]\n GPS_x = np.array(GPS_x)\n GPS_y = np.array(GPS_y)\n zero_cord_x = GPS_x[0]\n zero_cord_y = GPS_y[0]\n GPS_x = GPS_x - zero_cord_x\n GPS_y = GPS_y - zero_cord_y\n plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')\n plt.plot()\n plt.show()\n return GPS_x, GPS_y\n\n\nclass Info(object):\n\n def __init__(self):\n self.CurrGPS_lat = float(-1)\n self.CurrGPS_lon = float(-1)\n self.CurrentVelocity = float(-1)\n self.Target_Velocity = float(-1)\n self.ImuYaw = float(-1)\n self.Target_Theta = float(-1)\n self.gob = np.array([])\n self.ob = np.array([])\n self.gobx = np.array([])\n self.goby = np.array([])\n rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)\n sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.\n FeedbackCallbackGPSIMU, queue_size=10)\n rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.\n RVcallback, queue_size=10)\n\n def FeedbackCallbackGPSIMU(self, msg):\n self.CurrGPS_lat = msg.latitude\n self.CurrGPS_lon = msg.longitude\n self.ImuYaw = (90 - msg.course_angle) * np.pi / 180\n\n def FeedbackCallbackObs(self, msg):\n global Gob_x\n global Gob_y\n self.gobx = msg.x\n self.goby = msg.y\n Gob_x.append(self.gobx)\n Gob_y.append(self.goby)\n self.gob = np.column_stack((Gob_x, Gob_y))\n\n def RVcallback(self, msg):\n self.CurrentVelocity = msg.Base_Vehspd\n\n def init(self):\n return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,\n self.goby, self.gob, self.CurrentVelocity)\n\n def talker(self, Target_Velocity, path_record):\n self.rate = rospy.Rate(100)\n self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,\n queue_size=10)\n self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)\n self.pub_Velocity.publish(Target_Velocity)\n self.path_pub.publish(path_record)\n\n\ndef get_transalation(curr_gps_lat, curr_gps_lon):\n curr_posy = float(curr_gps_lon) - zero_cord_y\n curr_posx = 
float(curr_gps_lat) - zero_cord_x\n return curr_posx, curr_posy\n\n\ndef get_transformation(pt, curr_yaw, T):\n c, s = np.cos(curr_yaw), np.sin(curr_yaw)\n R = np.array(((c, -s), (s, c)))\n pt = pt.dot(R) + T\n return pt\n\n\n<mask token>\n\n\ndef get_lateral_dist(tx, ty, curr_posx, curr_posy):\n dist = []\n for x in range(0, len(tx) - 1):\n dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -\n ty[x]))\n lat_dist = min(dist)\n st = dist.index(min(dist))\n theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])\n theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])\n if lat_dist < THRESH_DIST:\n lat_dist = 0\n curr_posx = tx[st]\n curr_posy = ty[st]\n if theta2 < theta1:\n lat_dist = -lat_dist\n return st, lat_dist, curr_posx, curr_posy\n\n\ndef proportional_control(target, current):\n a = 1.0 * (target - current)\n return a\n\n\ndef main():\n ptx = []\n pty = []\n ptx, pty = load_global_path()\n tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)\n road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(\n ptx, pty)\n c_speed = 5.0 / 3.6\n c_acc = 1.0\n c_d_dd = 0\n c_d_d = 0\n area = 25.0\n start = time.time()\n rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)\n my_node = Info()\n while not rospy.is_shutdown():\n (CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity\n ) = my_node.init()\n ob = []\n if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n T = [curr_posx, curr_posy]\n curr_yaw = ImuYaw\n if len(gob) == 0:\n ob = [[-20, -20]]\n else:\n ob = gob\n ob_len = len(ob) - 1\n for x in xrange(0, ob_len):\n ob = np.array(ob)\n ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)\n try:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat,\n CurrGPS_lon)\n spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,\n curr_posx, curr_posy)\n s0 = get_arc_length(tx, ty, spt)\n path = 
frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,\n c_d_dd, ob)\n c_speed = path.s_d[1]\n c_d_d = path.d_d[1]\n c_d_dd = path.d_dd[1]\n if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:\n print('Goal')\n c_speed = 0.0\n break\n if show_animation:\n plt.cla()\n plt.plot(tx, ty, '-.k')\n plt.plot(road_left_x, road_left_y, '-k')\n plt.plot(road_right_x, road_right_y, '-k')\n plt.plot(ob[:, 0], ob[:, 1], 'ob')\n plt.plot(path.x[1:], path.y[1:], '-or')\n plt.plot(path.x[1], path.y[1], 'vc')\n plt.xlim(path.x[1] - area, path.x[1] + area)\n plt.ylim(path.y[1] - area, path.y[1] + area)\n plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),\n math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,\n head_length=1.0)\n plt.title('v[km/h]:' + str(c_speed)[0:4])\n plt.xlabel(u'x/m', fontsize=14)\n plt.ylabel(u'y/m', fontsize=14)\n plt.pause(0.0001)\n PathFail_flag = 0\n except:\n PathFail_flag = 1\n print(\"Don't find optimal path\")\n global Gob_x\n global Gob_y\n Gob_x *= 0\n Gob_y *= 0\n try:\n \"\"\"\n acc = proportional_control(6, CurrentVelocity)\n temp1=path.yaw[1] `\n temp2=curr_yaw \n \n if temp1<0:\n temp1=6.28+temp1\n if temp2<0:\n temp2=6.28+temp2\n\n val = temp1-temp2\n \n if val > 3.14:\n val = val - 6.28\n if val < -3.14:\n val = val + 6.28\n \n val = math.degrees(val)\n \n if val > 50:\n val = 50\n if val < -50:\n val = -50\n \n my_node.talker(acc,val)\n \"\"\"\n path_record = localPath()\n for i in range(len(path.x[1:])):\n path_record.path_x.append(path.x[i])\n path_record.path_y.append(path.y[i])\n if len(path_record.path_x) > 10000:\n path_record.path_x.pop(0)\n path_record.path_y.pop(0)\n my_node.talker(c_speed, path_record)\n except:\n print('local path send fail')\n pass\n print('Finish')\n end = time.time()\n if show_animation:\n plt.grid(True)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass FrenetPath:\n\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\n\ndef calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):\n frenet_paths = []\n for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):\n for Ti in np.arange(MINT, MAXT, DT):\n fp = FrenetPath()\n lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)\n fp.t = [t for t in np.arange(0.0, Ti, DT)]\n fp.d = [lat_qp.calc_point(t) for t in fp.t]\n fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]\n fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]\n fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]\n for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, \n TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):\n tfp = copy.deepcopy(fp)\n lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)\n tfp.s = [lon_qp.calc_point(t) for t in fp.t]\n tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]\n tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]\n tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]\n Jp = sum(np.power(tfp.d_ddd, 2))\n Js = sum(np.power(tfp.s_ddd, 2))\n ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2\n tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2\n tfp.cv = KJ * Js + KT * Ti + KD * ds\n tfp.cf = KLAT * tfp.cd + KLON * tfp.cv\n frenet_paths.append(tfp)\n return frenet_paths\n\n\n<mask token>\n\n\ndef check_collision(fp, ob):\n for i in range(len(ob[:, 0])):\n d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in\n zip(fp.x, fp.y)]\n collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])\n if collision:\n return False\n return True\n\n\ndef check_paths(fplist, ob):\n \"\"\"\n check path above max speed, max a, does collision or not\n \"\"\"\n okind = []\n for i 
in range(len(fplist)):\n if any([(v > MAX_SPEED) for v in fplist[i].s_d]):\n continue\n elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):\n continue\n elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):\n continue\n elif not check_collision(fplist[i], ob):\n continue\n okind.append(i)\n return [fplist[i] for i in okind]\n\n\ndef frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):\n ob = np.array(ob)\n fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)\n fplist = calc_global_paths(fplist, csp)\n fplist = check_paths(fplist, ob)\n mincost = float('inf')\n bestpath = None\n for fp in fplist:\n if mincost >= fp.cf:\n mincost = fp.cf\n bestpath = fp\n return bestpath\n\n\ndef generate_road_widle(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_left_x.append(road_left_ix)\n road_left_y.append(road_left_iy)\n road_right_x.append(road_right_ix)\n road_right_y.append(road_right_iy)\n return road_left_x, road_left_y, road_right_x, road_right_y\n\n\ndef generate_target_course(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n return rx, ry, ryaw, rk, csp\n\n\ndef load_global_path():\n global zero_cord_x, zero_cord_y\n bet = 0.1\n blank = []\n white = []\n yellow = []\n GPS_x = []\n GPS_y = []\n nums, ber = np.loadtxt(\n 
'/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'\n , dtype=str, delimiter=',', unpack=True)\n for i in range(len(nums)):\n if not nums[i] in blank:\n yellow.append(float(nums[i]))\n white.append(float(ber[i]))\n bx = yellow[0]\n by = white[0]\n for i in range(len(yellow)):\n dx = yellow[i] - bx\n dy = white[i] - by\n dis = math.sqrt(dx ** 2 + dy ** 2)\n if dis > bet:\n GPS_x.append(yellow[i])\n GPS_y.append(white[i])\n bx = yellow[i]\n by = white[i]\n GPS_x = np.array(GPS_x)\n GPS_y = np.array(GPS_y)\n zero_cord_x = GPS_x[0]\n zero_cord_y = GPS_y[0]\n GPS_x = GPS_x - zero_cord_x\n GPS_y = GPS_y - zero_cord_y\n plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')\n plt.plot()\n plt.show()\n return GPS_x, GPS_y\n\n\nclass Info(object):\n\n def __init__(self):\n self.CurrGPS_lat = float(-1)\n self.CurrGPS_lon = float(-1)\n self.CurrentVelocity = float(-1)\n self.Target_Velocity = float(-1)\n self.ImuYaw = float(-1)\n self.Target_Theta = float(-1)\n self.gob = np.array([])\n self.ob = np.array([])\n self.gobx = np.array([])\n self.goby = np.array([])\n rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)\n sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.\n FeedbackCallbackGPSIMU, queue_size=10)\n rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.\n RVcallback, queue_size=10)\n\n def FeedbackCallbackGPSIMU(self, msg):\n self.CurrGPS_lat = msg.latitude\n self.CurrGPS_lon = msg.longitude\n self.ImuYaw = (90 - msg.course_angle) * np.pi / 180\n\n def FeedbackCallbackObs(self, msg):\n global Gob_x\n global Gob_y\n self.gobx = msg.x\n self.goby = msg.y\n Gob_x.append(self.gobx)\n Gob_y.append(self.goby)\n self.gob = np.column_stack((Gob_x, Gob_y))\n\n def RVcallback(self, msg):\n self.CurrentVelocity = msg.Base_Vehspd\n\n def init(self):\n return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,\n self.goby, self.gob, self.CurrentVelocity)\n\n def talker(self, Target_Velocity, path_record):\n 
self.rate = rospy.Rate(100)\n self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,\n queue_size=10)\n self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)\n self.pub_Velocity.publish(Target_Velocity)\n self.path_pub.publish(path_record)\n\n\ndef get_transalation(curr_gps_lat, curr_gps_lon):\n curr_posy = float(curr_gps_lon) - zero_cord_y\n curr_posx = float(curr_gps_lat) - zero_cord_x\n return curr_posx, curr_posy\n\n\ndef get_transformation(pt, curr_yaw, T):\n c, s = np.cos(curr_yaw), np.sin(curr_yaw)\n R = np.array(((c, -s), (s, c)))\n pt = pt.dot(R) + T\n return pt\n\n\n<mask token>\n\n\ndef get_lateral_dist(tx, ty, curr_posx, curr_posy):\n dist = []\n for x in range(0, len(tx) - 1):\n dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -\n ty[x]))\n lat_dist = min(dist)\n st = dist.index(min(dist))\n theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])\n theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])\n if lat_dist < THRESH_DIST:\n lat_dist = 0\n curr_posx = tx[st]\n curr_posy = ty[st]\n if theta2 < theta1:\n lat_dist = -lat_dist\n return st, lat_dist, curr_posx, curr_posy\n\n\ndef proportional_control(target, current):\n a = 1.0 * (target - current)\n return a\n\n\ndef main():\n ptx = []\n pty = []\n ptx, pty = load_global_path()\n tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)\n road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(\n ptx, pty)\n c_speed = 5.0 / 3.6\n c_acc = 1.0\n c_d_dd = 0\n c_d_d = 0\n area = 25.0\n start = time.time()\n rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)\n my_node = Info()\n while not rospy.is_shutdown():\n (CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity\n ) = my_node.init()\n ob = []\n if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n T = [curr_posx, curr_posy]\n curr_yaw = ImuYaw\n if len(gob) == 0:\n ob = [[-20, 
-20]]\n else:\n ob = gob\n ob_len = len(ob) - 1\n for x in xrange(0, ob_len):\n ob = np.array(ob)\n ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)\n try:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat,\n CurrGPS_lon)\n spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,\n curr_posx, curr_posy)\n s0 = get_arc_length(tx, ty, spt)\n path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,\n c_d_dd, ob)\n c_speed = path.s_d[1]\n c_d_d = path.d_d[1]\n c_d_dd = path.d_dd[1]\n if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:\n print('Goal')\n c_speed = 0.0\n break\n if show_animation:\n plt.cla()\n plt.plot(tx, ty, '-.k')\n plt.plot(road_left_x, road_left_y, '-k')\n plt.plot(road_right_x, road_right_y, '-k')\n plt.plot(ob[:, 0], ob[:, 1], 'ob')\n plt.plot(path.x[1:], path.y[1:], '-or')\n plt.plot(path.x[1], path.y[1], 'vc')\n plt.xlim(path.x[1] - area, path.x[1] + area)\n plt.ylim(path.y[1] - area, path.y[1] + area)\n plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),\n math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,\n head_length=1.0)\n plt.title('v[km/h]:' + str(c_speed)[0:4])\n plt.xlabel(u'x/m', fontsize=14)\n plt.ylabel(u'y/m', fontsize=14)\n plt.pause(0.0001)\n PathFail_flag = 0\n except:\n PathFail_flag = 1\n print(\"Don't find optimal path\")\n global Gob_x\n global Gob_y\n Gob_x *= 0\n Gob_y *= 0\n try:\n \"\"\"\n acc = proportional_control(6, CurrentVelocity)\n temp1=path.yaw[1] `\n temp2=curr_yaw \n \n if temp1<0:\n temp1=6.28+temp1\n if temp2<0:\n temp2=6.28+temp2\n\n val = temp1-temp2\n \n if val > 3.14:\n val = val - 6.28\n if val < -3.14:\n val = val + 6.28\n \n val = math.degrees(val)\n \n if val > 50:\n val = 50\n if val < -50:\n val = -50\n \n my_node.talker(acc,val)\n \"\"\"\n path_record = localPath()\n for i in range(len(path.x[1:])):\n path_record.path_x.append(path.x[i])\n path_record.path_y.append(path.y[i])\n if len(path_record.path_x) > 10000:\n path_record.path_x.pop(0)\n path_record.path_y.pop(0)\n 
my_node.talker(c_speed, path_record)\n except:\n print('local path send fail')\n pass\n print('Finish')\n end = time.time()\n if show_animation:\n plt.grid(True)\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FrenetPath:\n\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\n\ndef calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):\n frenet_paths = []\n for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):\n for Ti in np.arange(MINT, MAXT, DT):\n fp = FrenetPath()\n lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)\n fp.t = [t for t in np.arange(0.0, Ti, DT)]\n fp.d = [lat_qp.calc_point(t) for t in fp.t]\n fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]\n fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]\n fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]\n for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, \n TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):\n tfp = copy.deepcopy(fp)\n lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)\n tfp.s = [lon_qp.calc_point(t) for t in fp.t]\n tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]\n tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]\n tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]\n Jp = sum(np.power(tfp.d_ddd, 2))\n Js = sum(np.power(tfp.s_ddd, 2))\n ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2\n tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2\n tfp.cv = KJ * Js + KT * Ti + KD * ds\n tfp.cf = KLAT * tfp.cd + KLON * tfp.cv\n frenet_paths.append(tfp)\n return frenet_paths\n\n\ndef calc_global_paths(fplist, csp):\n for fp in fplist:\n for i in range(len(fp.s)):\n ix, iy = csp.calc_position(fp.s[i])\n if ix is None:\n break\n iyaw = csp.calc_yaw(fp.s[i])\n di = fp.d[i]\n fx = ix + di * math.cos(iyaw + math.pi / 2.0)\n fy = iy + di * math.sin(iyaw + math.pi / 2.0)\n fp.x.append(fx)\n fp.y.append(fy)\n for i in range(len(fp.x) - 1):\n dx = fp.x[i + 1] - fp.x[i]\n dy = fp.y[i 
+ 1] - fp.y[i]\n fp.yaw.append(math.atan2(dy, dx))\n fp.ds.append(math.sqrt(dx ** 2 + dy ** 2))\n fp.yaw.append(fp.yaw[-1])\n fp.ds.append(fp.ds[-1])\n for i in range(len(fp.yaw) - 1):\n fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])\n return fplist\n\n\ndef check_collision(fp, ob):\n for i in range(len(ob[:, 0])):\n d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in\n zip(fp.x, fp.y)]\n collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])\n if collision:\n return False\n return True\n\n\ndef check_paths(fplist, ob):\n \"\"\"\n check path above max speed, max a, does collision or not\n \"\"\"\n okind = []\n for i in range(len(fplist)):\n if any([(v > MAX_SPEED) for v in fplist[i].s_d]):\n continue\n elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):\n continue\n elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):\n continue\n elif not check_collision(fplist[i], ob):\n continue\n okind.append(i)\n return [fplist[i] for i in okind]\n\n\ndef frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):\n ob = np.array(ob)\n fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)\n fplist = calc_global_paths(fplist, csp)\n fplist = check_paths(fplist, ob)\n mincost = float('inf')\n bestpath = None\n for fp in fplist:\n if mincost >= fp.cf:\n mincost = fp.cf\n bestpath = fp\n return bestpath\n\n\ndef generate_road_widle(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_left_x.append(road_left_ix)\n 
road_left_y.append(road_left_iy)\n road_right_x.append(road_right_ix)\n road_right_y.append(road_right_iy)\n return road_left_x, road_left_y, road_right_x, road_right_y\n\n\ndef generate_target_course(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n return rx, ry, ryaw, rk, csp\n\n\ndef load_global_path():\n global zero_cord_x, zero_cord_y\n bet = 0.1\n blank = []\n white = []\n yellow = []\n GPS_x = []\n GPS_y = []\n nums, ber = np.loadtxt(\n '/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'\n , dtype=str, delimiter=',', unpack=True)\n for i in range(len(nums)):\n if not nums[i] in blank:\n yellow.append(float(nums[i]))\n white.append(float(ber[i]))\n bx = yellow[0]\n by = white[0]\n for i in range(len(yellow)):\n dx = yellow[i] - bx\n dy = white[i] - by\n dis = math.sqrt(dx ** 2 + dy ** 2)\n if dis > bet:\n GPS_x.append(yellow[i])\n GPS_y.append(white[i])\n bx = yellow[i]\n by = white[i]\n GPS_x = np.array(GPS_x)\n GPS_y = np.array(GPS_y)\n zero_cord_x = GPS_x[0]\n zero_cord_y = GPS_y[0]\n GPS_x = GPS_x - zero_cord_x\n GPS_y = GPS_y - zero_cord_y\n plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')\n plt.plot()\n plt.show()\n return GPS_x, GPS_y\n\n\nclass Info(object):\n\n def __init__(self):\n self.CurrGPS_lat = float(-1)\n self.CurrGPS_lon = float(-1)\n self.CurrentVelocity = float(-1)\n self.Target_Velocity = float(-1)\n self.ImuYaw = float(-1)\n self.Target_Theta = float(-1)\n self.gob = np.array([])\n self.ob = np.array([])\n self.gobx = np.array([])\n self.goby = np.array([])\n rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)\n sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.\n FeedbackCallbackGPSIMU, queue_size=10)\n rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.\n RVcallback, 
queue_size=10)\n\n def FeedbackCallbackGPSIMU(self, msg):\n self.CurrGPS_lat = msg.latitude\n self.CurrGPS_lon = msg.longitude\n self.ImuYaw = (90 - msg.course_angle) * np.pi / 180\n\n def FeedbackCallbackObs(self, msg):\n global Gob_x\n global Gob_y\n self.gobx = msg.x\n self.goby = msg.y\n Gob_x.append(self.gobx)\n Gob_y.append(self.goby)\n self.gob = np.column_stack((Gob_x, Gob_y))\n\n def RVcallback(self, msg):\n self.CurrentVelocity = msg.Base_Vehspd\n\n def init(self):\n return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,\n self.goby, self.gob, self.CurrentVelocity)\n\n def talker(self, Target_Velocity, path_record):\n self.rate = rospy.Rate(100)\n self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,\n queue_size=10)\n self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)\n self.pub_Velocity.publish(Target_Velocity)\n self.path_pub.publish(path_record)\n\n\ndef get_transalation(curr_gps_lat, curr_gps_lon):\n curr_posy = float(curr_gps_lon) - zero_cord_y\n curr_posx = float(curr_gps_lat) - zero_cord_x\n return curr_posx, curr_posy\n\n\ndef get_transformation(pt, curr_yaw, T):\n c, s = np.cos(curr_yaw), np.sin(curr_yaw)\n R = np.array(((c, -s), (s, c)))\n pt = pt.dot(R) + T\n return pt\n\n\ndef get_arc_length(tx, ty, st):\n arc_length = 0\n for x in range(1, st):\n arc_length = arc_length + np.hypot(tx[x - 1] - tx[x], ty[x - 1] - ty[x]\n )\n return arc_length\n\n\ndef get_lateral_dist(tx, ty, curr_posx, curr_posy):\n dist = []\n for x in range(0, len(tx) - 1):\n dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -\n ty[x]))\n lat_dist = min(dist)\n st = dist.index(min(dist))\n theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])\n theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])\n if lat_dist < THRESH_DIST:\n lat_dist = 0\n curr_posx = tx[st]\n curr_posy = ty[st]\n if theta2 < theta1:\n lat_dist = -lat_dist\n return st, lat_dist, curr_posx, curr_posy\n\n\ndef 
proportional_control(target, current):\n a = 1.0 * (target - current)\n return a\n\n\ndef main():\n ptx = []\n pty = []\n ptx, pty = load_global_path()\n tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)\n road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(\n ptx, pty)\n c_speed = 5.0 / 3.6\n c_acc = 1.0\n c_d_dd = 0\n c_d_d = 0\n area = 25.0\n start = time.time()\n rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)\n my_node = Info()\n while not rospy.is_shutdown():\n (CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity\n ) = my_node.init()\n ob = []\n if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n T = [curr_posx, curr_posy]\n curr_yaw = ImuYaw\n if len(gob) == 0:\n ob = [[-20, -20]]\n else:\n ob = gob\n ob_len = len(ob) - 1\n for x in xrange(0, ob_len):\n ob = np.array(ob)\n ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)\n try:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat,\n CurrGPS_lon)\n spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,\n curr_posx, curr_posy)\n s0 = get_arc_length(tx, ty, spt)\n path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,\n c_d_dd, ob)\n c_speed = path.s_d[1]\n c_d_d = path.d_d[1]\n c_d_dd = path.d_dd[1]\n if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:\n print('Goal')\n c_speed = 0.0\n break\n if show_animation:\n plt.cla()\n plt.plot(tx, ty, '-.k')\n plt.plot(road_left_x, road_left_y, '-k')\n plt.plot(road_right_x, road_right_y, '-k')\n plt.plot(ob[:, 0], ob[:, 1], 'ob')\n plt.plot(path.x[1:], path.y[1:], '-or')\n plt.plot(path.x[1], path.y[1], 'vc')\n plt.xlim(path.x[1] - area, path.x[1] + area)\n plt.ylim(path.y[1] - area, path.y[1] + area)\n plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),\n math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,\n head_length=1.0)\n plt.title('v[km/h]:' + str(c_speed)[0:4])\n plt.xlabel(u'x/m', fontsize=14)\n 
plt.ylabel(u'y/m', fontsize=14)\n plt.pause(0.0001)\n PathFail_flag = 0\n except:\n PathFail_flag = 1\n print(\"Don't find optimal path\")\n global Gob_x\n global Gob_y\n Gob_x *= 0\n Gob_y *= 0\n try:\n \"\"\"\n acc = proportional_control(6, CurrentVelocity)\n temp1=path.yaw[1] `\n temp2=curr_yaw \n \n if temp1<0:\n temp1=6.28+temp1\n if temp2<0:\n temp2=6.28+temp2\n\n val = temp1-temp2\n \n if val > 3.14:\n val = val - 6.28\n if val < -3.14:\n val = val + 6.28\n \n val = math.degrees(val)\n \n if val > 50:\n val = 50\n if val < -50:\n val = -50\n \n my_node.talker(acc,val)\n \"\"\"\n path_record = localPath()\n for i in range(len(path.x[1:])):\n path_record.path_x.append(path.x[i])\n path_record.path_y.append(path.y[i])\n if len(path_record.path_x) > 10000:\n path_record.path_x.pop(0)\n path_record.path_y.pop(0)\n my_node.talker(c_speed, path_record)\n except:\n print('local path send fail')\n pass\n print('Finish')\n end = time.time()\n if show_animation:\n plt.grid(True)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nMAX_SPEED = 30.0\nMAX_ACCEL = 50.0\nMAX_CURVATURE = 30.0\nMAX_ROAD_WIDTH = 10.0\nD_ROAD_W = 2.0\nDT = 0.3\nMAXT = 6.0\nMINT = 4.0\nTARGET_SPEED = 15.0 / 3.6\nD_T_S = 10.0 / 3.6\nN_S_SAMPLE = 0.1\nROBOT_RADIUS = 2.3\nTHRESH_DIST = 0.01\nKJ = 0.8\nKT = 0.1\nKD = 20.0\nKLAT = 0.8\nKLON = 0.2\nshow_animation = True\nGob_x = []\nGob_y = []\nPathFail_flag = 0\n\n\nclass FrenetPath:\n\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\n\ndef calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):\n frenet_paths = []\n for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):\n for Ti in np.arange(MINT, MAXT, DT):\n fp = FrenetPath()\n lat_qp = QuinticPolynomial(c_d, c_d_d, c_d_dd, di, 0.0, 0.0, Ti)\n fp.t = [t for t in np.arange(0.0, Ti, DT)]\n fp.d = [lat_qp.calc_point(t) for t in fp.t]\n fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]\n fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]\n fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]\n for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, \n TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):\n tfp = copy.deepcopy(fp)\n lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)\n tfp.s = [lon_qp.calc_point(t) for t in fp.t]\n tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]\n tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]\n tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]\n Jp = sum(np.power(tfp.d_ddd, 2))\n Js = sum(np.power(tfp.s_ddd, 2))\n ds = (TARGET_SPEED - tfp.s_d[-1]) ** 2\n tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1] ** 2\n tfp.cv = KJ * Js + KT * Ti + KD * ds\n tfp.cf = KLAT * tfp.cd + KLON * tfp.cv\n frenet_paths.append(tfp)\n return frenet_paths\n\n\ndef calc_global_paths(fplist, csp):\n for fp 
in fplist:\n for i in range(len(fp.s)):\n ix, iy = csp.calc_position(fp.s[i])\n if ix is None:\n break\n iyaw = csp.calc_yaw(fp.s[i])\n di = fp.d[i]\n fx = ix + di * math.cos(iyaw + math.pi / 2.0)\n fy = iy + di * math.sin(iyaw + math.pi / 2.0)\n fp.x.append(fx)\n fp.y.append(fy)\n for i in range(len(fp.x) - 1):\n dx = fp.x[i + 1] - fp.x[i]\n dy = fp.y[i + 1] - fp.y[i]\n fp.yaw.append(math.atan2(dy, dx))\n fp.ds.append(math.sqrt(dx ** 2 + dy ** 2))\n fp.yaw.append(fp.yaw[-1])\n fp.ds.append(fp.ds[-1])\n for i in range(len(fp.yaw) - 1):\n fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])\n return fplist\n\n\ndef check_collision(fp, ob):\n for i in range(len(ob[:, 0])):\n d = [((ix - ob[i, 0]) ** 2 + (iy - ob[i, 1]) ** 2) for ix, iy in\n zip(fp.x, fp.y)]\n collision = any([(di <= ROBOT_RADIUS ** 2) for di in d])\n if collision:\n return False\n return True\n\n\ndef check_paths(fplist, ob):\n \"\"\"\n check path above max speed, max a, does collision or not\n \"\"\"\n okind = []\n for i in range(len(fplist)):\n if any([(v > MAX_SPEED) for v in fplist[i].s_d]):\n continue\n elif any([(abs(a) > MAX_ACCEL) for a in fplist[i].s_dd]):\n continue\n elif any([(abs(c) > MAX_CURVATURE) for c in fplist[i].c]):\n continue\n elif not check_collision(fplist[i], ob):\n continue\n okind.append(i)\n return [fplist[i] for i in okind]\n\n\ndef frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):\n ob = np.array(ob)\n fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)\n fplist = calc_global_paths(fplist, csp)\n fplist = check_paths(fplist, ob)\n mincost = float('inf')\n bestpath = None\n for fp in fplist:\n if mincost >= fp.cf:\n mincost = fp.cf\n bestpath = fp\n return bestpath\n\n\ndef generate_road_widle(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n road_left_ix = ix + MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s) 
+\n math.pi / 2.0)\n road_left_iy = iy + MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s) +\n math.pi / 2.0)\n road_right_ix = ix - MAX_ROAD_WIDTH / 2 * math.cos(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_right_iy = iy - MAX_ROAD_WIDTH / 2 * math.sin(csp.calc_yaw(i_s\n ) + math.pi / 2.0)\n road_left_x.append(road_left_ix)\n road_left_y.append(road_left_iy)\n road_right_x.append(road_right_ix)\n road_right_y.append(road_right_iy)\n return road_left_x, road_left_y, road_right_x, road_right_y\n\n\ndef generate_target_course(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n return rx, ry, ryaw, rk, csp\n\n\ndef load_global_path():\n global zero_cord_x, zero_cord_y\n bet = 0.1\n blank = []\n white = []\n yellow = []\n GPS_x = []\n GPS_y = []\n nums, ber = np.loadtxt(\n '/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt'\n , dtype=str, delimiter=',', unpack=True)\n for i in range(len(nums)):\n if not nums[i] in blank:\n yellow.append(float(nums[i]))\n white.append(float(ber[i]))\n bx = yellow[0]\n by = white[0]\n for i in range(len(yellow)):\n dx = yellow[i] - bx\n dy = white[i] - by\n dis = math.sqrt(dx ** 2 + dy ** 2)\n if dis > bet:\n GPS_x.append(yellow[i])\n GPS_y.append(white[i])\n bx = yellow[i]\n by = white[i]\n GPS_x = np.array(GPS_x)\n GPS_y = np.array(GPS_y)\n zero_cord_x = GPS_x[0]\n zero_cord_y = GPS_y[0]\n GPS_x = GPS_x - zero_cord_x\n GPS_y = GPS_y - zero_cord_y\n plt.plot(GPS_x, GPS_y, '-r', label='GPS point ')\n plt.plot()\n plt.show()\n return GPS_x, GPS_y\n\n\nclass Info(object):\n\n def __init__(self):\n self.CurrGPS_lat = float(-1)\n self.CurrGPS_lon = float(-1)\n self.CurrentVelocity = float(-1)\n self.Target_Velocity = float(-1)\n self.ImuYaw = float(-1)\n self.Target_Theta = float(-1)\n self.gob = 
np.array([])\n self.ob = np.array([])\n self.gobx = np.array([])\n self.goby = np.array([])\n rospy.Subscriber('coordinate', Point, self.FeedbackCallbackObs)\n sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.\n FeedbackCallbackGPSIMU, queue_size=10)\n rospy.Subscriber('Motor_Feedback_mssage', Motor_Feedback, self.\n RVcallback, queue_size=10)\n\n def FeedbackCallbackGPSIMU(self, msg):\n self.CurrGPS_lat = msg.latitude\n self.CurrGPS_lon = msg.longitude\n self.ImuYaw = (90 - msg.course_angle) * np.pi / 180\n\n def FeedbackCallbackObs(self, msg):\n global Gob_x\n global Gob_y\n self.gobx = msg.x\n self.goby = msg.y\n Gob_x.append(self.gobx)\n Gob_y.append(self.goby)\n self.gob = np.column_stack((Gob_x, Gob_y))\n\n def RVcallback(self, msg):\n self.CurrentVelocity = msg.Base_Vehspd\n\n def init(self):\n return (self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx,\n self.goby, self.gob, self.CurrentVelocity)\n\n def talker(self, Target_Velocity, path_record):\n self.rate = rospy.Rate(100)\n self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32,\n queue_size=10)\n self.path_pub = rospy.Publisher('trajectory', localPath, queue_size=50)\n self.pub_Velocity.publish(Target_Velocity)\n self.path_pub.publish(path_record)\n\n\ndef get_transalation(curr_gps_lat, curr_gps_lon):\n curr_posy = float(curr_gps_lon) - zero_cord_y\n curr_posx = float(curr_gps_lat) - zero_cord_x\n return curr_posx, curr_posy\n\n\ndef get_transformation(pt, curr_yaw, T):\n c, s = np.cos(curr_yaw), np.sin(curr_yaw)\n R = np.array(((c, -s), (s, c)))\n pt = pt.dot(R) + T\n return pt\n\n\ndef get_arc_length(tx, ty, st):\n arc_length = 0\n for x in range(1, st):\n arc_length = arc_length + np.hypot(tx[x - 1] - tx[x], ty[x - 1] - ty[x]\n )\n return arc_length\n\n\ndef get_lateral_dist(tx, ty, curr_posx, curr_posy):\n dist = []\n for x in range(0, len(tx) - 1):\n dist.append(np.hypot(float(curr_posx) - tx[x], float(curr_posy) -\n ty[x]))\n lat_dist = min(dist)\n st = dist.index(min(dist))\n 
theta1 = math.atan2(ty[st] - ty[st - 1], tx[st] - tx[st - 1])\n theta2 = math.atan2(curr_posy - ty[st - 1], curr_posx - tx[st - 1])\n if lat_dist < THRESH_DIST:\n lat_dist = 0\n curr_posx = tx[st]\n curr_posy = ty[st]\n if theta2 < theta1:\n lat_dist = -lat_dist\n return st, lat_dist, curr_posx, curr_posy\n\n\ndef proportional_control(target, current):\n a = 1.0 * (target - current)\n return a\n\n\ndef main():\n ptx = []\n pty = []\n ptx, pty = load_global_path()\n tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)\n road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(\n ptx, pty)\n c_speed = 5.0 / 3.6\n c_acc = 1.0\n c_d_dd = 0\n c_d_d = 0\n area = 25.0\n start = time.time()\n rospy.init_node('AvoidObstacles_PlannerOut', anonymous=False)\n my_node = Info()\n while not rospy.is_shutdown():\n (CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity\n ) = my_node.init()\n ob = []\n if CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n T = [curr_posx, curr_posy]\n curr_yaw = ImuYaw\n if len(gob) == 0:\n ob = [[-20, -20]]\n else:\n ob = gob\n ob_len = len(ob) - 1\n for x in xrange(0, ob_len):\n ob = np.array(ob)\n ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)\n try:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat,\n CurrGPS_lon)\n spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty,\n curr_posx, curr_posy)\n s0 = get_arc_length(tx, ty, spt)\n path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d,\n c_d_dd, ob)\n c_speed = path.s_d[1]\n c_d_d = path.d_d[1]\n c_d_dd = path.d_dd[1]\n if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:\n print('Goal')\n c_speed = 0.0\n break\n if show_animation:\n plt.cla()\n plt.plot(tx, ty, '-.k')\n plt.plot(road_left_x, road_left_y, '-k')\n plt.plot(road_right_x, road_right_y, '-k')\n plt.plot(ob[:, 0], ob[:, 1], 'ob')\n plt.plot(path.x[1:], path.y[1:], '-or')\n plt.plot(path.x[1], 
path.y[1], 'vc')\n plt.xlim(path.x[1] - area, path.x[1] + area)\n plt.ylim(path.y[1] - area, path.y[1] + area)\n plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw),\n math.sin(curr_yaw), fc='r', ec='k', head_width=0.5,\n head_length=1.0)\n plt.title('v[km/h]:' + str(c_speed)[0:4])\n plt.xlabel(u'x/m', fontsize=14)\n plt.ylabel(u'y/m', fontsize=14)\n plt.pause(0.0001)\n PathFail_flag = 0\n except:\n PathFail_flag = 1\n print(\"Don't find optimal path\")\n global Gob_x\n global Gob_y\n Gob_x *= 0\n Gob_y *= 0\n try:\n \"\"\"\n acc = proportional_control(6, CurrentVelocity)\n temp1=path.yaw[1] `\n temp2=curr_yaw \n \n if temp1<0:\n temp1=6.28+temp1\n if temp2<0:\n temp2=6.28+temp2\n\n val = temp1-temp2\n \n if val > 3.14:\n val = val - 6.28\n if val < -3.14:\n val = val + 6.28\n \n val = math.degrees(val)\n \n if val > 50:\n val = 50\n if val < -50:\n val = -50\n \n my_node.talker(acc,val)\n \"\"\"\n path_record = localPath()\n for i in range(len(path.x[1:])):\n path_record.path_x.append(path.x[i])\n path_record.path_y.append(path.y[i])\n if len(path_record.path_x) > 10000:\n path_record.path_x.pop(0)\n path_record.path_y.pop(0)\n my_node.talker(c_speed, path_record)\n except:\n print('local path send fail')\n pass\n print('Finish')\n end = time.time()\n if show_animation:\n plt.grid(True)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python2\n# -*- coding: UTF-8 -*-\n# coding: utf-8\n#!/usr/bin/env python\n\n\n'''\n发布轨迹信息 \npath.x; path.y; c_speed;\n\n'''\n\n\n\n\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport copy\nimport math\nfrom cubic_spline import Spline2D\nfrom polynomials import QuarticPolynomial, QuinticPolynomial\nimport time\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Int32\nfrom geometry_msgs.msg import Point\nfrom nav_msgs.msg import Path\nfrom local_planner.msg import localPath\nfrom geometry_msgs.msg import PoseStamped, Quaternion\nimport tf\nfrom CAN_driver.msg import Motor_Feedback\nfrom GNSS_driver.msg import GNSS_CAN\nimport sys\n\n\n\n# 参数\nMAX_SPEED = 30.0 # 最大速度 [m/s]\nMAX_ACCEL = 50.0 # 最大加速度 [m/ss]\nMAX_CURVATURE = 30.0 # 最大曲率 [1/m]\nMAX_ROAD_WIDTH = 10.0 # 最大道路宽度 [m]\nD_ROAD_W = 2.0 # 路宽采样间隔 [m]\nDT = 0.3 # Delta T[s]\nMAXT = 6.0 # 最大预测时间 [m]\nMINT = 4.0 # 最小预测时间 [m]\nTARGET_SPEED = 15.0/3.6 # 目标速度 [m/s] 即纵向速度保持\nD_T_S = 10.0/3.6 # 目标opo][]o][o][\\o][o][o速度采样间隔 [m/s]\nN_S_SAMPLE = 0.1 # 目标速度采样数量\nROBOT_RADIUS = 2.3 # 车辆半径 [m]\nTHRESH_DIST=0.01\n\n# 损失函数权重\nKJ = 0.8\nKT = 0.1\nKD = 20.0\nKLAT = 0.8\nKLON = 0.2\nshow_animation = True\n\n\nGob_x = []\nGob_y = []\n\n\n#规划失败标志 1 决策层需要\nPathFail_flag = 0 \n\n\nclass FrenetPath:\n\n def __init__(self):\n self.t = []\n self.d = []\n self.d_d = []\n self.d_dd = []\n self.d_ddd = []\n self.s = []\n self.s_d = []\n self.s_dd = []\n self.s_ddd = []\n self.cd = 0.0\n self.cv = 0.0\n self.cf = 0.0\n\n self.x = []\n self.y = []\n self.yaw = []\n self.ds = []\n self.c = []\n\n\ndef calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0):\n\n frenet_paths = []\n\n # generate path to each offset goal\n for di in np.arange(-MAX_ROAD_WIDTH, MAX_ROAD_WIDTH, D_ROAD_W):\n # 采样,并对每一个目标配置生成轨迹\n # Lateral motion planning\n for Ti in np.arange(MINT, MAXT, DT):\n fp = FrenetPath()\n # 计算出关于目标配置di,Ti的横向多项式\n lat_qp = QuinticPolynomial(c_d, c_d_d, 
c_d_dd, di, 0.0, 0.0, Ti)\n\n fp.t = [t for t in np.arange(0.0, Ti, DT)]\n fp.d = [lat_qp.calc_point(t) for t in fp.t]\n fp.d_d = [lat_qp.calc_first_derivative(t) for t in fp.t]\n fp.d_dd = [lat_qp.calc_second_derivative(t) for t in fp.t]\n fp.d_ddd = [lat_qp.calc_third_derivative(t) for t in fp.t]\n\n # 纵向速度规划 (速度保持)\n # Loongitudinal motion planning (Velocity keeping)\n for tv in np.arange(TARGET_SPEED - D_T_S * N_S_SAMPLE, TARGET_SPEED + D_T_S * N_S_SAMPLE, D_T_S):\n tfp = copy.deepcopy(fp)\n lon_qp = QuarticPolynomial(s0, c_speed, 0.0, tv, 0.0, Ti)\n\n tfp.s = [lon_qp.calc_point(t) for t in fp.t]\n tfp.s_d = [lon_qp.calc_first_derivative(t) for t in fp.t]\n tfp.s_dd = [lon_qp.calc_second_derivative(t) for t in fp.t]\n tfp.s_ddd = [lon_qp.calc_third_derivative(t) for t in fp.t]\n\n\n ###########################################################\n #高速时的损失函数\n ###########################################################\n Jp = sum(np.power(tfp.d_ddd, 2)) # square of jerk\n Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk\n # square of diff from target speed\n ds = (TARGET_SPEED - tfp.s_d[-1])**2\n # 横向的损失函数\n tfp.cd = KJ * Jp + KT * Ti + KD * tfp.d[-1]**2\n # 纵向的损失函数\n tfp.cv = KJ * Js + KT * Ti + KD * ds\n # 总的损失函数为d 和 s方向的损失函数乘对应的系数相加\n\n #########################################################\n #低速时的损失函数\n #########################################################\n # # 低速时的损失函数\n # ltfp = copy.deepcopy(tfp)\n # ltfp.d_sss = [lat_qp.calc_third_derivative(s) for s in tfp.s]\n # Jp_s = sum(np.power(ltfp.d_sss, 2)) # square of jerk\n # Js = sum(np.power(tfp.s_ddd, 2)) # square of jerk\n # # S = s1 - s0\n # dS = tfp.s[-1] - s0\n # #横向的损失函数\n # tfp.cd = KJ * Jp_s + KT * dS + KD * tfp.d[-1] ** 2\n # #纵向的损失函数\n # tfp.cv = KJ * Js + KT * Ti + KD * ds\n \n tfp.cf = KLAT * tfp.cd + KLON * tfp.cv\n frenet_paths.append(tfp)\n return frenet_paths\n\n\ndef calc_global_paths(fplist, csp):\n for fp in fplist:\n # calc global positions\n for i in range(len(fp.s)):\n ix, iy = 
csp.calc_position(fp.s[i])\n if ix is None:\n break\n iyaw = csp.calc_yaw(fp.s[i])\n di = fp.d[i]\n fx = ix + di * math.cos(iyaw + math.pi / 2.0)\n fy = iy + di * math.sin(iyaw + math.pi / 2.0)\n fp.x.append(fx)\n fp.y.append(fy)\n\n # calc yaw and ds\n for i in range(len(fp.x) - 1):\n dx = fp.x[i + 1] - fp.x[i]\n dy = fp.y[i + 1] - fp.y[i]\n fp.yaw.append(math.atan2(dy, dx))\n fp.ds.append(math.sqrt(dx**2 + dy**2))\n\n fp.yaw.append(fp.yaw[-1])\n fp.ds.append(fp.ds[-1])\n\n # calc curvature\n for i in range(len(fp.yaw) - 1):\n fp.c.append((fp.yaw[i + 1] - fp.yaw[i]) / fp.ds[i])\n\n return fplist\n\n\ndef check_collision(fp, ob):\n \n for i in range(len(ob[:, 0])):\n d = [((ix - ob[i, 0])**2 + (iy - ob[i, 1])**2)\n for (ix, iy) in zip(fp.x, fp.y)]\n collision = any([di <= ROBOT_RADIUS**2 for di in d])\n if collision:\n return False\n return True\n\n\ndef check_paths(fplist, ob):\n\n \"\"\"\n check path above max speed, max a, does collision or not\n \"\"\"\n okind = []\n for i in range(len(fplist)):\n if any([v > MAX_SPEED for v in fplist[i].s_d]): # Max speed check\n continue\n elif any([abs(a) > MAX_ACCEL for a in fplist[i].s_dd]): # Max accel check\n continue\n elif any([abs(c) > MAX_CURVATURE for c in fplist[i].c]): # Max curvature check\n continue\n elif not check_collision(fplist[i], ob):\n continue\n okind.append(i)\n return [fplist[i] for i in okind]\n\n\ndef frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob):\n ob = np.array(ob)\n fplist = calc_frenet_paths(c_speed, c_d, c_d_d, c_d_dd, s0)\n fplist = calc_global_paths(fplist, csp)\n fplist = check_paths(fplist, ob)\n\n # find minimum cost path\n mincost = float(\"inf\")\n bestpath = None\n for fp in fplist:\n if mincost >= fp.cf:\n mincost = fp.cf\n bestpath = fp\n return bestpath\n\n\ndef generate_road_widle(x,y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1)\n road_left_x, road_left_y, road_right_x, road_right_y = [], [], [], []\n for i_s in s:\n ix, iy = 
csp.calc_position(i_s)\n road_left_ix = ix + MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)\n road_left_iy = iy + MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)\n road_right_ix = ix - MAX_ROAD_WIDTH/2 * math.cos(csp.calc_yaw(i_s)+math.pi / 2.0)\n road_right_iy = iy - MAX_ROAD_WIDTH/2 * math.sin(csp.calc_yaw(i_s)+math.pi / 2.0)\n road_left_x.append(road_left_ix)\n road_left_y.append(road_left_iy)\n road_right_x.append(road_right_ix)\n road_right_y.append(road_right_iy)\n return road_left_x, road_left_y, road_right_x, road_right_y\n\ndef generate_target_course(x, y):\n csp = Spline2D(x, y)\n s = np.arange(0, csp.s[-1], 0.1) #0.1\n rx, ry, ryaw, rk = [], [], [], []\n for i_s in s:\n ix, iy = csp.calc_position(i_s)\n rx.append(ix)\n ry.append(iy)\n ryaw.append(csp.calc_yaw(i_s))\n rk.append(csp.calc_curvature(i_s))\n return rx, ry, ryaw, rk, csp\n\n\n#######################################################################################\ndef load_global_path():\n global zero_cord_x,zero_cord_y\n bet = 0.1 \n blank = [] #buffer\n white = [] #buffer\n yellow = [] #buffer\n GPS_x = [] #所采集预描点的x\n GPS_y = [] #所采集预描点的x\n #读取预描点\n nums, ber = np.loadtxt(\"/home/robot/Robot/Smart_robot_ws/src/GNSS_driver/save_point_data/rightdoubleliner.txt\", dtype=str, delimiter=',', unpack=True)\n for i in range(len(nums)):\n if not nums[i] in blank: #去除重复点\n #blank.append(nums[i])\n yellow.append(float(nums[i]))\n white.append(float(ber[i]))\n bx = yellow[0] #起始点坐标\n by = white[0]\n for i in range(len(yellow)):\n dx = yellow[i] - bx\n dy = white[i] - by\n dis = math.sqrt(dx ** 2 + dy ** 2) \n if dis > bet: #选取大于设定的距离的点\n GPS_x.append(yellow[i]) #使cx,cy中点均满足要求\n GPS_y.append(white[i])\n bx = yellow[i]\n by = white[i] \n GPS_x = np.array(GPS_x) #将列表转换成数组\n GPS_y = np.array(GPS_y)\n #print(\"cx:\",cx)\n #print(\"cy:\",cy)\n \n zero_cord_x = GPS_x[0]\n zero_cord_y = GPS_y[0]\n GPS_x = GPS_x - zero_cord_x\n GPS_y = GPS_y - zero_cord_y\n plt.plot(GPS_x,GPS_y, \"-r\", 
label=\"GPS point \")\n plt.plot()\n plt.show() \n\n return GPS_x, GPS_y\n\nclass Info(object):\n def __init__(self):\n self.CurrGPS_lat = float(-1)\n self.CurrGPS_lon = float(-1)\n self.CurrentVelocity = float(-1)\n self.Target_Velocity = float(-1)\n self.ImuYaw = float(-1)\n self.Target_Theta = float(-1)\n #self.CommandMessage = Car_Input()\n self.gob = np.array([])\n self.ob = np.array([])\n self.gobx = np.array([])\n self.goby = np.array([])\n\n # Subscribers\n\n rospy.Subscriber(\"coordinate\", Point, self.FeedbackCallbackObs)\n sub = rospy.Subscriber('gnss_message', GNSS_CAN, self.FeedbackCallbackGPSIMU,queue_size = 10) #订阅GPS数据\n rospy.Subscriber(\"Motor_Feedback_mssage\", Motor_Feedback,self.RVcallback,queue_size = 10)\n \n\n \n \n def FeedbackCallbackGPSIMU(self, msg): \n self.CurrGPS_lat = msg.latitude \n self.CurrGPS_lon = msg.longitude \n self.ImuYaw = (90-msg.course_angle)*np.pi/180\n #print(self.CurrGPS_lat,self.CurrGPS_lon,self.ImuYaw)\n\n def FeedbackCallbackObs(self, msg):\n global Gob_x\n global Gob_y\n self.gobx = msg.x\n self.goby = msg.y\n #print(\"msg.x\",\"msg.y\", msg.x, msg.y)\n Gob_x.append(self.gobx)\n Gob_y.append(self.goby) \n #print(\"Gob_x\",\"Gob_y\", Gob_x, Gob_y)\n #np.append(self.gobx,5)\n #np.append(self.goby,5)\n \n self.gob = np.column_stack((Gob_x, Gob_y))\n #print(self.gobx,self.goby)\n #print(self.gob)\n\n def RVcallback(self,msg):\n \n self.CurrentVelocity = msg.Base_Vehspd\n #print(\"*\"*50)\n #print(\"rv:\",rv)\n #rospy.loginfo('I heard: %s', data.data)\n\n\n def init(self):\n return self.CurrGPS_lat, self.CurrGPS_lon, self.ImuYaw, self.gobx, self.goby, self.gob, self.CurrentVelocity\n\n\n def talker(self,Target_Velocity, path_record):\n self.rate = rospy.Rate(100) # 10hz\n self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象\n # 定义发布器 path_pub 发布 trajectory\n self.path_pub = rospy.Publisher('trajectory', localPath, queue_size = 50) #定义Publisher对象\n 
self.pub_Velocity.publish(Target_Velocity)\n # 发布路径\n self.path_pub.publish(path_record)\n #self.rate.sleep()\n\n\n\n# def talker(self,Target_Velocity,Target_Theta):\n# self.pub_Velocity = rospy.Publisher('Car_Velocity', Float32, queue_size = 10) #定义Publisher对象\n# self.pub_Steering = rospy.Publisher('Car_Steering', Float32, queue_size = 10)\n# self.rate = rospy.Rate(100) # 10hz\n# self.pub_Velocity.publish(Target_Velocity)\n# self.pub_Steering.publish(Target_Theta)\n# self.rate.sleep()\n\n\n\n\n\n\n#######################################################################################\ndef get_transalation(curr_gps_lat,curr_gps_lon):\n curr_posy=(float(curr_gps_lon)-zero_cord_y)\n curr_posx=(float(curr_gps_lat)-zero_cord_x)\n #print(\"curr_posy,curr_posx=\",curr_posy,curr_posx)\n return curr_posx, curr_posy\n\n\n\ndef get_transformation(pt,curr_yaw,T):\n c, s = np.cos(curr_yaw), np.sin(curr_yaw)\n R = (np.array(((c,-s), (s, c))))\n pt=pt.dot(R)+T\n return pt\n\n\n\ndef get_arc_length(tx,ty,st):\n arc_length=0\n for x in range(1,st):\n arc_length=arc_length+(np.hypot((tx[x-1]-tx[x]),(ty[x-1]-ty[x])))\n return arc_length\n\n\n\ndef get_lateral_dist(tx,ty,curr_posx,curr_posy):\n dist=[]\n for x in range(0,len(tx)-1):\n dist.append(np.hypot((float(curr_posx)-tx[x]),(float(curr_posy)-ty[x])))\n lat_dist=min(dist)\n st=dist.index(min(dist))\n theta1=math.atan2((ty[st]-ty[st-1]),(tx[st]-tx[st-1]))\n theta2=math.atan2((curr_posy-ty[st-1]),(curr_posx-tx[st-1]))\n if lat_dist<THRESH_DIST:\n lat_dist=0\n curr_posx=tx[st]\n curr_posy=ty[st]\n if theta2<theta1:\n lat_dist=-lat_dist\n # print(lat_dist)\n return st, lat_dist, curr_posx, curr_posy\n\n\n\ndef proportional_control(target, current):\n #print(\"*\"*50)\n #print(\"current=\",current)\n #print(\"target - current\",target - current)\n a = 1.0 * (target - current)\n\n return a\n\n\n\n\n\n\ndef main():\n\n ptx = []\n pty = []\n\n ptx, pty = load_global_path()\n tx, ty, tyaw, tc, csp = generate_target_course(ptx, pty)\n 
#print(csp)\n road_left_x, road_left_y, road_right_x, road_right_y = generate_road_widle(ptx, pty)\n \n #当前车速及加速度\n c_speed = 5.0/3.6\n c_acc = 1.0\n c_d_dd = 0\n c_d_d = 0\n area = 25.0 # animation area length [m]\n start = time.time()\n rospy.init_node('AvoidObstacles_PlannerOut', anonymous = False)\n my_node = Info()\n \n \n while not rospy.is_shutdown():\n CurrGPS_lat, CurrGPS_lon, ImuYaw, gobx, goby, gob, CurrentVelocity = my_node.init()\n #print(\"gob\",gob)\n ob = []\n \n if (CurrGPS_lat != -1 and CurrGPS_lon != -1 and ImuYaw != -1):\n \n \n\n \n \n \n #print(CurrGPS_lat,CurrGPS_lon,ImuYaw, curr_posx, curr_posy)\n #print(gobx,goby,gob)\n #path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)\n #s0 = path.s[1]\n #c_d = path.d[1]\n #c_d_d = path.d_d[1]\n #c_d_dd = path.d_dd[1]\n #c_speed = path.s_d[1]\n \n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n T = [curr_posx, curr_posy]\n \n \n \n \n curr_yaw = ImuYaw #+ math.pi / 2\n \n \n if (len(gob) == 0):\n ob = [[-20, -20]]\n \n else:\n ob = gob\n \n \n ob_len = len(ob)-1\n for x in xrange(0, ob_len):\n #print(\"ob_transformation\",ob)\n ob = np.array(ob)\n #ob[x, :] = .2 * ob[x, :]\n ob[x, :] = get_transformation(ob[x, :], -curr_yaw, T)\n #print(\"ob_transformation\",ob)\n #############################################################\n \n \n \n \n \n # c_d_dd = c_acc*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))+curr_yaw)\n \n \n #spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)\n \n #curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n \n \n \n try:\n curr_posx, curr_posy = get_transalation(CurrGPS_lat, CurrGPS_lon)\n spt, c_d, curr_posx, curr_posy = get_lateral_dist(tx, ty, curr_posx, curr_posy)\n s0 = get_arc_length(tx, ty, spt)\n path = frenet_optimal_planning(csp, s0, c_speed, c_d, c_d_d, c_d_dd, ob)\n c_speed = path.s_d[1] \n #c_d_d = 
c_speed*math.cos(math.atan2((ty[spt]-curr_posy),(tx[spt]-curr_posx))-curr_yaw)\n c_d_d = path.d_d[1] \n c_d_dd = path.d_dd[1] \n \n if np.hypot(path.x[1] - tx[-1], path.y[1] - ty[-1]) <= 1.0:\n print(\"Goal\")\n c_speed = 0.0\n break\n if show_animation:\n plt.cla()\n plt.plot(tx, ty, \"-.k\")\n plt.plot(road_left_x, road_left_y, \"-k\")\n plt.plot(road_right_x, road_right_y, \"-k\")\n plt.plot(ob[:, 0], ob[:, 1], \"ob\")\n plt.plot(path.x[1:], path.y[1:], \"-or\")\n plt.plot(path.x[1], path.y[1], \"vc\")\n plt.xlim(path.x[1] - area, path.x[1] + area)\n plt.ylim(path.y[1] - area, path.y[1] + area)\n plt.arrow(curr_posx, curr_posy, math.cos(curr_yaw), math.sin(curr_yaw),fc=\"r\", ec=\"k\", head_width=0.5, head_length=1.0)\n plt.title(\"v[km/h]:\" + str(c_speed)[0:4])\n plt.xlabel(u'x/m', fontsize=14) # 设置x轴,并设定字号大小\n plt.ylabel(u'y/m', fontsize=14) # 设置y轴,并设定字号大小\n plt.pause(0.0001)\n \n \n \n ####################规划成功############### \n ###########################################\n PathFail_flag = 0 \n ###########################################\n \n \n except:\n ###############规划失败################\n PathFail_flag = 1\n print(\"Don't find optimal path\")\n \n ################对障碍物堆栈清空############\n ############################################\n ############################################\n global Gob_x\n global Gob_y\n Gob_x*=0\n Gob_y*=0 \n ############################################\n ############################################\n \n \n \n############################################################################### \n \n \n try:\n '''\n acc = proportional_control(6, CurrentVelocity)\n temp1=path.yaw[1] `\n temp2=curr_yaw \n \n if temp1<0:\n temp1=6.28+temp1\n if temp2<0:\n temp2=6.28+temp2\n\n val = temp1-temp2\n \n if val > 3.14:\n val = val - 6.28\n if val < -3.14:\n val = val + 6.28\n \n val = math.degrees(val)\n \n if val > 50:\n val = 50\n if val < -50:\n val = -50\n \n my_node.talker(acc,val)\n '''\n path_record = localPath()\n\n # 配置路径\n for i in 
range(len(path.x[1:])):\n\n #print(\"path_x\",path.x[i])\n \n path_record.path_x.append(path.x[i])\n path_record.path_y.append(path.y[i]) \n # 路径数量限制\n if len(path_record.path_x) > 10000:\n path_record.path_x.pop(0)\n path_record.path_y.pop(0)\n # 发布路径`\n my_node.talker(c_speed, path_record)\n \n except: \n print(\"local path send fail\")\n pass\n #my_node.talker(c_speed, path.x[1:], path.y[1:])\n #except:\n # pass\n\n print(\"Finish\")\n end = time.time()\n #print(\"total time: \", end - start)\n\n if show_animation:\n plt.grid(True)\n plt.show()\n\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
20,
21,
24,
25,
27
]
}
|
[
20,
21,
24,
25,
27
] |
#!/usr/bin/python3
"""Unittest for max_integer([..])
"""
import unittest
max_integer = __import__('6-max_integer').max_integer
class TestMaxInteger(unittest.TestCase):
""" Interactive tests """
def test_max(self):
"""Tests max_integer"""
self.assertEqual(max_integer([1, 2, 3]), 3)
self.assertEqual(max_integer([6, 2, 6]), 6)
self.assertEqual(max_integer([0, 0, 0]), 0)
self.assertEqual(max_integer([1, 5, 3]), 5)
self.assertEqual(max_integer([1, 2, -3]), 2)
self.assertEqual(max_integer([-1, -2, -3]), -1)
self.assertEqual(max_integer([2]), 2)
self.assertEqual(max_integer([]), None)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "f799fdfde537bbe8f6c49a5e1a15cf6f910a0d45",
"index": 889,
"step-1": "<mask token>\n\n\nclass TestMaxInteger(unittest.TestCase):\n <mask token>\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\n<mask token>\n",
"step-3": "<mask token>\nmax_integer = __import__('6-max_integer').max_integer\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nmax_integer = __import__('6-max_integer').max_integer\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/python3\n\"\"\"Unittest for max_integer([..])\n\"\"\"\nimport unittest\nmax_integer = __import__('6-max_integer').max_integer\n\n\nclass TestMaxInteger(unittest.TestCase):\n \"\"\" Interactive tests \"\"\"\n def test_max(self):\n \"\"\"Tests max_integer\"\"\"\n self.assertEqual(max_integer([1, 2, 3]), 3)\n self.assertEqual(max_integer([6, 2, 6]), 6)\n self.assertEqual(max_integer([0, 0, 0]), 0)\n self.assertEqual(max_integer([1, 5, 3]), 5)\n self.assertEqual(max_integer([1, 2, -3]), 2)\n self.assertEqual(max_integer([-1, -2, -3]), -1)\n self.assertEqual(max_integer([2]), 2)\n self.assertEqual(max_integer([]), None)\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in conf_arr:
a = 0
tmp_arr = []
a = sum(i, 0)
for j in i:
tmp_arr.append(float(j) / float(a))
norm_conf.append(tmp_arr)
<|reserved_special_token_0|>
plt.clf()
<|reserved_special_token_0|>
ax.set_aspect(1)
<|reserved_special_token_0|>
for x in range(width):
for y in range(height):
ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=
'center', verticalalignment='center')
<|reserved_special_token_0|>
plt.xticks(range(width), alphabet[:width])
plt.yticks(range(height), alphabet[:height])
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.savefig('confusion_matrix.png', format='png')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
conf_arr = [[2987, 58, 955, 832, 1991, 181, 986], [142, 218, 195, 44, 235,
11, 27], [524, 8, 3482, 478, 2406, 708, 588], [140, 0, 386, 12491, 793,
182, 438], [368, 15, 883, 635, 6331, 71, 1357], [77, 0, 942, 394, 223,
4530, 176], [224, 7, 601, 929, 2309, 99, 5761]]
conf_arr = np.transpose(np.array(conf_arr))
norm_conf = []
for i in conf_arr:
a = 0
tmp_arr = []
a = sum(i, 0)
for j in i:
tmp_arr.append(float(j) / float(a))
norm_conf.append(tmp_arr)
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet, interpolation='nearest')
width, height = conf_arr.shape
for x in range(width):
for y in range(height):
ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=
'center', verticalalignment='center')
cb = fig.colorbar(res)
alphabet = '0123456789'
plt.xticks(range(width), alphabet[:width])
plt.yticks(range(height), alphabet[:height])
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.savefig('confusion_matrix.png', format='png')
<|reserved_special_token_1|>
import numpy as np
import matplotlib.pyplot as plt
conf_arr = [[2987, 58, 955, 832, 1991, 181, 986], [142, 218, 195, 44, 235,
11, 27], [524, 8, 3482, 478, 2406, 708, 588], [140, 0, 386, 12491, 793,
182, 438], [368, 15, 883, 635, 6331, 71, 1357], [77, 0, 942, 394, 223,
4530, 176], [224, 7, 601, 929, 2309, 99, 5761]]
conf_arr = np.transpose(np.array(conf_arr))
norm_conf = []
for i in conf_arr:
a = 0
tmp_arr = []
a = sum(i, 0)
for j in i:
tmp_arr.append(float(j) / float(a))
norm_conf.append(tmp_arr)
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet, interpolation='nearest')
width, height = conf_arr.shape
for x in range(width):
for y in range(height):
ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=
'center', verticalalignment='center')
cb = fig.colorbar(res)
alphabet = '0123456789'
plt.xticks(range(width), alphabet[:width])
plt.yticks(range(height), alphabet[:height])
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.savefig('confusion_matrix.png', format='png')
|
flexible
|
{
"blob_id": "923a2979df3c37583eec712880ad821541bd898b",
"index": 8735,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j) / float(a))\n norm_conf.append(tmp_arr)\n<mask token>\nplt.clf()\n<mask token>\nax.set_aspect(1)\n<mask token>\nfor x in range(width):\n for y in range(height):\n ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=\n 'center', verticalalignment='center')\n<mask token>\nplt.xticks(range(width), alphabet[:width])\nplt.yticks(range(height), alphabet[:height])\nplt.xlabel('Predicted Label')\nplt.ylabel('True Label')\nplt.savefig('confusion_matrix.png', format='png')\n",
"step-3": "<mask token>\nconf_arr = [[2987, 58, 955, 832, 1991, 181, 986], [142, 218, 195, 44, 235, \n 11, 27], [524, 8, 3482, 478, 2406, 708, 588], [140, 0, 386, 12491, 793,\n 182, 438], [368, 15, 883, 635, 6331, 71, 1357], [77, 0, 942, 394, 223, \n 4530, 176], [224, 7, 601, 929, 2309, 99, 5761]]\nconf_arr = np.transpose(np.array(conf_arr))\nnorm_conf = []\nfor i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j) / float(a))\n norm_conf.append(tmp_arr)\nfig = plt.figure()\nplt.clf()\nax = fig.add_subplot(111)\nax.set_aspect(1)\nres = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet, interpolation='nearest')\nwidth, height = conf_arr.shape\nfor x in range(width):\n for y in range(height):\n ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=\n 'center', verticalalignment='center')\ncb = fig.colorbar(res)\nalphabet = '0123456789'\nplt.xticks(range(width), alphabet[:width])\nplt.yticks(range(height), alphabet[:height])\nplt.xlabel('Predicted Label')\nplt.ylabel('True Label')\nplt.savefig('confusion_matrix.png', format='png')\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nconf_arr = [[2987, 58, 955, 832, 1991, 181, 986], [142, 218, 195, 44, 235, \n 11, 27], [524, 8, 3482, 478, 2406, 708, 588], [140, 0, 386, 12491, 793,\n 182, 438], [368, 15, 883, 635, 6331, 71, 1357], [77, 0, 942, 394, 223, \n 4530, 176], [224, 7, 601, 929, 2309, 99, 5761]]\nconf_arr = np.transpose(np.array(conf_arr))\nnorm_conf = []\nfor i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j) / float(a))\n norm_conf.append(tmp_arr)\nfig = plt.figure()\nplt.clf()\nax = fig.add_subplot(111)\nax.set_aspect(1)\nres = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet, interpolation='nearest')\nwidth, height = conf_arr.shape\nfor x in range(width):\n for y in range(height):\n ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=\n 'center', verticalalignment='center')\ncb = fig.colorbar(res)\nalphabet = '0123456789'\nplt.xticks(range(width), alphabet[:width])\nplt.yticks(range(height), alphabet[:height])\nplt.xlabel('Predicted Label')\nplt.ylabel('True Label')\nplt.savefig('confusion_matrix.png', format='png')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time
from numpy import empty
from src.utils import normalize_input_sentence, evaluate, add_begin_and_trailing_tag, check_for_terminal_argument
from classes.BaseTagger import BaseTagger
from src.CONSTANT import POS_TAG_KEYNAME, WORD_KEYNAME, TRUETAG_KEYNAME, DEFAULT_TRAINING_FILENAME
import sys
import os
# TODO check all document
class ForwardBackward(BaseTagger):
"""
For Learning: Calculate probability of an observation sequence given a HMM: P(O | lambda)
"""
def __init__(self):
"""
Constructor
"""
# TODO Need to seperate input reading into whether a class method or static function
self.path_to_file = check_for_terminal_argument()
BaseTagger.__init__(self)
raise NotImplementedError
def probabilities(self):
"""
Return the probabilities of a hidden state sequence given observed output sequence
:return:
"""
raise NotImplementedError
def prob_given_state(self, start=1, end=len(self.T)): # , start, end):
"""
Return the probabilities of output from "start" to "end" given current (hidden) state
:param start: start of observing time
:param end: end of observing time
:return: probabilities.
***********************
* return format *
***********************
"""
# for state_index in range(len(self.tagset)):
# self.alpha[1][state_index] = 0
raise NotImplementedError
def tag(self):
"""
alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda
model
"""
self.alpha = self.prob_given_state()
raise NotImplementedError
|
normal
|
{
"blob_id": "8cc0314d48f81ceead863245443548297e8188f8",
"index": 9610,
"step-1": "<mask token>\n\n\nclass ForwardBackward(BaseTagger):\n <mask token>\n <mask token>\n\n def probabilities(self):\n \"\"\"\n Return the probabilities of a hidden state sequence given observed output sequence\n :return:\n \"\"\"\n raise NotImplementedError\n <mask token>\n\n def tag(self):\n \"\"\"\n alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda\n model\n \"\"\"\n self.alpha = self.prob_given_state()\n raise NotImplementedError\n",
"step-2": "<mask token>\n\n\nclass ForwardBackward(BaseTagger):\n <mask token>\n <mask token>\n\n def probabilities(self):\n \"\"\"\n Return the probabilities of a hidden state sequence given observed output sequence\n :return:\n \"\"\"\n raise NotImplementedError\n\n def prob_given_state(self, start=1, end=len(self.T)):\n \"\"\"\n Return the probabilities of output from \"start\" to \"end\" given current (hidden) state\n :param start: start of observing time\n :param end: end of observing time\n :return: probabilities.\n ***********************\n * return format *\n ***********************\n \"\"\"\n raise NotImplementedError\n\n def tag(self):\n \"\"\"\n alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda\n model\n \"\"\"\n self.alpha = self.prob_given_state()\n raise NotImplementedError\n",
"step-3": "<mask token>\n\n\nclass ForwardBackward(BaseTagger):\n <mask token>\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n self.path_to_file = check_for_terminal_argument()\n BaseTagger.__init__(self)\n raise NotImplementedError\n\n def probabilities(self):\n \"\"\"\n Return the probabilities of a hidden state sequence given observed output sequence\n :return:\n \"\"\"\n raise NotImplementedError\n\n def prob_given_state(self, start=1, end=len(self.T)):\n \"\"\"\n Return the probabilities of output from \"start\" to \"end\" given current (hidden) state\n :param start: start of observing time\n :param end: end of observing time\n :return: probabilities.\n ***********************\n * return format *\n ***********************\n \"\"\"\n raise NotImplementedError\n\n def tag(self):\n \"\"\"\n alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda\n model\n \"\"\"\n self.alpha = self.prob_given_state()\n raise NotImplementedError\n",
"step-4": "import time\nfrom numpy import empty\nfrom src.utils import normalize_input_sentence, evaluate, add_begin_and_trailing_tag, check_for_terminal_argument\nfrom classes.BaseTagger import BaseTagger\nfrom src.CONSTANT import POS_TAG_KEYNAME, WORD_KEYNAME, TRUETAG_KEYNAME, DEFAULT_TRAINING_FILENAME\nimport sys\nimport os\n\n\nclass ForwardBackward(BaseTagger):\n \"\"\"\n For Learning: Calculate probability of an observation sequence given a HMM: P(O | lambda)\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n self.path_to_file = check_for_terminal_argument()\n BaseTagger.__init__(self)\n raise NotImplementedError\n\n def probabilities(self):\n \"\"\"\n Return the probabilities of a hidden state sequence given observed output sequence\n :return:\n \"\"\"\n raise NotImplementedError\n\n def prob_given_state(self, start=1, end=len(self.T)):\n \"\"\"\n Return the probabilities of output from \"start\" to \"end\" given current (hidden) state\n :param start: start of observing time\n :param end: end of observing time\n :return: probabilities.\n ***********************\n * return format *\n ***********************\n \"\"\"\n raise NotImplementedError\n\n def tag(self):\n \"\"\"\n alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda\n model\n \"\"\"\n self.alpha = self.prob_given_state()\n raise NotImplementedError\n",
"step-5": "import time\n\nfrom numpy import empty\nfrom src.utils import normalize_input_sentence, evaluate, add_begin_and_trailing_tag, check_for_terminal_argument\nfrom classes.BaseTagger import BaseTagger\nfrom src.CONSTANT import POS_TAG_KEYNAME, WORD_KEYNAME, TRUETAG_KEYNAME, DEFAULT_TRAINING_FILENAME\nimport sys\nimport os\n\n\n# TODO check all document\n\nclass ForwardBackward(BaseTagger):\n \"\"\"\n For Learning: Calculate probability of an observation sequence given a HMM: P(O | lambda)\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Constructor\n \"\"\"\n # TODO Need to seperate input reading into whether a class method or static function\n\n self.path_to_file = check_for_terminal_argument()\n BaseTagger.__init__(self)\n raise NotImplementedError\n\n def probabilities(self):\n \"\"\"\n Return the probabilities of a hidden state sequence given observed output sequence\n :return:\n \"\"\"\n raise NotImplementedError\n\n def prob_given_state(self, start=1, end=len(self.T)): # , start, end):\n \"\"\"\n Return the probabilities of output from \"start\" to \"end\" given current (hidden) state\n :param start: start of observing time\n :param end: end of observing time\n :return: probabilities.\n ***********************\n * return format *\n ***********************\n \"\"\"\n\n # for state_index in range(len(self.tagset)):\n # self.alpha[1][state_index] = 0\n\n raise NotImplementedError\n\n def tag(self):\n \"\"\"\n alpha_t_i: probability of state S[i] at time t with the observed sequence O={o1, ..., oT} with lambda\n model\n \"\"\"\n self.alpha = self.prob_given_state()\n\n raise NotImplementedError\n",
"step-ids": [
3,
4,
5,
7,
8
]
}
|
[
3,
4,
5,
7,
8
] |
<|reserved_special_token_0|>
def average(run):
print('____________________________________')
sum = 0
for i in range(0, len(run)):
sum += run[i]
avg = sum / len(run)
print('Average score of the team is :', avg)
def high(run):
print('______________________________________')
max = run[0]
for i in range(len(run)):
if max < run[i]:
max = run[i]
print('Highest run score by the player is :', max)
def low(run):
print('____________________________________')
mim = run[0]
for i in range(len(run)):
if mim > run[i]:
mim = run[i]
print('Lowest runs scored by the player is :', mim)
def check(run):
print('_______________________________________')
count = 0
for i in range(0, len(run)):
if run[i] >= 50:
count += 1
else:
pass
print("Count of the player score more than '50' are :", count)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def average(run):
print('____________________________________')
sum = 0
for i in range(0, len(run)):
sum += run[i]
avg = sum / len(run)
print('Average score of the team is :', avg)
def high(run):
print('______________________________________')
max = run[0]
for i in range(len(run)):
if max < run[i]:
max = run[i]
print('Highest run score by the player is :', max)
def low(run):
print('____________________________________')
mim = run[0]
for i in range(len(run)):
if mim > run[i]:
mim = run[i]
print('Lowest runs scored by the player is :', mim)
def check(run):
print('_______________________________________')
count = 0
for i in range(0, len(run)):
if run[i] >= 50:
count += 1
else:
pass
print("Count of the player score more than '50' are :", count)
def feq(run):
print('___________________________________')
max = 0
result = run[0]
for i in run:
freq = run.count(i)
if freq > max:
max = freq
result = i
print(f'run scored with the highest frequncy {result} is', max)
print("-------------'THANKYOU---------------")
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(no_players):
run_score = int(input('Enter the runs scored by the player ' + str(i +
1) + ':'))
run.append(run_score)
def average(run):
print('____________________________________')
sum = 0
for i in range(0, len(run)):
sum += run[i]
avg = sum / len(run)
print('Average score of the team is :', avg)
def high(run):
print('______________________________________')
max = run[0]
for i in range(len(run)):
if max < run[i]:
max = run[i]
print('Highest run score by the player is :', max)
def low(run):
print('____________________________________')
mim = run[0]
for i in range(len(run)):
if mim > run[i]:
mim = run[i]
print('Lowest runs scored by the player is :', mim)
def check(run):
print('_______________________________________')
count = 0
for i in range(0, len(run)):
if run[i] >= 50:
count += 1
else:
pass
print("Count of the player score more than '50' are :", count)
def feq(run):
print('___________________________________')
max = 0
result = run[0]
for i in run:
freq = run.count(i)
if freq > max:
max = freq
result = i
print(f'run scored with the highest frequncy {result} is', max)
print("-------------'THANKYOU---------------")
average(run)
high(run)
low(run)
check(run)
feq(run)
<|reserved_special_token_1|>
run = []
no_players = int(input('enter the number of the players in the team :'))
for i in range(no_players):
run_score = int(input('Enter the runs scored by the player ' + str(i +
1) + ':'))
run.append(run_score)
def average(run):
print('____________________________________')
sum = 0
for i in range(0, len(run)):
sum += run[i]
avg = sum / len(run)
print('Average score of the team is :', avg)
def high(run):
print('______________________________________')
max = run[0]
for i in range(len(run)):
if max < run[i]:
max = run[i]
print('Highest run score by the player is :', max)
def low(run):
print('____________________________________')
mim = run[0]
for i in range(len(run)):
if mim > run[i]:
mim = run[i]
print('Lowest runs scored by the player is :', mim)
def check(run):
print('_______________________________________')
count = 0
for i in range(0, len(run)):
if run[i] >= 50:
count += 1
else:
pass
print("Count of the player score more than '50' are :", count)
def feq(run):
print('___________________________________')
max = 0
result = run[0]
for i in run:
freq = run.count(i)
if freq > max:
max = freq
result = i
print(f'run scored with the highest frequncy {result} is', max)
print("-------------'THANKYOU---------------")
average(run)
high(run)
low(run)
check(run)
feq(run)
<|reserved_special_token_1|>
run=[] #Creating a empty list
no_players=int(input("enter the number of the players in the team :"))
for i in range (no_players):
run_score=int(input("Enter the runs scored by the player "+str(i+1)+":"))
run.append(run_score)
#code for the average score of the team
def average(run):
print("____________________________________")
sum=0
for i in range (0,len(run)):
sum+=run[i]
avg=sum/len(run)
print("Average score of the team is :",avg)
#code for the maximun runs scored by the players in the team
def high(run):
print("______________________________________")
max=run[0]
for i in range(len(run)):
if max<run[i]:
max=run[i]
print("Highest run score by the player is :",max)
#code for the minimum runs scored by the players in the team
def low(run):
print("____________________________________")
mim=run[0]
for i in range(len(run)):
if mim>run[i]:
mim=run[i]
print("Lowest runs scored by the player is :",mim)
#code for the runs scored more than 50 runs in the the team
def check(run):
print("_______________________________________")
count=0
for i in range(0,len(run)):
if run[i]>=50:
count+=1
else:
pass
print("Count of the player score more than '50' are :",count)
#code for the runs scored for higher number of the frequency
def feq(run):
print("___________________________________")
max=0
result=run[0]
for i in run:
freq=run.count(i)
if freq>max:
max=freq
result=i
print(f"run scored with the highest frequncy {result} is",max)
print("-------------'THANKYOU---------------")
average(run)
high(run)
low(run)
check(run)
feq(run)
|
flexible
|
{
"blob_id": "3d7ca468a1f7aa1602bff22167e9550ad515fa79",
"index": 4777,
"step-1": "<mask token>\n\n\ndef average(run):\n print('____________________________________')\n sum = 0\n for i in range(0, len(run)):\n sum += run[i]\n avg = sum / len(run)\n print('Average score of the team is :', avg)\n\n\ndef high(run):\n print('______________________________________')\n max = run[0]\n for i in range(len(run)):\n if max < run[i]:\n max = run[i]\n print('Highest run score by the player is :', max)\n\n\ndef low(run):\n print('____________________________________')\n mim = run[0]\n for i in range(len(run)):\n if mim > run[i]:\n mim = run[i]\n print('Lowest runs scored by the player is :', mim)\n\n\ndef check(run):\n print('_______________________________________')\n count = 0\n for i in range(0, len(run)):\n if run[i] >= 50:\n count += 1\n else:\n pass\n print(\"Count of the player score more than '50' are :\", count)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef average(run):\n print('____________________________________')\n sum = 0\n for i in range(0, len(run)):\n sum += run[i]\n avg = sum / len(run)\n print('Average score of the team is :', avg)\n\n\ndef high(run):\n print('______________________________________')\n max = run[0]\n for i in range(len(run)):\n if max < run[i]:\n max = run[i]\n print('Highest run score by the player is :', max)\n\n\ndef low(run):\n print('____________________________________')\n mim = run[0]\n for i in range(len(run)):\n if mim > run[i]:\n mim = run[i]\n print('Lowest runs scored by the player is :', mim)\n\n\ndef check(run):\n print('_______________________________________')\n count = 0\n for i in range(0, len(run)):\n if run[i] >= 50:\n count += 1\n else:\n pass\n print(\"Count of the player score more than '50' are :\", count)\n\n\ndef feq(run):\n print('___________________________________')\n max = 0\n result = run[0]\n for i in run:\n freq = run.count(i)\n if freq > max:\n max = freq\n result = i\n print(f'run scored with the highest frequncy {result} is', max)\n print(\"-------------'THANKYOU---------------\")\n\n\n<mask token>\n",
"step-3": "<mask token>\nfor i in range(no_players):\n run_score = int(input('Enter the runs scored by the player ' + str(i + \n 1) + ':'))\n run.append(run_score)\n\n\ndef average(run):\n print('____________________________________')\n sum = 0\n for i in range(0, len(run)):\n sum += run[i]\n avg = sum / len(run)\n print('Average score of the team is :', avg)\n\n\ndef high(run):\n print('______________________________________')\n max = run[0]\n for i in range(len(run)):\n if max < run[i]:\n max = run[i]\n print('Highest run score by the player is :', max)\n\n\ndef low(run):\n print('____________________________________')\n mim = run[0]\n for i in range(len(run)):\n if mim > run[i]:\n mim = run[i]\n print('Lowest runs scored by the player is :', mim)\n\n\ndef check(run):\n print('_______________________________________')\n count = 0\n for i in range(0, len(run)):\n if run[i] >= 50:\n count += 1\n else:\n pass\n print(\"Count of the player score more than '50' are :\", count)\n\n\ndef feq(run):\n print('___________________________________')\n max = 0\n result = run[0]\n for i in run:\n freq = run.count(i)\n if freq > max:\n max = freq\n result = i\n print(f'run scored with the highest frequncy {result} is', max)\n print(\"-------------'THANKYOU---------------\")\n\n\naverage(run)\nhigh(run)\nlow(run)\ncheck(run)\nfeq(run)\n",
"step-4": "run = []\nno_players = int(input('enter the number of the players in the team :'))\nfor i in range(no_players):\n run_score = int(input('Enter the runs scored by the player ' + str(i + \n 1) + ':'))\n run.append(run_score)\n\n\ndef average(run):\n print('____________________________________')\n sum = 0\n for i in range(0, len(run)):\n sum += run[i]\n avg = sum / len(run)\n print('Average score of the team is :', avg)\n\n\ndef high(run):\n print('______________________________________')\n max = run[0]\n for i in range(len(run)):\n if max < run[i]:\n max = run[i]\n print('Highest run score by the player is :', max)\n\n\ndef low(run):\n print('____________________________________')\n mim = run[0]\n for i in range(len(run)):\n if mim > run[i]:\n mim = run[i]\n print('Lowest runs scored by the player is :', mim)\n\n\ndef check(run):\n print('_______________________________________')\n count = 0\n for i in range(0, len(run)):\n if run[i] >= 50:\n count += 1\n else:\n pass\n print(\"Count of the player score more than '50' are :\", count)\n\n\ndef feq(run):\n print('___________________________________')\n max = 0\n result = run[0]\n for i in run:\n freq = run.count(i)\n if freq > max:\n max = freq\n result = i\n print(f'run scored with the highest frequncy {result} is', max)\n print(\"-------------'THANKYOU---------------\")\n\n\naverage(run)\nhigh(run)\nlow(run)\ncheck(run)\nfeq(run)\n",
"step-5": "run=[] #Creating a empty list \r\nno_players=int(input(\"enter the number of the players in the team :\")) \r\nfor i in range (no_players):\r\n run_score=int(input(\"Enter the runs scored by the player \"+str(i+1)+\":\"))\r\n run.append(run_score)\r\n#code for the average score of the team\r\ndef average(run):\r\n print(\"____________________________________\")\r\n sum=0\r\n for i in range (0,len(run)):\r\n sum+=run[i]\r\n avg=sum/len(run)\r\n print(\"Average score of the team is :\",avg)\r\n#code for the maximun runs scored by the players in the team\r\ndef high(run):\r\n print(\"______________________________________\")\r\n max=run[0]\r\n for i in range(len(run)):\r\n if max<run[i]:\r\n max=run[i]\r\n print(\"Highest run score by the player is :\",max)\r\n#code for the minimum runs scored by the players in the team\r\ndef low(run):\r\n print(\"____________________________________\")\r\n mim=run[0]\r\n for i in range(len(run)):\r\n if mim>run[i]:\r\n mim=run[i]\r\n print(\"Lowest runs scored by the player is :\",mim)\r\n#code for the runs scored more than 50 runs in the the team\r\ndef check(run): \r\n print(\"_______________________________________\") \r\n count=0\r\n for i in range(0,len(run)):\r\n if run[i]>=50:\r\n count+=1\r\n else:\r\n pass\r\n print(\"Count of the player score more than '50' are :\",count)\r\n#code for the runs scored for higher number of the frequency\r\ndef feq(run):\r\n print(\"___________________________________\")\r\n max=0\r\n result=run[0]\r\n for i in run:\r\n freq=run.count(i)\r\n if freq>max:\r\n max=freq\r\n result=i\r\n \r\n print(f\"run scored with the highest frequncy {result} is\",max)\r\n print(\"-------------'THANKYOU---------------\")\r\n\r\naverage(run)\r\nhigh(run)\r\nlow(run)\r\ncheck(run)\r\nfeq(run)\r\n\r\n\r\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import cv2 as cv
import numpy as np
import pytesseract as tes
def get_text_from_image(imageName):
    """Return the OCR'd text of the receipt photo at *imageName*."""
    img = preprocess(imageName)
    result = tes.image_to_string(img)
    return result


def preprocess(image_name):
    """Load *image_name*, crop/deskew the receipt and binarize it for OCR."""
    image = cv.imread(image_name)
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    receiptBox = find_receipt_box(gray)
    M, w, h = perspective_transform(receiptBox)
    receiptImg = apply_perspective_correction(gray, M, w, h)
    # Adaptive threshold copes with uneven lighting across the receipt.
    receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)
    return receiptImg


def find_receipt_box(image):
    """
    Find the quadrilateral around the receipt in the given grayscale image.
    Returns the 4 corner points of the minimum-area bounding box.
    """
    # BUG FIX: medianBlur takes (src, ksize); the stray third argument 0
    # (probably copied from a GaussianBlur call) is removed.
    gray = cv.medianBlur(image, 15)
    # BUG FIX: threshold() is (src, thresh, maxval, type); the original
    # passed maxval=125, producing a 0/125 image. With THRESH_OTSU the
    # thresh argument is ignored, so pass 0 and a proper maxval of 255.
    _, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)
    k = np.ones((25, 25))
    # Morphological open (erode then dilate) removes small speckles.
    thresh = cv.erode(thresh, k, iterations=1)
    thresh = cv.dilate(thresh, k, iterations=1)
    contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
    contours = sorted(contours[0], key=cv.contourArea, reverse=True)
    contour = contours[0]  # largest contour is assumed to be the receipt
    rect = cv.minAreaRect(contour)
    box = cv.boxPoints(rect)
    box = box.astype(np.intp)  # np.int0 alias was removed in NumPy 2.0
    return box


def perspective_transform(contour):
    """Produce the 3x3 transform matrix and output (width, height) mapping
    the receipt quadrilateral onto an axis-aligned rectangle."""
    ord_rect = np.float32(order_rect(contour))
    (tl, tr, br, bl) = ord_rect

    dist_top = np.linalg.norm(tl - tr)
    dist_btm = np.linalg.norm(bl - br)
    width = max(dist_btm, dist_top)

    # BUG FIX: the left edge runs from top-left to bottom-left; the
    # original reused tl - tr (the top edge) here.
    dist_left = np.linalg.norm(tl - bl)
    dist_right = np.linalg.norm(tr - br)
    height = max(dist_left, dist_right)

    dest_corners = np.array([
        [0, 0],
        [width - 1, 0],
        [width - 1, height - 1],
        [0, height - 1]
    ], dtype=ord_rect.dtype)

    M = cv.getPerspectiveTransform(ord_rect, dest_corners)
    return M, width, height


def order_rect(pts):
    """
    Order 4 corner points as top-left, top-right, bottom-right, bottom-left.
    """
    new = np.zeros((4, 2), dtype="int64")
    s = pts.sum(axis=1)
    new[0] = pts[np.argmin(s)]  # top-left: smallest x + y
    new[2] = pts[np.argmax(s)]  # bottom-right: largest x + y

    diff = np.diff(pts, axis=1)  # y - x for each point
    new[1] = pts[np.argmin(diff)]  # top-right: large x, small y
    new[3] = pts[np.argmax(diff)]  # bottom-left: small x, large y

    return new


def apply_perspective_correction(image, M, width, height):
    """Warp *image* with *M* into an upright width x height crop."""
    # warpPerspective needs integer output dimensions; width/height arrive
    # as floats from perspective_transform.
    warped = cv.warpPerspective(image, M, (int(width), int(height)))
    return warped


if __name__ == "__main__":
    # BUG FIX: the original called get_text_from_image() at the top of the
    # module, before any of the function definitions had executed, which
    # raises NameError. The call now runs after all definitions, and only
    # when the file is executed as a script.
    text = get_text_from_image("resizedReceipt.jpg")
    print(text)
|
normal
|
{
"blob_id": "e480136aca96e45cc8a7ca34c1a9d09b96a5a4da",
"index": 4152,
"step-1": "<mask token>\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\n<mask token>\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return 
warped\n",
"step-2": "<mask token>\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = 
pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n",
"step-3": "<mask token>\ntext = get_text_from_image('resizedReceipt.jpg')\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n 
\"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n",
"step-4": "import cv2 as cv\nimport numpy as np\nimport pytesseract as tes\ntext = get_text_from_image('resizedReceipt.jpg')\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.\n ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n tl, tr, br, bl = ord_rect\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n dest_corners = np.array([[0, 0], [width - 1, 0], [width - 1, height - 1\n ], [0, height - 1]], dtype=ord_rect.dtype)\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order 
top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype='int64')\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n",
"step-5": "import cv2 as cv\nimport numpy as np\nimport pytesseract as tes\n\n\ntext = get_text_from_image(\"resizedReceipt.jpg\")\nprint(text)\n\n\ndef get_text_from_image(imageName):\n img = preprocess(imageName)\n result = tes.image_to_string(img)\n return result\n\n\ndef preprocess(image_name):\n image = cv.imread(image_name)\n gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n receiptBox = find_receipt_box(gray)\n M, w, h = perspective_transform(receiptBox)\n receiptImg = apply_perspective_correction(gray, M, w, h)\n receiptImg = cv.adaptiveThreshold(receiptImg, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 10)\n return receiptImg\n\n\ndef find_receipt_box(image):\n \"\"\"\n Finds a contour around the receipt in the given image.\n Returns the bounding box and the binary image\n \"\"\"\n # gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n gray = cv.medianBlur(image, 15, 0)\n _, thresh = cv.threshold(gray, 255, 125, cv.THRESH_BINARY | cv.THRESH_OTSU)\n k = np.ones((25, 25))\n thresh = cv.erode(thresh, k, iterations=1)\n thresh = cv.dilate(thresh, k, iterations=1)\n contours = cv.findContours(thresh, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n contours = sorted(contours[0], key=cv.contourArea, reverse=True)\n contour = contours[0]\n rect = cv.minAreaRect(contour)\n box = cv.boxPoints(rect)\n box = np.int0(box)\n return box\n\n\ndef perspective_transform(contour):\n \"\"\"Produces the transformation matrix and the new size for perspective correction\"\"\"\n ord_rect = np.float32(order_rect(contour))\n (tl, tr, br, bl) = ord_rect\n\n dist_top = np.linalg.norm(tl - tr)\n dist_btm = np.linalg.norm(bl - br)\n width = max(dist_btm, dist_top)\n\n dist_left = np.linalg.norm(tl - tr)\n dist_right = np.linalg.norm(tr - br)\n height = max(dist_left, dist_right)\n\n dest_corners = np.array([\n [0, 0],\n [width - 1, 0],\n [width - 1, height - 1],\n [0, height - 1]\n ], dtype=ord_rect.dtype)\n\n M = cv.getPerspectiveTransform(ord_rect, dest_corners)\n return M, width, 
height\n\n\ndef order_rect(pts):\n \"\"\"\n orders a rectangle in the order top-left, top-right,\n bottom-right, bottom-left\n \"\"\"\n new = np.zeros((4, 2), dtype=\"int64\")\n s = pts.sum(axis=1)\n new[0] = pts[np.argmin(s)]\n new[2] = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis=1)\n new[1] = pts[np.argmin(diff)]\n new[3] = pts[np.argmax(diff)]\n\n return new\n\n\ndef apply_perspective_correction(image, M, width, height):\n \"\"\"Crops the contour and applies perspective correction\"\"\"\n warped = cv.warpPerspective(image, M, (width, height))\n return warped\n",
"step-ids": [
5,
7,
8,
9,
10
]
}
|
[
5,
7,
8,
9,
10
] |
# -*- coding: utf-8 -*-
from services.interfaces.i_service import IService
from services.dbservices.db_service import DBService
class GetCommunitiesByOffsetService(IService):
    """Service returning a page of "Communities" records via offset pagination."""

    def __init__(self, core, parameters):
        # Expects `parameters` to carry "start" and "offset" keys (read in run()).
        super(GetCommunitiesByOffsetService, self).__init__(core, parameters)

    def run(self):
        start = self.parameters["start"]
        offset = self.parameters["offset"]
        db = DBService(self.core)
        return db.getNextFields("Communities", start, offset)
|
normal
|
{
"blob_id": "051bd11c42815ec8f8ece8eae9d33890da77129c",
"index": 148,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass GetCommunitiesByOffsetService(IService):\n <mask token>\n\n def run(self):\n return DBService(self.core).getNextFields('Communities', self.\n parameters['start'], self.parameters['offset'])\n",
"step-3": "<mask token>\n\n\nclass GetCommunitiesByOffsetService(IService):\n\n def __init__(self, core, parameters):\n super(GetCommunitiesByOffsetService, self).__init__(core, parameters)\n\n def run(self):\n return DBService(self.core).getNextFields('Communities', self.\n parameters['start'], self.parameters['offset'])\n",
"step-4": "from services.interfaces.i_service import IService\nfrom services.dbservices.db_service import DBService\n\n\nclass GetCommunitiesByOffsetService(IService):\n\n def __init__(self, core, parameters):\n super(GetCommunitiesByOffsetService, self).__init__(core, parameters)\n\n def run(self):\n return DBService(self.core).getNextFields('Communities', self.\n parameters['start'], self.parameters['offset'])\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom services.interfaces.i_service import IService\nfrom services.dbservices.db_service import DBService\n\nclass GetCommunitiesByOffsetService(IService):\n def __init__(self, core, parameters):\n super(GetCommunitiesByOffsetService, self).__init__(core, parameters)\n\n def run(self):\n return DBService(self.core).getNextFields(\"Communities\", self.parameters[\"start\"], self.parameters[\"offset\"])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
load_dotenv()
<|reserved_special_token_0|>
print('Ready!')
@bot.command()
async def stop(ctx):
await ctx.message.delete()
await ctx.voice_client.disconnect()
@bot.command()
async def wew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'wow.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def thicc(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'THICC.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def woof(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'barks.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def welcome(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'Welcome.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def grapefruit(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'grapefruit.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def hello(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'hello.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def winning(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'winning.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def basingstoke(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'basingstoke.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def milleb(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'milleb.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def jew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'jew.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def whatisgoingonhere(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'here.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def BWEKFAST(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'bwekfast.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
bot.run(TOKEN)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
load_dotenv()
TOKEN = os.getenv('TOKEN')
bot = commands.Bot(command_prefix='.', case_insensitive=True)
print('Ready!')
@bot.command()
async def stop(ctx):
await ctx.message.delete()
await ctx.voice_client.disconnect()
@bot.command()
async def wew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'wow.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def thicc(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'THICC.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def woof(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'barks.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def welcome(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'Welcome.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def grapefruit(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'grapefruit.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def hello(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'hello.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def winning(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'winning.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def basingstoke(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'basingstoke.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def milleb(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'milleb.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def jew(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'jew.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def whatisgoingonhere(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'here.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def BWEKFAST(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'bwekfast.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
bot.run(TOKEN)
<|reserved_special_token_1|>
import discord
from discord.ext import commands
import asyncio
import glob
from dotenv import load_dotenv
import os
load_dotenv()  # pull variables from a local .env file into the environment
TOKEN = os.getenv('TOKEN')  # Discord bot token; must be provided via .env
# case_insensitive lets users type .WEW as well as .wew
bot = commands.Bot(command_prefix='.', case_insensitive=True)
print('Ready!')  # NOTE(review): printed at import time, not when the bot actually connects
async def _play_clip(ctx, song):
    """Join the invoker's voice channel, play *song* to completion, then leave.

    Shared implementation for every sound-clip command below; the original
    file repeated this exact body verbatim in each command. The unused
    `counter` variable from the original (incremented but never read) has
    been dropped.
    """
    await ctx.message.delete()
    channel = ctx.author.voice.channel
    print(channel)
    await channel.connect()
    vc = ctx.voice_client
    vc.play(discord.FFmpegPCMAudio(song))
    # Poll once per second until playback finishes.
    while vc.is_playing():
        await asyncio.sleep(1)
    await vc.disconnect()


@bot.command()
async def stop(ctx):
    """Delete the invoking message and leave the current voice channel."""
    await ctx.message.delete()
    await ctx.voice_client.disconnect()


@bot.command()
async def wew(ctx):
    """Play the 'wow' clip."""
    await _play_clip(ctx, 'wow.mp3')


@bot.command()
async def thicc(ctx):
    """Play the 'THICC' clip."""
    await _play_clip(ctx, 'THICC.mp3')


@bot.command()
async def woof(ctx):
    """Play the 'barks' clip."""
    await _play_clip(ctx, 'barks.mp3')


@bot.command()
async def welcome(ctx):
    """Play the 'Welcome' clip."""
    await _play_clip(ctx, 'Welcome.mp3')


@bot.command()
async def grapefruit(ctx):
    """Play the 'grapefruit' clip."""
    await _play_clip(ctx, 'grapefruit.mp3')


@bot.command()
async def hello(ctx):
    """Play the 'hello' clip."""
    await _play_clip(ctx, 'hello.mp3')


@bot.command()
async def winning(ctx):
    """Play the 'winning' clip."""
    await _play_clip(ctx, 'winning.mp3')


@bot.command()
async def basingstoke(ctx):
    """Play the 'basingstoke' clip."""
    await _play_clip(ctx, 'basingstoke.mp3')


@bot.command()
async def milleb(ctx):
    """Play the 'milleb' clip."""
    await _play_clip(ctx, 'milleb.mp3')


@bot.command()
async def jew(ctx):
    """Play the 'jew' clip."""
    await _play_clip(ctx, 'jew.mp3')
@bot.command()
async def whatisgoingonhere(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'here.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
@bot.command()
async def BWEKFAST(ctx):
await ctx.message.delete()
channel = ctx.author.voice.channel
print(channel)
await channel.connect()
vc = ctx.voice_client
counter = 0
song = 'bwekfast.mp3'
vc.play(discord.FFmpegPCMAudio(song))
while vc.is_playing():
await asyncio.sleep(1)
counter = counter + 1
await vc.disconnect()
bot.run(TOKEN)
<|reserved_special_token_1|>
#Import Discord Package
import discord
from discord.ext import commands
import asyncio
import glob
from dotenv import load_dotenv
import os
load_dotenv() # Load your Discord Token
TOKEN = os.getenv("TOKEN")
bot = commands.Bot(command_prefix='.',case_insensitive=True)
print('Ready!')
@bot.command()
async def stop(ctx):
    """Delete the invoking message and disconnect from the voice channel."""
    await ctx.message.delete()
    await ctx.voice_client.disconnect()


async def _play_clip(ctx, song):
    """Join the invoker's voice channel, play *song*, then disconnect.

    Shared implementation for every sound-clip command below; the original
    code repeated this body verbatim in each command. The invoking message
    is deleted first, and the command is a no-op when the author is not in
    a voice channel (previously an AttributeError crash).
    """
    await ctx.message.delete()
    if ctx.author.voice is None:
        # Caller is not connected to any voice channel; nothing to join.
        return
    channel = ctx.author.voice.channel
    print(channel)
    await channel.connect()
    vc = ctx.voice_client
    vc.play(discord.FFmpegPCMAudio(song))
    # Poll once a second until FFmpeg finishes streaming the clip.
    while vc.is_playing():
        await asyncio.sleep(1)
    await vc.disconnect()


@bot.command()
async def wew(ctx):
    await _play_clip(ctx, "wow.mp3")


@bot.command()
async def thicc(ctx):
    await _play_clip(ctx, "THICC.mp3")


@bot.command()
async def woof(ctx):
    await _play_clip(ctx, "barks.mp3")


@bot.command()
async def welcome(ctx):
    await _play_clip(ctx, "Welcome.mp3")


@bot.command()
async def grapefruit(ctx):
    await _play_clip(ctx, "grapefruit.mp3")


@bot.command()
async def hello(ctx):
    await _play_clip(ctx, "hello.mp3")


@bot.command()
async def winning(ctx):
    await _play_clip(ctx, "winning.mp3")


@bot.command()
async def basingstoke(ctx):
    await _play_clip(ctx, "basingstoke.mp3")


@bot.command()
async def milleb(ctx):
    await _play_clip(ctx, "milleb.mp3")


@bot.command()
async def jew(ctx):
    await _play_clip(ctx, "jew.mp3")


@bot.command()
async def whatisgoingonhere(ctx):
    await _play_clip(ctx, "here.mp3")


@bot.command()
async def BWEKFAST(ctx):
    await _play_clip(ctx, "bwekfast.mp3")


bot.run(TOKEN)
|
flexible
|
{
"blob_id": "41842e8b75860c65e87e9db1f7ae058957e37e45",
"index": 1822,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nload_dotenv()\n<mask token>\nprint('Ready!')\n\n\n@bot.command()\nasync def stop(ctx):\n await ctx.message.delete()\n await ctx.voice_client.disconnect()\n\n\n@bot.command()\nasync def wew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'wow.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def thicc(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'THICC.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def woof(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'barks.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def welcome(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'Welcome.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def grapefruit(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'grapefruit.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def hello(ctx):\n await 
ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'hello.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def winning(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'winning.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def basingstoke(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'basingstoke.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def milleb(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'milleb.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def jew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'jew.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def whatisgoingonhere(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'here.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n 
await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def BWEKFAST(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'bwekfast.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\nbot.run(TOKEN)\n",
"step-3": "<mask token>\nload_dotenv()\nTOKEN = os.getenv('TOKEN')\nbot = commands.Bot(command_prefix='.', case_insensitive=True)\nprint('Ready!')\n\n\n@bot.command()\nasync def stop(ctx):\n await ctx.message.delete()\n await ctx.voice_client.disconnect()\n\n\n@bot.command()\nasync def wew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'wow.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def thicc(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'THICC.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def woof(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'barks.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def welcome(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'Welcome.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def grapefruit(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'grapefruit.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await 
vc.disconnect()\n\n\n@bot.command()\nasync def hello(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'hello.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def winning(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'winning.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def basingstoke(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'basingstoke.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def milleb(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'milleb.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def jew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'jew.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def whatisgoingonhere(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'here.mp3'\n 
vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def BWEKFAST(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'bwekfast.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\nbot.run(TOKEN)\n",
"step-4": "import discord\nfrom discord.ext import commands\nimport asyncio\nimport glob\nfrom dotenv import load_dotenv\nimport os\nload_dotenv()\nTOKEN = os.getenv('TOKEN')\nbot = commands.Bot(command_prefix='.', case_insensitive=True)\nprint('Ready!')\n\n\n@bot.command()\nasync def stop(ctx):\n await ctx.message.delete()\n await ctx.voice_client.disconnect()\n\n\n@bot.command()\nasync def wew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'wow.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def thicc(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'THICC.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def woof(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'barks.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def welcome(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'Welcome.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def grapefruit(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'grapefruit.mp3'\n 
vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def hello(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'hello.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def winning(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'winning.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def basingstoke(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'basingstoke.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def milleb(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'milleb.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def jew(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'jew.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def whatisgoingonhere(ctx):\n await ctx.message.delete()\n channel = 
ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'here.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def BWEKFAST(ctx):\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n counter = 0\n song = 'bwekfast.mp3'\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\nbot.run(TOKEN)\n",
"step-5": "#Import Discord Package\nimport discord\nfrom discord.ext import commands\nimport asyncio\nimport glob\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv() # Load your Discord Token\n\nTOKEN = os.getenv(\"TOKEN\") \n\nbot = commands.Bot(command_prefix='.',case_insensitive=True)\n \nprint('Ready!')\n\n@bot.command()\nasync def stop(ctx):\n await ctx.message.delete()\n await ctx.voice_client.disconnect()\n\n@bot.command()\nasync def wew(ctx):\n\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n \n counter = 0\n\n song = (\"wow.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def thicc(ctx):\n\n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"THICC.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n \n\n@bot.command()\nasync def woof(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"barks.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def welcome(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"Welcome.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n@bot.command()\nasync def grapefruit(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n 
await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"grapefruit.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def hello(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"hello.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def winning(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"winning.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def basingstoke(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"basingstoke.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def milleb(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"milleb.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def jew(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"jew.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter 
= counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def whatisgoingonhere(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"here.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n@bot.command()\nasync def BWEKFAST(ctx):\n \n await ctx.message.delete()\n channel = ctx.author.voice.channel\n print(channel)\n await channel.connect()\n vc = ctx.voice_client\n\n counter = 0\n\n song = (\"bwekfast.mp3\")\n vc.play(discord.FFmpegPCMAudio(song))\n while vc.is_playing():\n await asyncio.sleep(1)\n counter = counter + 1\n await vc.disconnect()\n\n\n\n\nbot.run (TOKEN)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def year_choices(start=1984):
    """Return (year, year) choice tuples from *start* through the current year.

    Suitable as a Django-style ``choices`` iterable. *start* defaults to
    1984 to preserve the original hard-coded behaviour.
    """
    return [(r, r) for r in range(start, datetime.date.today().year + 1)]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def year_choices(start=1984):
    """Return (year, year) choice tuples from *start* through the current year.

    Suitable as a Django-style ``choices`` iterable. *start* defaults to
    1984 to preserve the original hard-coded behaviour.
    """
    return [(r, r) for r in range(start, datetime.date.today().year + 1)]
def current_year():
    """Return the current calendar year as an int."""
    today = datetime.date.today()
    return today.year
<|reserved_special_token_1|>
import datetime
def year_choices(start=1984):
    """Return (year, year) choice tuples from *start* through the current year.

    Suitable as a Django-style ``choices`` iterable. *start* defaults to
    1984 to preserve the original hard-coded behaviour.
    """
    return [(r, r) for r in range(start, datetime.date.today().year + 1)]
def current_year():
    """Return the current calendar year as an int."""
    today = datetime.date.today()
    return today.year
|
flexible
|
{
"blob_id": "90bb70b0a97c7872c8581a176ebacc50df8e1f72",
"index": 464,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef year_choices():\n return [(r, r) for r in range(1984, datetime.date.today().year + 1)]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef year_choices():\n return [(r, r) for r in range(1984, datetime.date.today().year + 1)]\n\n\ndef current_year():\n return datetime.date.today().year\n",
"step-4": "import datetime\n\n\ndef year_choices():\n return [(r, r) for r in range(1984, datetime.date.today().year + 1)]\n\n\ndef current_year():\n return datetime.date.today().year\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
'''
def diff_of_gaussians(img):
grey_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
blur_img_grey = cv2.GaussianBlur(grey_img, (9,9), 0)
blur_img_colour = cv2.GaussianBlur(img, (9,9), 0)
#plt.figure(figsize = (20,2))
#plt.imshow(blur_img_grey, cmap = 'gray')
#plt.show()
#plt.imshow(blur_img_colour)
#plt.show()
fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(nrows = 2, ncols = 2)
edges_grey = cv2.Canny(grey_img,100,200)
edges = cv2.Canny(img, 100, 200)
#plt.subplot(411)
ax1.imshow(edges_grey, cmap = 'gray')
#plt.imshow(edges_grey, cmap = 'gray')
#plt.show()
#plt.subplot(412)
ax2.imshow(edges);
#plt.imshow(edges)
#plt.subplot(421)
ax3.imshow(canny(grey_img), cmap = 'gray')
#plt.show()
#plt.subplot(422)
ax4.imshow(canny(img))
#plt.show()
plt.show()
#plt.imshow(blur_img_grey - grey_img, cmap = 'gray')
#plt.show()
#plt.imshow(blur_img_colour - img)
#plt.show()
return
'''
def canny(img):
    """Return a binary edge map of *img* using the Canny detector.

    The frame is first converted to greyscale (the gradient operators
    inside Canny work on intensity), then Gaussian-blurred with a 9x9
    kernel to suppress high-frequency sensor noise that would otherwise
    show up as spurious edges.
    """
    grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    smoothed = cv2.GaussianBlur(grey, (9, 9), 0)
    # Hysteresis thresholds 30/150: keep strong gradients, trace weak
    # ones only when connected to strong ones.
    return cv2.Canny(smoothed, 30, 150)
def region_of_interest(img):
    """Mask *img* (single-channel edge map) down to the lane trapezoid.

    The bottom of the frame is trimmed by 60 px to exclude the visible
    dashboard, and 10 px are trimmed on the right. The polygon's bottom
    edge spans from x=50 to the trimmed right edge; its top edge sits at
    3/4 of the trimmed height, between 3/8 and 5/8 of the trimmed width,
    roughly where the lane lines converge.
    """
    rows, cols = img.shape
    bottom = rows - 60  # ignore the dashboard at the bottom of the frame
    right = cols - 10
    trapezoid = np.array([[
        (right, bottom),
        (50, bottom),
        (int(3 / 8 * right), int(3 / 4 * bottom)),
        (int(5 / 8 * right), int(3 / 4 * bottom)),
    ]])
    # White-fill the trapezoid on a black mask, then AND it with the
    # edge map so only edges inside the region survive.
    mask = np.zeros_like(img)
    cv2.fillConvexPoly(img=mask, points=trapezoid, color=255,
                       lineType=cv2.LINE_AA)
    return cv2.bitwise_and(img, mask)
def dispay_lines(img, lines):
    """Draw each (x1, y1, x2, y2) row of *lines* on a black canvas.

    The canvas has the same shape as *img*; lines are drawn in green with
    a 30 px thickness. Returns the canvas unchanged when *lines* is None.
    """
    canvas = np.zeros_like(img)
    if lines is None:
        return canvas
    for x1, y1, x2, y2 in lines:
        cv2.line(canvas, (x1, y1), (x2, y2), (0, 255, 0), 30)
    return canvas
def get_cords(img, line_slope_int):
    """Convert a (slope, intercept) pair into pixel endpoints for *img*.

    The segment runs from the bottom row of the frame up to 4/5 of its
    height; each x is solved from x = (y - c) / m. If any endpoint falls
    outside the frame, the sentinel [0, 0, 0, 0] is returned instead.
    """
    slope, intercept = line_slope_int
    height, width, _ = img.shape
    y_bottom = img.shape[0]          # start at the bottom edge
    y_top = int(y_bottom * 4 / 5)    # extend one fifth of the way up
    x_bottom = int((y_bottom - intercept) / slope)
    x_top = int((y_top - intercept) / slope)
    xs_ok = all(0 <= x <= width for x in (x_bottom, x_top))
    ys_ok = all(0 <= y <= height for y in (y_bottom, y_top))
    if not (xs_ok and ys_ok):
        return np.array([0, 0, 0, 0])
    return np.array([x_bottom, y_bottom, x_top, y_top])
def average_slope_intercept(img, lines):
    """Collapse the Hough segments in *lines* into one averaged line per side.

    Each segment's two endpoints are fitted with a degree-1 polynomial to
    get (slope, intercept); segments with non-negative slope are grouped as
    "left" and the rest as "right" (NOTE(review): in image coordinates y
    grows downward, so this left/right labelling may be swapped relative to
    the scene — confirm against the rendered output). Each group is averaged
    and converted to pixel endpoints via get_cords; an empty group yields
    the [0, 0, 0, 0] sentinel.

    The per-segment debug print of (slope, intercept) has been removed — it
    ran for every Hough segment of every frame.
    """
    left_fit = []
    right_fit = []
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        # Least-squares fit through the two endpoints -> (slope, intercept).
        slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
        if slope >= 0:
            left_fit.append((slope, intercept))
        else:
            right_fit.append((slope, intercept))
    if left_fit:
        left_line = get_cords(img, np.average(left_fit, axis=0))
    else:
        left_line = np.array([0, 0, 0, 0])
    if right_fit:
        right_line = get_cords(img, np.average(right_fit, axis=0))
    else:
        right_line = np.array([0, 0, 0, 0])
    return np.array([left_line, right_line])
if __name__ == "__main__":
    # Lane-detection pipeline: read the dash-cam video frame by frame,
    # edge-detect, mask to the lane region, fit averaged lane lines via a
    # Hough transform, and overlay them on the original frame.
    cap = cv2.VideoCapture("./../Downloads/detect_lanes_from.mp4")
    # Sentinel "no lines yet": two zeroed (x1, y1, x2, y2) rows.
    lines = np.asarray((np.array([0,0,0,0]), np.array([0,0,0,0])))
    estimate = lines
    while (cap.isOpened()):
        # NOTE(review): frame is None once the video ends; canny() would
        # then raise — presumably the stream is long enough in practice.
        _, frame = cap.read()
        canny_img = canny(frame)
        masked_img = region_of_interest(canny_img)
        # Remember the previous frame's lines as a fallback estimate.
        estimate = lines
        #print(len(estimate), len(lines))
        # Finding straight lines and therefore the lane lines --> Hough transform
        lines = cv2.HoughLinesP(masked_img, 1, (np.pi / 180), 100, np.array([]), minLineLength = 10, maxLineGap = 500)
        #print(estimate.shape, lines.shape)
        if lines is None:
            # No segments found this frame: reuse the last known lines.
            lines = estimate
        # https://www.youtube.com/watch?v=4zHbI-fFIlI watch at 1.5x lol
        avg_lines = average_slope_intercept(frame, lines)
        #print(avg_lines)
        line_img = dispay_lines(frame, avg_lines)
        # Blend the green line overlay onto the original frame.
        img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)
        cv2.imshow("colour_camera_frame", img_frame)
        cv2.imshow("contoured", masked_img)
        # Quit on 'q'; waitKey(2) also services the GUI event loop.
        if cv2.waitKey(2) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "c3a7a8a006f717057a7ad2920f19d82842b04a85",
"index": 9510,
"step-1": "<mask token>\n\n\ndef canny(img):\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)\n canny_filtered = cv2.Canny(blurred_img, 30, 150)\n return canny_filtered\n\n\n<mask token>\n\n\ndef dispay_lines(img, lines):\n line_img = np.zeros_like(img)\n if lines is not None:\n for x1, y1, x2, y2 in lines:\n cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)\n return line_img\n\n\ndef get_cords(img, line_slope_int):\n slope, intercept = line_slope_int\n y1 = img.shape[0]\n y2 = int(y1 * (4 / 5))\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n height, width, _ = img.shape\n if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <\n 0 or y2 > height or y2 < 0):\n return np.array([0, 0, 0, 0])\n return np.array([x1, y1, x2, y2])\n\n\ndef average_slope_intercept(img, lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)\n print(slope, intercept)\n if slope >= 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n if left_fit:\n left_fit_avg = np.average(left_fit, axis=0)\n left_line = get_cords(img, left_fit_avg)\n else:\n left_line = np.array([0, 0, 0, 0])\n if right_fit:\n right_fit_avg = np.average(right_fit, axis=0)\n right_line = get_cords(img, right_fit_avg)\n else:\n right_line = np.array([0, 0, 0, 0])\n return np.array([left_line, right_line])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef canny(img):\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)\n canny_filtered = cv2.Canny(blurred_img, 30, 150)\n return canny_filtered\n\n\ndef region_of_interest(img):\n height, width = img.shape\n height -= 60\n width -= 10\n Polygons = np.array([[(width, height), (50, height), (int(3 / 8 * width\n ), int(3 / 4 * height)), (int(5 / 8 * width), int(3 / 4 * height))]])\n mask = np.zeros_like(img)\n cv2.fillConvexPoly(img=mask, points=Polygons, color=255, lineType=cv2.\n LINE_AA)\n mask_img = cv2.bitwise_and(img, mask)\n return mask_img\n\n\ndef dispay_lines(img, lines):\n line_img = np.zeros_like(img)\n if lines is not None:\n for x1, y1, x2, y2 in lines:\n cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)\n return line_img\n\n\ndef get_cords(img, line_slope_int):\n slope, intercept = line_slope_int\n y1 = img.shape[0]\n y2 = int(y1 * (4 / 5))\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n height, width, _ = img.shape\n if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <\n 0 or y2 > height or y2 < 0):\n return np.array([0, 0, 0, 0])\n return np.array([x1, y1, x2, y2])\n\n\ndef average_slope_intercept(img, lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)\n print(slope, intercept)\n if slope >= 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n if left_fit:\n left_fit_avg = np.average(left_fit, axis=0)\n left_line = get_cords(img, left_fit_avg)\n else:\n left_line = np.array([0, 0, 0, 0])\n if right_fit:\n right_fit_avg = np.average(right_fit, axis=0)\n right_line = get_cords(img, right_fit_avg)\n else:\n right_line = np.array([0, 0, 0, 0])\n return np.array([left_line, right_line])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef canny(img):\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)\n canny_filtered = cv2.Canny(blurred_img, 30, 150)\n return canny_filtered\n\n\ndef region_of_interest(img):\n height, width = img.shape\n height -= 60\n width -= 10\n Polygons = np.array([[(width, height), (50, height), (int(3 / 8 * width\n ), int(3 / 4 * height)), (int(5 / 8 * width), int(3 / 4 * height))]])\n mask = np.zeros_like(img)\n cv2.fillConvexPoly(img=mask, points=Polygons, color=255, lineType=cv2.\n LINE_AA)\n mask_img = cv2.bitwise_and(img, mask)\n return mask_img\n\n\ndef dispay_lines(img, lines):\n line_img = np.zeros_like(img)\n if lines is not None:\n for x1, y1, x2, y2 in lines:\n cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)\n return line_img\n\n\ndef get_cords(img, line_slope_int):\n slope, intercept = line_slope_int\n y1 = img.shape[0]\n y2 = int(y1 * (4 / 5))\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n height, width, _ = img.shape\n if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <\n 0 or y2 > height or y2 < 0):\n return np.array([0, 0, 0, 0])\n return np.array([x1, y1, x2, y2])\n\n\ndef average_slope_intercept(img, lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)\n print(slope, intercept)\n if slope >= 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n if left_fit:\n left_fit_avg = np.average(left_fit, axis=0)\n left_line = get_cords(img, left_fit_avg)\n else:\n left_line = np.array([0, 0, 0, 0])\n if right_fit:\n right_fit_avg = np.average(right_fit, axis=0)\n right_line = get_cords(img, right_fit_avg)\n else:\n right_line = np.array([0, 0, 0, 0])\n return np.array([left_line, right_line])\n\n\nif __name__ == '__main__':\n cap = cv2.VideoCapture('./../Downloads/detect_lanes_from.mp4')\n lines 
= np.asarray((np.array([0, 0, 0, 0]), np.array([0, 0, 0, 0])))\n estimate = lines\n while cap.isOpened():\n _, frame = cap.read()\n canny_img = canny(frame)\n masked_img = region_of_interest(canny_img)\n estimate = lines\n lines = cv2.HoughLinesP(masked_img, 1, np.pi / 180, 100, np.array([\n ]), minLineLength=10, maxLineGap=500)\n if lines is None:\n lines = estimate\n avg_lines = average_slope_intercept(frame, lines)\n line_img = dispay_lines(frame, avg_lines)\n img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)\n cv2.imshow('colour_camera_frame', img_frame)\n cv2.imshow('contoured', masked_img)\n if cv2.waitKey(2) == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n<mask token>\n\n\ndef canny(img):\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)\n canny_filtered = cv2.Canny(blurred_img, 30, 150)\n return canny_filtered\n\n\ndef region_of_interest(img):\n height, width = img.shape\n height -= 60\n width -= 10\n Polygons = np.array([[(width, height), (50, height), (int(3 / 8 * width\n ), int(3 / 4 * height)), (int(5 / 8 * width), int(3 / 4 * height))]])\n mask = np.zeros_like(img)\n cv2.fillConvexPoly(img=mask, points=Polygons, color=255, lineType=cv2.\n LINE_AA)\n mask_img = cv2.bitwise_and(img, mask)\n return mask_img\n\n\ndef dispay_lines(img, lines):\n line_img = np.zeros_like(img)\n if lines is not None:\n for x1, y1, x2, y2 in lines:\n cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)\n return line_img\n\n\ndef get_cords(img, line_slope_int):\n slope, intercept = line_slope_int\n y1 = img.shape[0]\n y2 = int(y1 * (4 / 5))\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n height, width, _ = img.shape\n if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <\n 0 or y2 > height or y2 < 0):\n return np.array([0, 0, 0, 0])\n return np.array([x1, y1, x2, y2])\n\n\ndef average_slope_intercept(img, lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)\n print(slope, intercept)\n if slope >= 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n if left_fit:\n left_fit_avg = np.average(left_fit, axis=0)\n left_line = get_cords(img, left_fit_avg)\n else:\n left_line = np.array([0, 0, 0, 0])\n if right_fit:\n right_fit_avg = np.average(right_fit, axis=0)\n right_line = get_cords(img, right_fit_avg)\n else:\n right_line = np.array([0, 0, 0, 0])\n return np.array([left_line, right_line])\n\n\nif __name__ == '__main__':\n cap = 
cv2.VideoCapture('./../Downloads/detect_lanes_from.mp4')\n lines = np.asarray((np.array([0, 0, 0, 0]), np.array([0, 0, 0, 0])))\n estimate = lines\n while cap.isOpened():\n _, frame = cap.read()\n canny_img = canny(frame)\n masked_img = region_of_interest(canny_img)\n estimate = lines\n lines = cv2.HoughLinesP(masked_img, 1, np.pi / 180, 100, np.array([\n ]), minLineLength=10, maxLineGap=500)\n if lines is None:\n lines = estimate\n avg_lines = average_slope_intercept(frame, lines)\n line_img = dispay_lines(frame, avg_lines)\n img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)\n cv2.imshow('colour_camera_frame', img_frame)\n cv2.imshow('contoured', masked_img)\n if cv2.waitKey(2) == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n'''\ndef diff_of_gaussians(img):\n\n grey_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n blur_img_grey = cv2.GaussianBlur(grey_img, (9,9), 0)\n blur_img_colour = cv2.GaussianBlur(img, (9,9), 0)\n\n #plt.figure(figsize = (20,2))\n #plt.imshow(blur_img_grey, cmap = 'gray')\n #plt.show()\n #plt.imshow(blur_img_colour)\n #plt.show()\n\n fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(nrows = 2, ncols = 2)\n edges_grey = cv2.Canny(grey_img,100,200)\n edges = cv2.Canny(img, 100, 200)\n #plt.subplot(411)\n ax1.imshow(edges_grey, cmap = 'gray')\n #plt.imshow(edges_grey, cmap = 'gray')\n #plt.show()\n #plt.subplot(412)\n ax2.imshow(edges);\n #plt.imshow(edges)\n #plt.subplot(421)\n ax3.imshow(canny(grey_img), cmap = 'gray')\n #plt.show()\n #plt.subplot(422)\n ax4.imshow(canny(img))\n #plt.show()\n\n plt.show()\n #plt.imshow(blur_img_grey - grey_img, cmap = 'gray')\n #plt.show()\n #plt.imshow(blur_img_colour - img)\n #plt.show()\n\n return\n'''\n\ndef canny(img):\n # changes in intensity are to be captured.\n # Canny and the Sobel operator work no greyscale images\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n # Gaussian Blurring to reduce noise, removes high-frequncy components in the image\n # High frequency due to high ISO of the camera, contours that aren't really edges.\n # https://www.youtube.com/watch?v=uihBwtPIBxM\n blurred_img = cv2.GaussianBlur(grey_img, (9,9), 0)\n\n # Canny Edge Detector, identifying any sharp changes in intesity, Uses edge-gradients\n # the strongest gradents are then traced\n # https://www.youtube.com/watch?v=sRFM5IEqR2w\n canny_filtered = cv2.Canny(blurred_img, 30, 150)\n return canny_filtered\n\ndef region_of_interest(img):\n height, width = img.shape\n height -= 60\n width -= 10\n #Reducing the image size to \"focus\" more on the center of the frame (region of interest)\n #These dimensions are later used in the 
generation of the mask\n #The reduction in height enables us to ignore the part of the image corresponding to the dashboard.\n\n #Coordinates marking our \"region of interest\"\n #The top-left of the image is (0,0)\n Polygons = np.array([\n [(width, height),(50, height), (int((3/8) * width), int((3/4) * height)),(int((5/8) * width), int((3/4) * height))]\n ])\n #(width, height),(50,height) removes what's visible of the dash of the car.\n mask = np.zeros_like(img)\n # filling mask\n cv2.fillConvexPoly(img = mask, points = Polygons, color = 255, lineType = cv2.LINE_AA)\n # Uncomment \"return mask\" to see the \"region of interest\" marked in white\n mask_img = cv2.bitwise_and(img, mask)\n # mask_img now has the detected edges in our region of interest.\n #return mask\n return mask_img\n\ndef dispay_lines(img, lines):\n line_img = np.zeros_like(img)\n if lines is not None:\n for x1,y1,x2,y2 in lines:\n cv2.line(line_img, (x1,y1), (x2,y2), (0,255,0), 30)\n return line_img\n\ndef get_cords(img, line_slope_int):\n slope, intercept = line_slope_int\n y1 = img.shape[0]\n #Line starts from the bottom left\n y2 = int(y1 * (4/5))\n # The line goes 1 fifth of the way up\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n #from y = mx + c\n #print(img.shape)\n height, width, _ = img.shape\n if x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 < 0 or y2 > height or y2 < 0:\n return np.array([0,0,0,0])\n return np.array([x1, y1, x2, y2])\n\ndef average_slope_intercept(img, lines):\n left_fit = []\n right_fit = []\n #if lines is None:\n # return (np.array([0,0,0,0]), np.array([0,0,0,0]))\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n slope, intercept = np.polyfit((x1,x2), (y1,y2), 1)\n #Linear least squares :) (not exactly but it's easy to think of it like this)\n print(slope, intercept)\n #left lines have a positive slope.\n if slope >= 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n\n if 
left_fit:\n left_fit_avg = np.average(left_fit, axis = 0)\n left_line = get_cords(img, left_fit_avg)\n else:\n left_line = np.array([0,0,0,0])\n if right_fit:\n right_fit_avg = np.average(right_fit, axis = 0)\n right_line = get_cords(img, right_fit_avg)\n else:\n right_line = np.array([0,0,0,0])\n\n return np.array([left_line, right_line])\n\nif __name__ == \"__main__\":\n cap = cv2.VideoCapture(\"./../Downloads/detect_lanes_from.mp4\")\n lines = np.asarray((np.array([0,0,0,0]), np.array([0,0,0,0])))\n estimate = lines\n while (cap.isOpened()):\n _, frame = cap.read()\n canny_img = canny(frame)\n masked_img = region_of_interest(canny_img)\n estimate = lines\n #print(len(estimate), len(lines))\n # Finding straight lines and therefore the lane lines --> Hough transform\n lines = cv2.HoughLinesP(masked_img, 1, (np.pi / 180), 100, np.array([]), minLineLength = 10, maxLineGap = 500)\n #print(estimate.shape, lines.shape)\n if lines is None:\n lines = estimate\n # https://www.youtube.com/watch?v=4zHbI-fFIlI watch at 1.5x lol\n avg_lines = average_slope_intercept(frame, lines)\n #print(avg_lines)\n line_img = dispay_lines(frame, avg_lines)\n img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)\n\n cv2.imshow(\"colour_camera_frame\", img_frame)\n cv2.imshow(\"contoured\", masked_img)\n if cv2.waitKey(2) == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
K_model.summary()
<|reserved_special_token_0|>
for line in k_file.readlines():
line = line.rstrip()
contents = line.split('\t')
label = contents.pop()
labels.append([float(label)])
features.append([float(i) for i in contents])
pass
<|reserved_special_token_0|>
for ins in range(len(labels)):
pred = K_model(np.array([features[ins]]).astype(np.float32))
MAE += abs(pred - labels[ins]) / len(labels)
pass
print(MAE)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
K_model = tf.keras.models.load_model('K_model.h5')
K_model.summary()
features, labels = [], []
k_file = open('ts.tab')
for line in k_file.readlines():
line = line.rstrip()
contents = line.split('\t')
label = contents.pop()
labels.append([float(label)])
features.append([float(i) for i in contents])
pass
MAE = 0
for ins in range(len(labels)):
pred = K_model(np.array([features[ins]]).astype(np.float32))
MAE += abs(pred - labels[ins]) / len(labels)
pass
print(MAE)
<|reserved_special_token_1|>
import numpy as np
import tensorflow as tf
K_model = tf.keras.models.load_model('K_model.h5')
K_model.summary()
features, labels = [], []
k_file = open('ts.tab')
for line in k_file.readlines():
line = line.rstrip()
contents = line.split('\t')
label = contents.pop()
labels.append([float(label)])
features.append([float(i) for i in contents])
pass
MAE = 0
for ins in range(len(labels)):
pred = K_model(np.array([features[ins]]).astype(np.float32))
MAE += abs(pred - labels[ins]) / len(labels)
pass
print(MAE)
<|reserved_special_token_1|>
"""Evaluate a saved Keras regression model on a tab-separated test set.

Loads K_model.h5, reads ts.tab (one instance per line: feature columns
followed by the label, all tab-separated) and prints the mean absolute
error over the test set.
"""
import numpy as np
import tensorflow as tf

K_model = tf.keras.models.load_model('K_model.h5')
K_model.summary()

features, labels = [], []
# Alternative dataset file: 'dataset_20200409.tab'
# Fix: use a context manager so the file handle is closed (the original
# leaked it), and iterate the file lazily instead of readlines().
with open('ts.tab') as k_file:
    for line in k_file:
        contents = line.rstrip().split("\t")
        # The last column is the regression target.
        label = contents.pop()
        labels.append([float(label)])
        features.append([float(i) for i in contents])

MAE = 0
for feature, label in zip(features, labels):
    # Call the model directly (eager mode) with a batch of one instance.
    pred = K_model(np.array([feature]).astype(np.float32))
    MAE += abs(pred - label) / len(labels)
print(MAE)
|
flexible
|
{
"blob_id": "1c2a862f995869e3241dd835edb69399141bfb64",
"index": 8926,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nK_model.summary()\n<mask token>\nfor line in k_file.readlines():\n line = line.rstrip()\n contents = line.split('\\t')\n label = contents.pop()\n labels.append([float(label)])\n features.append([float(i) for i in contents])\npass\n<mask token>\nfor ins in range(len(labels)):\n pred = K_model(np.array([features[ins]]).astype(np.float32))\n MAE += abs(pred - labels[ins]) / len(labels)\npass\nprint(MAE)\n",
"step-3": "<mask token>\nK_model = tf.keras.models.load_model('K_model.h5')\nK_model.summary()\nfeatures, labels = [], []\nk_file = open('ts.tab')\nfor line in k_file.readlines():\n line = line.rstrip()\n contents = line.split('\\t')\n label = contents.pop()\n labels.append([float(label)])\n features.append([float(i) for i in contents])\npass\nMAE = 0\nfor ins in range(len(labels)):\n pred = K_model(np.array([features[ins]]).astype(np.float32))\n MAE += abs(pred - labels[ins]) / len(labels)\npass\nprint(MAE)\n",
"step-4": "import numpy as np\nimport tensorflow as tf\nK_model = tf.keras.models.load_model('K_model.h5')\nK_model.summary()\nfeatures, labels = [], []\nk_file = open('ts.tab')\nfor line in k_file.readlines():\n line = line.rstrip()\n contents = line.split('\\t')\n label = contents.pop()\n labels.append([float(label)])\n features.append([float(i) for i in contents])\npass\nMAE = 0\nfor ins in range(len(labels)):\n pred = K_model(np.array([features[ins]]).astype(np.float32))\n MAE += abs(pred - labels[ins]) / len(labels)\npass\nprint(MAE)\n",
"step-5": "import numpy as np \nimport tensorflow as tf\n\nK_model = tf.keras.models.load_model('K_model.h5')\nK_model.summary()\n\nfeatures, labels = [], []\n# k_file = open('dataset_20200409.tab')\nk_file = open('ts.tab')\nfor line in k_file.readlines():\n line = line.rstrip()\n contents = line.split(\"\\t\")\n label = contents.pop()\n labels.append([float(label)])\n features.append([float(i) for i in contents])\npass \n\nMAE = 0\nfor ins in range(len(labels)):\n pred = K_model(np.array([features[ins]]).astype(np.float32))\n MAE += abs(pred - labels[ins]) / len(labels)\npass \nprint(MAE)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_task(*_):
env = normalize(GymEnv('DartWalker2d-v1', record_video=False))
policy_sep = GaussianHLCPolicy(env_spec=env.spec, hidden_sizes=(64, 32),
sub_out_dim=3, option_dim=2)
policy_sep = joblib.load(
'data/local/experiment/Walker2d_hlc_2/policy_0.pkl')
"""# copy parameter from integrated controller to separate controller
hrl_pol_param = policy_int._mean_network.get_params()
hlc_param = policy_sep._mean_network.get_params()
llc_param = policy_sep._lowlevelnetwork.get_params()
for param in hlc_param:
for hrl_param in hrl_pol_param:
if param.name == hrl_param.name:
param.set_value(hrl_param.get_value(borrow=True))
for param in llc_param:
for hrl_param in hrl_pol_param:
if param.name == hrl_param.name:
param.set_value(hrl_param.get_value(borrow=True))"""
baseline = LinearFeatureBaseline(env_spec=env.spec)
"""o = np.random.random(17)*0
o[0]=1.25
a, ainfo = policy_int.get_action(o)
a2, a2info = policy_sep.get_action(o)
action1 = ainfo['mean']
action2 = policy_sep.lowlevel_action(o, a2)
print(action1)
print(action2)
abc"""
algo2 = TRPO(env=env, policy=policy_sep, baseline=baseline, batch_size=
15000, max_path_length=env.horizon, n_itr=200, discount=0.99,
step_size=0.01, epopt_epsilon=1.0, epopt_after_iter=0)
algo2.train()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def run_task(*_):
env = normalize(GymEnv('DartWalker2d-v1', record_video=False))
policy_sep = GaussianHLCPolicy(env_spec=env.spec, hidden_sizes=(64, 32),
sub_out_dim=3, option_dim=2)
policy_sep = joblib.load(
'data/local/experiment/Walker2d_hlc_2/policy_0.pkl')
"""# copy parameter from integrated controller to separate controller
hrl_pol_param = policy_int._mean_network.get_params()
hlc_param = policy_sep._mean_network.get_params()
llc_param = policy_sep._lowlevelnetwork.get_params()
for param in hlc_param:
for hrl_param in hrl_pol_param:
if param.name == hrl_param.name:
param.set_value(hrl_param.get_value(borrow=True))
for param in llc_param:
for hrl_param in hrl_pol_param:
if param.name == hrl_param.name:
param.set_value(hrl_param.get_value(borrow=True))"""
baseline = LinearFeatureBaseline(env_spec=env.spec)
"""o = np.random.random(17)*0
o[0]=1.25
a, ainfo = policy_int.get_action(o)
a2, a2info = policy_sep.get_action(o)
action1 = ainfo['mean']
action2 = policy_sep.lowlevel_action(o, a2)
print(action1)
print(action2)
abc"""
algo2 = TRPO(env=env, policy=policy_sep, baseline=baseline, batch_size=
15000, max_path_length=env.horizon, n_itr=200, discount=0.99,
step_size=0.01, epopt_epsilon=1.0, epopt_after_iter=0)
algo2.train()
run_experiment_lite(run_task, n_parallel=2, snapshot_mode='last', seed=1,
exp_name='Walker2d_hlc_cont')
<|reserved_special_token_1|>
from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.policies.gaussian_rbf_policy import GaussianRBFPolicy
from rllab.policies.gaussian_hmlp_policy import GaussianHMLPPolicy
from rllab.policies.gaussian_hlc_policy import GaussianHLCPolicy
import numpy as np
import joblib
def run_task(*_):
env = normalize(GymEnv('DartWalker2d-v1', record_video=False))
policy_sep = GaussianHLCPolicy(env_spec=env.spec, hidden_sizes=(64, 32),
sub_out_dim=3, option_dim=2)
policy_sep = joblib.load(
'data/local/experiment/Walker2d_hlc_2/policy_0.pkl')
"""# copy parameter from integrated controller to separate controller
hrl_pol_param = policy_int._mean_network.get_params()
hlc_param = policy_sep._mean_network.get_params()
llc_param = policy_sep._lowlevelnetwork.get_params()
for param in hlc_param:
for hrl_param in hrl_pol_param:
if param.name == hrl_param.name:
param.set_value(hrl_param.get_value(borrow=True))
for param in llc_param:
for hrl_param in hrl_pol_param:
if param.name == hrl_param.name:
param.set_value(hrl_param.get_value(borrow=True))"""
baseline = LinearFeatureBaseline(env_spec=env.spec)
"""o = np.random.random(17)*0
o[0]=1.25
a, ainfo = policy_int.get_action(o)
a2, a2info = policy_sep.get_action(o)
action1 = ainfo['mean']
action2 = policy_sep.lowlevel_action(o, a2)
print(action1)
print(action2)
abc"""
algo2 = TRPO(env=env, policy=policy_sep, baseline=baseline, batch_size=
15000, max_path_length=env.horizon, n_itr=200, discount=0.99,
step_size=0.01, epopt_epsilon=1.0, epopt_after_iter=0)
algo2.train()
run_experiment_lite(run_task, n_parallel=2, snapshot_mode='last', seed=1,
exp_name='Walker2d_hlc_cont')
<|reserved_special_token_1|>
from rllab.algos.trpo import TRPO
from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline
from rllab.envs.gym_env import GymEnv
from rllab.envs.normalized_env import normalize
from rllab.misc.instrument import run_experiment_lite
from rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy
from rllab.policies.gaussian_rbf_policy import GaussianRBFPolicy
from rllab.policies.gaussian_hmlp_policy import GaussianHMLPPolicy
from rllab.policies.gaussian_hlc_policy import GaussianHLCPolicy
import numpy as np
import joblib
def run_task(*_):
    """Fine-tune a pre-trained hierarchical (HLC) policy on DartWalker2d with TRPO.

    Builds the environment, loads a previously saved policy snapshot, and runs
    200 TRPO iterations. Invoked by run_experiment_lite below, which supplies
    (ignored) positional arguments.
    """
    env = normalize(GymEnv("DartWalker2d-v1", record_video=False))

    # NOTE(review): this freshly constructed policy is immediately overwritten
    # by the joblib.load below, so the construction is dead work. Kept as-is
    # because network construction may consume RNG state that later
    # initialization depends on — confirm before removing.
    policy_sep = GaussianHLCPolicy(
        env_spec=env.spec,
        # The neural network policy should have two hidden layers, each with 32 hidden units.
        hidden_sizes=(64,32),
        sub_out_dim=3,
        option_dim=2,
        #init_std=0.1,
    )

    # Resume training from a saved snapshot of the high-level controller.
    policy_sep = joblib.load('data/local/experiment/Walker2d_hlc_2/policy_0.pkl')

    '''# copy parameter from integrated controller to separate controller
    hrl_pol_param = policy_int._mean_network.get_params()
    hlc_param = policy_sep._mean_network.get_params()
    llc_param = policy_sep._lowlevelnetwork.get_params()

    for param in hlc_param:
        for hrl_param in hrl_pol_param:
            if param.name == hrl_param.name:
                param.set_value(hrl_param.get_value(borrow=True))

    for param in llc_param:
        for hrl_param in hrl_pol_param:
            if param.name == hrl_param.name:
                param.set_value(hrl_param.get_value(borrow=True))'''

    baseline = LinearFeatureBaseline(env_spec=env.spec)

    '''o = np.random.random(17)*0
    o[0]=1.25
    a, ainfo = policy_int.get_action(o)
    a2, a2info = policy_sep.get_action(o)
    action1 = ainfo['mean']
    action2 = policy_sep.lowlevel_action(o, a2)
    print(action1)
    print(action2)
    abc'''

    # TRPO with a 15k-sample batch per iteration; epopt_epsilon=1.0 means
    # no EPOpt worst-case subsampling is applied.
    algo2 = TRPO(
        env=env,
        policy=policy_sep,
        baseline=baseline,
        batch_size=15000,
        max_path_length=env.horizon,
        n_itr=200,
        discount=0.99,
        step_size=0.01,
        epopt_epsilon = 1.0,
        epopt_after_iter = 0,
        # Uncomment both lines (this and the plot parameter below) to enable plotting
        # plot=True,
    )
    algo2.train()


run_experiment_lite(
    run_task,
    # Number of parallel workers for sampling
    n_parallel=2,
    # Only keep the snapshot parameters for the last iteration
    snapshot_mode="last",
    # Specifies the seed for the experiment. If this is not provided, a random seed
    # will be used
    seed=1,
    exp_name='Walker2d_hlc_cont',
    # plot=True
)
|
flexible
|
{
"blob_id": "9f479ad2acf4f6deb0ca4db606c3d804979c10bd",
"index": 3804,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_task(*_):\n env = normalize(GymEnv('DartWalker2d-v1', record_video=False))\n policy_sep = GaussianHLCPolicy(env_spec=env.spec, hidden_sizes=(64, 32),\n sub_out_dim=3, option_dim=2)\n policy_sep = joblib.load(\n 'data/local/experiment/Walker2d_hlc_2/policy_0.pkl')\n \"\"\"# copy parameter from integrated controller to separate controller\n hrl_pol_param = policy_int._mean_network.get_params()\n hlc_param = policy_sep._mean_network.get_params()\n llc_param = policy_sep._lowlevelnetwork.get_params()\n\n for param in hlc_param:\n for hrl_param in hrl_pol_param:\n if param.name == hrl_param.name:\n param.set_value(hrl_param.get_value(borrow=True))\n\n for param in llc_param:\n for hrl_param in hrl_pol_param:\n if param.name == hrl_param.name:\n param.set_value(hrl_param.get_value(borrow=True))\"\"\"\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n \"\"\"o = np.random.random(17)*0\n o[0]=1.25\n a, ainfo = policy_int.get_action(o)\n a2, a2info = policy_sep.get_action(o)\n action1 = ainfo['mean']\n action2 = policy_sep.lowlevel_action(o, a2)\n print(action1)\n print(action2)\n abc\"\"\"\n algo2 = TRPO(env=env, policy=policy_sep, baseline=baseline, batch_size=\n 15000, max_path_length=env.horizon, n_itr=200, discount=0.99,\n step_size=0.01, epopt_epsilon=1.0, epopt_after_iter=0)\n algo2.train()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run_task(*_):\n env = normalize(GymEnv('DartWalker2d-v1', record_video=False))\n policy_sep = GaussianHLCPolicy(env_spec=env.spec, hidden_sizes=(64, 32),\n sub_out_dim=3, option_dim=2)\n policy_sep = joblib.load(\n 'data/local/experiment/Walker2d_hlc_2/policy_0.pkl')\n \"\"\"# copy parameter from integrated controller to separate controller\n hrl_pol_param = policy_int._mean_network.get_params()\n hlc_param = policy_sep._mean_network.get_params()\n llc_param = policy_sep._lowlevelnetwork.get_params()\n\n for param in hlc_param:\n for hrl_param in hrl_pol_param:\n if param.name == hrl_param.name:\n param.set_value(hrl_param.get_value(borrow=True))\n\n for param in llc_param:\n for hrl_param in hrl_pol_param:\n if param.name == hrl_param.name:\n param.set_value(hrl_param.get_value(borrow=True))\"\"\"\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n \"\"\"o = np.random.random(17)*0\n o[0]=1.25\n a, ainfo = policy_int.get_action(o)\n a2, a2info = policy_sep.get_action(o)\n action1 = ainfo['mean']\n action2 = policy_sep.lowlevel_action(o, a2)\n print(action1)\n print(action2)\n abc\"\"\"\n algo2 = TRPO(env=env, policy=policy_sep, baseline=baseline, batch_size=\n 15000, max_path_length=env.horizon, n_itr=200, discount=0.99,\n step_size=0.01, epopt_epsilon=1.0, epopt_after_iter=0)\n algo2.train()\n\n\nrun_experiment_lite(run_task, n_parallel=2, snapshot_mode='last', seed=1,\n exp_name='Walker2d_hlc_cont')\n",
"step-4": "from rllab.algos.trpo import TRPO\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.envs.gym_env import GymEnv\nfrom rllab.envs.normalized_env import normalize\nfrom rllab.misc.instrument import run_experiment_lite\nfrom rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom rllab.policies.gaussian_rbf_policy import GaussianRBFPolicy\nfrom rllab.policies.gaussian_hmlp_policy import GaussianHMLPPolicy\nfrom rllab.policies.gaussian_hlc_policy import GaussianHLCPolicy\nimport numpy as np\nimport joblib\n\n\ndef run_task(*_):\n env = normalize(GymEnv('DartWalker2d-v1', record_video=False))\n policy_sep = GaussianHLCPolicy(env_spec=env.spec, hidden_sizes=(64, 32),\n sub_out_dim=3, option_dim=2)\n policy_sep = joblib.load(\n 'data/local/experiment/Walker2d_hlc_2/policy_0.pkl')\n \"\"\"# copy parameter from integrated controller to separate controller\n hrl_pol_param = policy_int._mean_network.get_params()\n hlc_param = policy_sep._mean_network.get_params()\n llc_param = policy_sep._lowlevelnetwork.get_params()\n\n for param in hlc_param:\n for hrl_param in hrl_pol_param:\n if param.name == hrl_param.name:\n param.set_value(hrl_param.get_value(borrow=True))\n\n for param in llc_param:\n for hrl_param in hrl_pol_param:\n if param.name == hrl_param.name:\n param.set_value(hrl_param.get_value(borrow=True))\"\"\"\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n \"\"\"o = np.random.random(17)*0\n o[0]=1.25\n a, ainfo = policy_int.get_action(o)\n a2, a2info = policy_sep.get_action(o)\n action1 = ainfo['mean']\n action2 = policy_sep.lowlevel_action(o, a2)\n print(action1)\n print(action2)\n abc\"\"\"\n algo2 = TRPO(env=env, policy=policy_sep, baseline=baseline, batch_size=\n 15000, max_path_length=env.horizon, n_itr=200, discount=0.99,\n step_size=0.01, epopt_epsilon=1.0, epopt_after_iter=0)\n algo2.train()\n\n\nrun_experiment_lite(run_task, n_parallel=2, snapshot_mode='last', seed=1,\n 
exp_name='Walker2d_hlc_cont')\n",
"step-5": "from rllab.algos.trpo import TRPO\nfrom rllab.baselines.linear_feature_baseline import LinearFeatureBaseline\nfrom rllab.envs.gym_env import GymEnv\nfrom rllab.envs.normalized_env import normalize\nfrom rllab.misc.instrument import run_experiment_lite\nfrom rllab.policies.gaussian_mlp_policy import GaussianMLPPolicy\nfrom rllab.policies.gaussian_rbf_policy import GaussianRBFPolicy\nfrom rllab.policies.gaussian_hmlp_policy import GaussianHMLPPolicy\nfrom rllab.policies.gaussian_hlc_policy import GaussianHLCPolicy\n\nimport numpy as np\nimport joblib\n\ndef run_task(*_):\n env = normalize(GymEnv(\"DartWalker2d-v1\", record_video=False))\n\n policy_sep = GaussianHLCPolicy(\n env_spec=env.spec,\n # The neural network policy should have two hidden layers, each with 32 hidden units.\n hidden_sizes=(64,32),\n sub_out_dim=3,\n option_dim=2,\n #init_std=0.1,\n )\n\n policy_sep = joblib.load('data/local/experiment/Walker2d_hlc_2/policy_0.pkl')\n\n '''# copy parameter from integrated controller to separate controller\n hrl_pol_param = policy_int._mean_network.get_params()\n hlc_param = policy_sep._mean_network.get_params()\n llc_param = policy_sep._lowlevelnetwork.get_params()\n\n for param in hlc_param:\n for hrl_param in hrl_pol_param:\n if param.name == hrl_param.name:\n param.set_value(hrl_param.get_value(borrow=True))\n\n for param in llc_param:\n for hrl_param in hrl_pol_param:\n if param.name == hrl_param.name:\n param.set_value(hrl_param.get_value(borrow=True))'''\n\n\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n\n '''o = np.random.random(17)*0\n o[0]=1.25\n a, ainfo = policy_int.get_action(o)\n a2, a2info = policy_sep.get_action(o)\n action1 = ainfo['mean']\n action2 = policy_sep.lowlevel_action(o, a2)\n print(action1)\n print(action2)\n abc'''\n\n algo2 = TRPO(\n env=env,\n policy=policy_sep,\n baseline=baseline,\n batch_size=15000,\n max_path_length=env.horizon,\n n_itr=200,\n discount=0.99,\n step_size=0.01,\n epopt_epsilon = 1.0,\n 
epopt_after_iter = 0,\n # Uncomment both lines (this and the plot parameter below) to enable plotting\n # plot=True,\n )\n algo2.train()\n\n\nrun_experiment_lite(\n run_task,\n # Number of parallel workers for sampling\n n_parallel=2,\n # Only keep the snapshot parameters for the last iteration\n snapshot_mode=\"last\",\n # Specifies the seed for the experiment. If this is not provided, a random seed\n # will be used\n seed=1,\n exp_name='Walker2d_hlc_cont',\n # plot=True\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def blur():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]
filter_imgs = {}
blur_imgs = {}
for ksize in kernel_sizes:
title = f'ksize: {ksize}'
kernel = np.ones(ksize)
kernel /= kernel.size
filter_imgs[title] = cv2.filter2D(image, -1, kernel)
blur_imgs[title] = cv2.blur(image, ksize)
resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)
resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)
def gaussian():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_size = 5, 5
blur_imgs = {}
blur_imgs['original'] = image
blur_imgs['blur'] = cv2.blur(image, kernel_size)
blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)
result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def blur():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]
filter_imgs = {}
blur_imgs = {}
for ksize in kernel_sizes:
title = f'ksize: {ksize}'
kernel = np.ones(ksize)
kernel /= kernel.size
filter_imgs[title] = cv2.filter2D(image, -1, kernel)
blur_imgs[title] = cv2.blur(image, ksize)
resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)
resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)
def gaussian():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_size = 5, 5
blur_imgs = {}
blur_imgs['original'] = image
blur_imgs['blur'] = cv2.blur(image, kernel_size)
blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)
result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)
def bilateral():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_size = 5, 5
blur_imgs = {}
blur_imgs['original'] = image
blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)
blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)
blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150
)
result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)
if __name__ == '__main__':
bilateral()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
IMG_PATH = '../sample_imgs'
def blur():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]
filter_imgs = {}
blur_imgs = {}
for ksize in kernel_sizes:
title = f'ksize: {ksize}'
kernel = np.ones(ksize)
kernel /= kernel.size
filter_imgs[title] = cv2.filter2D(image, -1, kernel)
blur_imgs[title] = cv2.blur(image, ksize)
resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)
resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)
def gaussian():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_size = 5, 5
blur_imgs = {}
blur_imgs['original'] = image
blur_imgs['blur'] = cv2.blur(image, kernel_size)
blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)
result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)
def bilateral():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_size = 5, 5
blur_imgs = {}
blur_imgs['original'] = image
blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)
blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)
blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150
)
result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)
if __name__ == '__main__':
bilateral()
<|reserved_special_token_1|>
import cv2
import numpy as np
import show_imgs as si
IMG_PATH = '../sample_imgs'
def blur():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]
filter_imgs = {}
blur_imgs = {}
for ksize in kernel_sizes:
title = f'ksize: {ksize}'
kernel = np.ones(ksize)
kernel /= kernel.size
filter_imgs[title] = cv2.filter2D(image, -1, kernel)
blur_imgs[title] = cv2.blur(image, ksize)
resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)
resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)
def gaussian():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_size = 5, 5
blur_imgs = {}
blur_imgs['original'] = image
blur_imgs['blur'] = cv2.blur(image, kernel_size)
blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)
result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)
def bilateral():
image = cv2.imread(IMG_PATH + '/jjang.jpg')
kernel_size = 5, 5
blur_imgs = {}
blur_imgs['original'] = image
blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)
blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)
blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150
)
result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)
if __name__ == '__main__':
bilateral()
<|reserved_special_token_1|>
import cv2
import numpy as np
import show_imgs as si
IMG_PATH = "../sample_imgs"
def blur():
    """Compare hand-built averaging kernels (cv2.filter2D) against cv2.blur.

    Runs both filters over a set of kernel shapes (square and 1-D) and
    displays the two result grids side by side via show_imgs.
    """
    image = cv2.imread(IMG_PATH + "/jjang.jpg")
    kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]
    filter_imgs = {}
    blur_imgs = {}
    for ksize in kernel_sizes:
        label = f"ksize: {ksize}"
        # Normalized box kernel: every tap weighs 1/(rows*cols),
        # which is exactly what cv2.blur applies internally.
        avg_kernel = np.full(ksize, 1.0 / (ksize[0] * ksize[1]))
        filter_imgs[label] = cv2.filter2D(image, -1, avg_kernel)
        blur_imgs[label] = cv2.blur(image, ksize)
    si.show_imgs(filter_imgs, "cv2.filter2D", 3)
    si.show_imgs(blur_imgs, "cv2.blur", 3)
def gaussian():
    """Show the original image next to a box blur and a Gaussian blur."""
    image = cv2.imread(IMG_PATH + "/jjang.jpg")
    ksize = (5, 5)
    results = {
        "original": image,
        "blur": cv2.blur(image, ksize),
        # sigma=0 lets OpenCV derive sigma from the kernel size.
        "GaussianBlur": cv2.GaussianBlur(image, ksize, 0),
    }
    si.show_imgs(results, "GaussianBlur", 3, 1000)
def bilateral():
    """Contrast Gaussian smoothing with edge-preserving bilateral filtering."""
    image = cv2.imread(IMG_PATH + "/jjang.jpg")
    ksize = (5, 5)
    results = {
        "original": image,
        "gaussian": cv2.GaussianBlur(image, ksize, 0),
        # Larger sigmaColor/sigmaSpace smooths more aggressively
        # while still preserving strong edges.
        "bilateral (5,50,50)": cv2.bilateralFilter(image, 5, 50, 50),
        "bilateral (5,150,150)": cv2.bilateralFilter(image, 5, 150, 150),
    }
    si.show_imgs(results, "Bilateral Filter", 2)
if __name__ == "__main__":
    # Script entry point: run the bilateral-filter demo.
    # gaussian()
    bilateral()
|
flexible
|
{
"blob_id": "8e5d05d925d47a85ad7c211f26af7951be048d32",
"index": 9351,
"step-1": "<mask token>\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150\n )\n result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)\n\n\nif __name__ == '__main__':\n bilateral()\n",
"step-3": "<mask token>\nIMG_PATH = '../sample_imgs'\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150\n )\n result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)\n\n\nif __name__ == '__main__':\n bilateral()\n",
"step-4": "import cv2\nimport numpy as np\nimport show_imgs as si\nIMG_PATH = '../sample_imgs'\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f'ksize: {ksize}'\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, 'cv2.filter2D', 3)\n resimg = si.show_imgs(blur_imgs, 'cv2.blur', 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['blur'] = cv2.blur(image, kernel_size)\n blur_imgs['GaussianBlur'] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, 'GaussianBlur', 3, 1000)\n\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + '/jjang.jpg')\n kernel_size = 5, 5\n blur_imgs = {}\n blur_imgs['original'] = image\n blur_imgs['gaussian'] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs['bilateral (5,50,50)'] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs['bilateral (5,150,150)'] = cv2.bilateralFilter(image, 5, 150, 150\n )\n result_img = si.show_imgs(blur_imgs, 'Bilateral Filter', 2)\n\n\nif __name__ == '__main__':\n bilateral()\n",
"step-5": "import cv2\nimport numpy as np\nimport show_imgs as si\nIMG_PATH = \"../sample_imgs\"\n\n\ndef blur():\n image = cv2.imread(IMG_PATH + \"/jjang.jpg\")\n kernel_sizes = [(1, 1), (3, 3), (5, 5), (7, 7), (7, 1), (1, 7)]\n filter_imgs = {}\n blur_imgs = {}\n for ksize in kernel_sizes:\n title = f\"ksize: {ksize}\"\n kernel = np.ones(ksize)\n kernel /= kernel.size\n filter_imgs[title] = cv2.filter2D(image, -1, kernel)\n blur_imgs[title] = cv2.blur(image, ksize)\n resimg = si.show_imgs(filter_imgs, \"cv2.filter2D\", 3)\n resimg = si.show_imgs(blur_imgs, \"cv2.blur\", 3)\n\n\ndef gaussian():\n image = cv2.imread(IMG_PATH + \"/jjang.jpg\")\n kernel_size = (5, 5)\n blur_imgs = {}\n blur_imgs[\"original\"] = image\n blur_imgs[\"blur\"] = cv2.blur(image, kernel_size)\n blur_imgs[\"GaussianBlur\"] = cv2.GaussianBlur(image, kernel_size, 0)\n result_img = si.show_imgs(blur_imgs, \"GaussianBlur\", 3, 1000)\n\ndef bilateral():\n image = cv2.imread(IMG_PATH + \"/jjang.jpg\")\n kernel_size = (5, 5)\n blur_imgs = {}\n blur_imgs[\"original\"] = image\n blur_imgs[\"gaussian\"] = cv2.GaussianBlur(image, kernel_size, 0)\n blur_imgs[\"bilateral (5,50,50)\"] = cv2.bilateralFilter(image, 5, 50, 50)\n blur_imgs[\"bilateral (5,150,150)\"] = cv2.bilateralFilter(image, 5, 150, 150)\n result_img = si.show_imgs(blur_imgs, \"Bilateral Filter\", 2)\n\n\n\nif __name__ == \"__main__\":\n # gaussian()\n bilateral()\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
version https://git-lfs.github.com/spec/v1
oid sha256:91f725dc0dba902c5c2c91c065346ab402c8bdbf4b5b13bdaec6773df5d06e49
size 964
|
normal
|
{
"blob_id": "42187f460a64572d2581ed5baec41eaff47466f8",
"index": 8672,
"step-1": "version https://git-lfs.github.com/spec/v1\noid sha256:91f725dc0dba902c5c2c91c065346ab402c8bdbf4b5b13bdaec6773df5d06e49\nsize 964\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import graphics
from graphics import *
class Renderer():
    """Draws a 7x6 Connect-Four style board and pieces with the graphics library.

    The board is a COLS x ROWS grid of cells; each cell holds one circular
    slot.  Cell geometry is derived from the window size, so non-default
    widths/heights render correctly (the original hard-coded 50 px offsets
    and a 37.5 px radius that only matched the 700x600 default).
    """

    COLS = 7
    ROWS = 6

    def __init__(self, engine, width=700, height=600):
        self.width = width
        self.height = height
        self.engine = engine
        self.win = GraphWin("Game Board", width, height)
        self.win.setBackground("blue")

    def update(self):
        """Force the window to repaint."""
        self.win.update()

    def get_window(self):
        return(self.win)

    def get_width(self):
        return self.width

    def _cell_geometry(self):
        """Return (cell_width, cell_height, slot_radius) in pixels."""
        cell_w = self.width / self.COLS
        cell_h = self.height / self.ROWS
        # 0.375 reproduces the original 37.5 px radius on a 100 px cell.
        return cell_w, cell_h, min(cell_w, cell_h) * 0.375

    def draw_board(self):
        """Draw the grid lines and fill every cell with an empty (white) slot."""
        cell_w, cell_h, radius = self._cell_geometry()
        for i in range(0, self.ROWS):
            horLines = Line(Point(0, i * cell_h), Point(self.width, i * cell_h))
            horLines.setOutline('black')
            horLines.draw(self.win)
        for j in range(0, self.COLS):
            verLines = Line(Point(j * cell_w, 0), Point(j * cell_w, self.height))
            verLines.setOutline('black')
            verLines.draw(self.win)
        for y in range(0, self.ROWS):
            for x in range(0, self.COLS):
                slot = Circle(Point(x * cell_w + cell_w / 2,
                                    y * cell_h + cell_h / 2), radius)
                slot.setFill("white")
                slot.draw(self.win)

    def update_pieces(self, x, y, color):
        """Draw a piece at board cell (x, y): 'r' renders red, anything else black."""
        cell_w, cell_h, radius = self._cell_geometry()
        piece = Circle(Point(x * cell_w + cell_w / 2,
                             y * cell_h + cell_h / 2), radius)
        if color == 'r':
            piece.setFill("red")
        else:
            piece.setFill("black")
        piece.draw(self.win)

    def end(self):
        """Close the game window."""
        self.get_window().close()
class Menu():
    """Overlay menu with SAVE / LOAD / QUIT buttons.

    Layout coordinates are fractions of a hard-coded 500 px reference
    square, independent of the actual window size (Renderer defaults to
    700x600), so the menu occupies the upper-left region.
    TODO(review): derive the layout from the window dimensions instead
    of the magic 500.
    """

    _SIZE = 500  # reference square that the fractional layout scales against

    def __init__(self, window):
        self.window = window

        skyBlue = color_rgb(135, 206, 250)
        royalBlue = color_rgb(65, 105, 225)

        s = self._SIZE
        self.menu = Rectangle(Point(.2 * s, .15 * s), Point(.8 * s, .8 * s))
        self.menu.setFill(skyBlue)
        self.menu.setOutline(skyBlue)

        # Each button is a filled rectangle plus a centered bold label;
        # spans reproduce the original coordinates exactly.
        self.save, self.saveTxt = self._make_button(.2, .35, "SAVE", royalBlue)
        self.load, self.loadTxt = self._make_button(.4, .55, "LOAD", royalBlue)
        self.quit, self.quitTxt = self._make_button(.6, .75, "QUIT", royalBlue)

    def _make_button(self, top, bottom, label, color):
        """Build one button spanning [top, bottom] (fractions of _SIZE)."""
        s = self._SIZE
        box = Rectangle(Point(.25 * s, top * s), Point(.75 * s, bottom * s))
        box.setOutline(color)
        box.setFill(color)
        txt = Text(Point(.50 * s, (top + bottom) / 2 * s), label)
        txt.setSize(30)
        txt.setFace("helvetica")
        txt.setStyle("bold")
        return box, txt

    def _items(self):
        # Draw order matters: background panel first so buttons sit on top.
        return (self.menu, self.save, self.saveTxt,
                self.load, self.loadTxt, self.quit, self.quitTxt)

    def openMenu(self):
        """Draw every menu element onto the window."""
        for item in self._items():
            item.draw(self.window)

    def closeMenu(self):
        """Remove every menu element from the window."""
        for item in self._items():
            item.undraw()
|
normal
|
{
"blob_id": "85a3682f144f02aa412d45c901f76c65de2e816d",
"index": 5599,
"step-1": "<mask token>\n\n\nclass Renderer:\n <mask token>\n <mask token>\n <mask token>\n\n def get_width(self):\n return self.width\n\n def draw_board(self):\n for i in range(0, 6):\n horLines = Line(Point(0, i * self.height / 6), Point(self.width,\n i * self.height / 6))\n horLines.setOutline('black')\n horLines.draw(self.win)\n for j in range(0, 7):\n verLines = Line(Point(j * self.width / 7, 0), Point(j * self.\n width / 7, self.height))\n verLines.setOutline('black')\n verLines.draw(self.win)\n for y in range(0, 6):\n for x in range(0, 7):\n slot = Circle(Point(x * self.width / 7 + 50, y * self.\n height / 6 + 50), 37.5)\n slot.setFill('white')\n slot.draw(self.win)\n <mask token>\n\n def end(self):\n self.get_window().close()\n\n\nclass Menu:\n\n def __init__(self, window):\n self.window = window\n skyBlue = color_rgb(135, 206, 250)\n royalBlue = color_rgb(65, 105, 225)\n self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,\n 0.8 * 500))\n self.menu.setFill(skyBlue)\n self.menu.setOutline(skyBlue)\n self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 * \n 500, 0.35 * 500))\n self.save.setOutline(royalBlue)\n self.save.setFill(royalBlue)\n self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')\n self.saveTxt.setSize(30)\n self.saveTxt.setFace('helvetica')\n self.saveTxt.setStyle('bold')\n self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 * \n 500, 0.55 * 500))\n self.load.setOutline(royalBlue)\n self.load.setFill(royalBlue)\n self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')\n self.loadTxt.setSize(30)\n self.loadTxt.setFace('helvetica')\n self.loadTxt.setStyle('bold')\n self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 * \n 500, 0.75 * 500))\n self.quit.setOutline(royalBlue)\n self.quit.setFill(royalBlue)\n self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')\n self.quitTxt.setSize(30)\n self.quitTxt.setFace('helvetica')\n self.quitTxt.setStyle('bold')\n\n def openMenu(self):\n 
self.menu.draw(self.window)\n self.save.draw(self.window)\n self.saveTxt.draw(self.window)\n self.load.draw(self.window)\n self.loadTxt.draw(self.window)\n self.quit.draw(self.window)\n self.quitTxt.draw(self.window)\n\n def closeMenu(self):\n self.menu.undraw()\n self.save.undraw()\n self.saveTxt.undraw()\n self.load.undraw()\n self.loadTxt.undraw()\n self.quit.undraw()\n self.quitTxt.undraw()\n",
"step-2": "<mask token>\n\n\nclass Renderer:\n <mask token>\n <mask token>\n <mask token>\n\n def get_width(self):\n return self.width\n\n def draw_board(self):\n for i in range(0, 6):\n horLines = Line(Point(0, i * self.height / 6), Point(self.width,\n i * self.height / 6))\n horLines.setOutline('black')\n horLines.draw(self.win)\n for j in range(0, 7):\n verLines = Line(Point(j * self.width / 7, 0), Point(j * self.\n width / 7, self.height))\n verLines.setOutline('black')\n verLines.draw(self.win)\n for y in range(0, 6):\n for x in range(0, 7):\n slot = Circle(Point(x * self.width / 7 + 50, y * self.\n height / 6 + 50), 37.5)\n slot.setFill('white')\n slot.draw(self.win)\n\n def update_pieces(self, x, y, color):\n board = self.engine.get_board()\n pointY = y * self.height / 6\n pointX = x * self.width / 7\n piece = Circle(Point(pointX + 50, pointY + 50), 37.5)\n if color == 'r':\n piece.setFill('red')\n else:\n piece.setFill('black')\n piece.draw(self.win)\n\n def end(self):\n self.get_window().close()\n\n\nclass Menu:\n\n def __init__(self, window):\n self.window = window\n skyBlue = color_rgb(135, 206, 250)\n royalBlue = color_rgb(65, 105, 225)\n self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,\n 0.8 * 500))\n self.menu.setFill(skyBlue)\n self.menu.setOutline(skyBlue)\n self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 * \n 500, 0.35 * 500))\n self.save.setOutline(royalBlue)\n self.save.setFill(royalBlue)\n self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')\n self.saveTxt.setSize(30)\n self.saveTxt.setFace('helvetica')\n self.saveTxt.setStyle('bold')\n self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 * \n 500, 0.55 * 500))\n self.load.setOutline(royalBlue)\n self.load.setFill(royalBlue)\n self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')\n self.loadTxt.setSize(30)\n self.loadTxt.setFace('helvetica')\n self.loadTxt.setStyle('bold')\n self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 * 
\n 500, 0.75 * 500))\n self.quit.setOutline(royalBlue)\n self.quit.setFill(royalBlue)\n self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')\n self.quitTxt.setSize(30)\n self.quitTxt.setFace('helvetica')\n self.quitTxt.setStyle('bold')\n\n def openMenu(self):\n self.menu.draw(self.window)\n self.save.draw(self.window)\n self.saveTxt.draw(self.window)\n self.load.draw(self.window)\n self.loadTxt.draw(self.window)\n self.quit.draw(self.window)\n self.quitTxt.draw(self.window)\n\n def closeMenu(self):\n self.menu.undraw()\n self.save.undraw()\n self.saveTxt.undraw()\n self.load.undraw()\n self.loadTxt.undraw()\n self.quit.undraw()\n self.quitTxt.undraw()\n",
"step-3": "<mask token>\n\n\nclass Renderer:\n\n def __init__(self, engine, width=700, height=600):\n self.width = width\n self.height = height\n self.engine = engine\n self.win = GraphWin('Game Board', width, height)\n self.win.setBackground('blue')\n <mask token>\n <mask token>\n\n def get_width(self):\n return self.width\n\n def draw_board(self):\n for i in range(0, 6):\n horLines = Line(Point(0, i * self.height / 6), Point(self.width,\n i * self.height / 6))\n horLines.setOutline('black')\n horLines.draw(self.win)\n for j in range(0, 7):\n verLines = Line(Point(j * self.width / 7, 0), Point(j * self.\n width / 7, self.height))\n verLines.setOutline('black')\n verLines.draw(self.win)\n for y in range(0, 6):\n for x in range(0, 7):\n slot = Circle(Point(x * self.width / 7 + 50, y * self.\n height / 6 + 50), 37.5)\n slot.setFill('white')\n slot.draw(self.win)\n\n def update_pieces(self, x, y, color):\n board = self.engine.get_board()\n pointY = y * self.height / 6\n pointX = x * self.width / 7\n piece = Circle(Point(pointX + 50, pointY + 50), 37.5)\n if color == 'r':\n piece.setFill('red')\n else:\n piece.setFill('black')\n piece.draw(self.win)\n\n def end(self):\n self.get_window().close()\n\n\nclass Menu:\n\n def __init__(self, window):\n self.window = window\n skyBlue = color_rgb(135, 206, 250)\n royalBlue = color_rgb(65, 105, 225)\n self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,\n 0.8 * 500))\n self.menu.setFill(skyBlue)\n self.menu.setOutline(skyBlue)\n self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 * \n 500, 0.35 * 500))\n self.save.setOutline(royalBlue)\n self.save.setFill(royalBlue)\n self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')\n self.saveTxt.setSize(30)\n self.saveTxt.setFace('helvetica')\n self.saveTxt.setStyle('bold')\n self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 * \n 500, 0.55 * 500))\n self.load.setOutline(royalBlue)\n self.load.setFill(royalBlue)\n self.loadTxt = Text(Point(0.5 * 
500, 0.475 * 500), 'LOAD')\n self.loadTxt.setSize(30)\n self.loadTxt.setFace('helvetica')\n self.loadTxt.setStyle('bold')\n self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 * \n 500, 0.75 * 500))\n self.quit.setOutline(royalBlue)\n self.quit.setFill(royalBlue)\n self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')\n self.quitTxt.setSize(30)\n self.quitTxt.setFace('helvetica')\n self.quitTxt.setStyle('bold')\n\n def openMenu(self):\n self.menu.draw(self.window)\n self.save.draw(self.window)\n self.saveTxt.draw(self.window)\n self.load.draw(self.window)\n self.loadTxt.draw(self.window)\n self.quit.draw(self.window)\n self.quitTxt.draw(self.window)\n\n def closeMenu(self):\n self.menu.undraw()\n self.save.undraw()\n self.saveTxt.undraw()\n self.load.undraw()\n self.loadTxt.undraw()\n self.quit.undraw()\n self.quitTxt.undraw()\n",
"step-4": "<mask token>\n\n\nclass Renderer:\n\n def __init__(self, engine, width=700, height=600):\n self.width = width\n self.height = height\n self.engine = engine\n self.win = GraphWin('Game Board', width, height)\n self.win.setBackground('blue')\n\n def update(self):\n self.win.update()\n\n def get_window(self):\n return self.win\n\n def get_width(self):\n return self.width\n\n def draw_board(self):\n for i in range(0, 6):\n horLines = Line(Point(0, i * self.height / 6), Point(self.width,\n i * self.height / 6))\n horLines.setOutline('black')\n horLines.draw(self.win)\n for j in range(0, 7):\n verLines = Line(Point(j * self.width / 7, 0), Point(j * self.\n width / 7, self.height))\n verLines.setOutline('black')\n verLines.draw(self.win)\n for y in range(0, 6):\n for x in range(0, 7):\n slot = Circle(Point(x * self.width / 7 + 50, y * self.\n height / 6 + 50), 37.5)\n slot.setFill('white')\n slot.draw(self.win)\n\n def update_pieces(self, x, y, color):\n board = self.engine.get_board()\n pointY = y * self.height / 6\n pointX = x * self.width / 7\n piece = Circle(Point(pointX + 50, pointY + 50), 37.5)\n if color == 'r':\n piece.setFill('red')\n else:\n piece.setFill('black')\n piece.draw(self.win)\n\n def end(self):\n self.get_window().close()\n\n\nclass Menu:\n\n def __init__(self, window):\n self.window = window\n skyBlue = color_rgb(135, 206, 250)\n royalBlue = color_rgb(65, 105, 225)\n self.menu = Rectangle(Point(0.2 * 500, 0.15 * 500), Point(0.8 * 500,\n 0.8 * 500))\n self.menu.setFill(skyBlue)\n self.menu.setOutline(skyBlue)\n self.save = Rectangle(Point(0.25 * 500, 0.2 * 500), Point(0.75 * \n 500, 0.35 * 500))\n self.save.setOutline(royalBlue)\n self.save.setFill(royalBlue)\n self.saveTxt = Text(Point(0.5 * 500, 0.275 * 500), 'SAVE')\n self.saveTxt.setSize(30)\n self.saveTxt.setFace('helvetica')\n self.saveTxt.setStyle('bold')\n self.load = Rectangle(Point(0.25 * 500, 0.4 * 500), Point(0.75 * \n 500, 0.55 * 500))\n self.load.setOutline(royalBlue)\n 
self.load.setFill(royalBlue)\n self.loadTxt = Text(Point(0.5 * 500, 0.475 * 500), 'LOAD')\n self.loadTxt.setSize(30)\n self.loadTxt.setFace('helvetica')\n self.loadTxt.setStyle('bold')\n self.quit = Rectangle(Point(0.25 * 500, 0.6 * 500), Point(0.75 * \n 500, 0.75 * 500))\n self.quit.setOutline(royalBlue)\n self.quit.setFill(royalBlue)\n self.quitTxt = Text(Point(0.5 * 500, 0.675 * 500), 'QUIT')\n self.quitTxt.setSize(30)\n self.quitTxt.setFace('helvetica')\n self.quitTxt.setStyle('bold')\n\n def openMenu(self):\n self.menu.draw(self.window)\n self.save.draw(self.window)\n self.saveTxt.draw(self.window)\n self.load.draw(self.window)\n self.loadTxt.draw(self.window)\n self.quit.draw(self.window)\n self.quitTxt.draw(self.window)\n\n def closeMenu(self):\n self.menu.undraw()\n self.save.undraw()\n self.saveTxt.undraw()\n self.load.undraw()\n self.loadTxt.undraw()\n self.quit.undraw()\n self.quitTxt.undraw()\n",
"step-5": "import graphics \nfrom graphics import *\n\nclass Renderer():\n def __init__(self, engine, width=700, height=600):\n self.width = width\n self.height = height\n self.engine = engine\n self.win = GraphWin(\"Game Board\", width, height)\n self.win.setBackground(\"blue\")\n\n def update(self):\n self.win.update()\n\n\n def get_window(self):\n return(self.win)\n\n\n def get_width(self):\n return self.width\n\n\n def draw_board(self):\n for i in range(0, 6): #Determines size of terrain\n horLines = Line(Point(0, i*self.height/6),Point(self.width, i*self.height/6))\n horLines.setOutline('black')\n horLines.draw(self.win)\n\n for j in range(0, 7):\n verLines = Line(Point(j*self.width/7, 0),Point(j*self.width/7, self.height))\n verLines.setOutline('black')\n verLines.draw(self.win)\n\n for y in range(0,6):\n for x in range(0,7):\n slot = Circle(Point(x*self.width/7+50,y*self.height/6+50),37.5)\n slot.setFill(\"white\")\n slot.draw(self.win)\n\n def update_pieces(self,x,y,color):\n board = self.engine.get_board()\n pointY = y*self.height/6\n pointX = x*self.width/7\n piece = Circle(Point(pointX+50,pointY+50),37.5)\n if color == 'r':\n piece.setFill(\"red\")\n else:\n piece.setFill(\"black\")\n piece.draw(self.win)\n\n\n def end(self):\n self.get_window().close()\n\nclass Menu(): #CHANGE TO SELF. 
WIDTH AND HIEGHT\n def __init__(self,window):\n self.window = window\n\n skyBlue = color_rgb(135,206,250)\n royalBlue = color_rgb(65,105,225)\n\n self.menu = Rectangle(Point(.2*500,.15*500),Point(.8*500,.8*500))\n self.menu.setFill(skyBlue)\n self.menu.setOutline(skyBlue)\n\n self.save = Rectangle(Point(.25*500,.2*500),Point(.75*500,.35*500))\n self.save.setOutline(royalBlue)\n self.save.setFill(royalBlue)\n\n self.saveTxt = Text(Point(.50*500,.275*500), \"SAVE\")\n self.saveTxt.setSize(30)\n self.saveTxt.setFace(\"helvetica\")\n self.saveTxt.setStyle(\"bold\")\n\n self.load = Rectangle(Point(.25*500,.4*500),Point(.75*500,.55*500))\n self.load.setOutline(royalBlue)\n self.load.setFill(royalBlue)\n\n self.loadTxt = Text(Point(.50*500,.475*500), \"LOAD\")\n self.loadTxt.setSize(30)\n self.loadTxt.setFace(\"helvetica\")\n self.loadTxt.setStyle(\"bold\")\n\n self.quit = Rectangle(Point(.25*500,.6*500),Point(.75*500,.75*500))\n self.quit.setOutline(royalBlue)\n self.quit.setFill(royalBlue)\n\n self.quitTxt = Text(Point(.50*500,.675*500), \"QUIT\")\n self.quitTxt.setSize(30)\n self.quitTxt.setFace(\"helvetica\")\n self.quitTxt.setStyle(\"bold\")\n\n def openMenu(self):\n self.menu.draw(self.window)\n self.save.draw(self.window)\n self.saveTxt.draw(self.window)\n self.load.draw(self.window)\n self.loadTxt.draw(self.window)\n self.quit.draw(self.window)\n self.quitTxt.draw(self.window)\n\n def closeMenu(self):\n self.menu.undraw()\n self.save.undraw()\n self.saveTxt.undraw()\n self.load.undraw()\n self.loadTxt.undraw()\n self.quit.undraw()\n self.quitTxt.undraw()",
"step-ids": [
8,
9,
10,
12,
14
]
}
|
[
8,
9,
10,
12,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('final_syn_train.csv', 'r') as zhidao:
reader = csv.reader(zhidao)
cluster = []
cur = []
stand = ''
for line in reader:
if line[1] == stand:
cur.append(line[0])
else:
if cur:
cluster.append(cur)
stand = line[1]
cur = [line[0]]
cluster.append(cur)
for i in range(len(cluster)):
for j in range(len(cluster[i])):
k = random.randint(0, len(cluster[i]) - 1)
writer.writerow([cluster[i][j], cluster[i][k], 1])
m = n = 0
for _ in range(3):
while m == i:
m = random.randint(0, len(cluster) - 1)
n = random.randint(0, len(cluster[m]) - 1)
writer.writerow([cluster[i][j], cluster[m][n], 0])
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('standard.csv', 'r') as standard:
reader = csv.reader(standard)
stand = []
for line in reader:
stand.append(line[0])
with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:
reader = csv.reader(zhidao)
for line in reader:
writer.writerow([line[0], line[1], 1])
for _ in range(3):
k = random.randint(0, 208)
writer.writerow([line[0], stand[k], 0])
<|reserved_special_token_1|>
import random
import csv
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('final_syn_train.csv', 'r') as zhidao:
reader = csv.reader(zhidao)
cluster = []
cur = []
stand = ''
for line in reader:
if line[1] == stand:
cur.append(line[0])
else:
if cur:
cluster.append(cur)
stand = line[1]
cur = [line[0]]
cluster.append(cur)
for i in range(len(cluster)):
for j in range(len(cluster[i])):
k = random.randint(0, len(cluster[i]) - 1)
writer.writerow([cluster[i][j], cluster[i][k], 1])
m = n = 0
for _ in range(3):
while m == i:
m = random.randint(0, len(cluster) - 1)
n = random.randint(0, len(cluster[m]) - 1)
writer.writerow([cluster[i][j], cluster[m][n], 0])
with open('final_regroup.csv', 'w', newline='') as train:
writer = csv.writer(train)
with open('standard.csv', 'r') as standard:
reader = csv.reader(standard)
stand = []
for line in reader:
stand.append(line[0])
with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:
reader = csv.reader(zhidao)
for line in reader:
writer.writerow([line[0], line[1], 1])
for _ in range(3):
k = random.randint(0, 208)
writer.writerow([line[0], stand[k], 0])
<|reserved_special_token_1|>
import random
import csv
# Pair random questions: same-cluster pairs form positives, cross-cluster
# pairs form negatives, ratio pos:neg = 1:3.
with open('final_regroup.csv', 'w', newline='') as train:
    writer = csv.writer(train)
    with open('final_syn_train.csv', 'r') as zhidao:
        reader = csv.reader(zhidao)
        cluster = []
        cur = []
        stand = ''
        # Group the random questions that share one standard question
        # (column 1) into a single cluster.
        for line in reader:
            if line[1] == stand:
                cur.append(line[0])
            else:
                if cur:
                    cluster.append(cur)
                stand = line[1]
                cur = [line[0]]
        cluster.append(cur)  # flush the final cluster

        # For every sentence: one positive drawn from its own cluster and
        # three negatives drawn from other clusters.
        for i in range(len(cluster)):
            for j in range(len(cluster[i])):
                k = random.randint(0, len(cluster[i]) - 1)
                writer.writerow([cluster[i][j], cluster[i][k], 1])
                # BUG FIX: m/n were initialised once and only re-sampled
                # while m == i, so for every i > 0 all three "negatives"
                # were cluster[0][0]; re-sample a foreign cluster each pass.
                if len(cluster) > 1:  # need a foreign cluster for negatives
                    for _ in range(3):
                        m = i
                        while m == i:
                            m = random.randint(0, len(cluster) - 1)
                        n = random.randint(0, len(cluster[m]) - 1)
                        writer.writerow([cluster[i][j], cluster[m][n], 0])
# Pair random questions with the correct standard question (positive) and
# with wrong standard questions (negatives), 1:3 (this variant works better).
with open('final_regroup.csv', 'w', newline='') as train:
    writer = csv.writer(train)
    # Load the standard questions (first column of standard.csv).
    with open('standard.csv', 'r') as standard:
        stand = [row[0] for row in csv.reader(standard)]
    with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:
        reader = csv.reader(zhidao)
        for line in reader:
            # Positive example: the paraphrase with its own standard question.
            writer.writerow([line[0], line[1], 1])
            # BUG FIX: the index bound was hard-coded as 208; derive it from
            # the real number of standard questions so other corpora work.
            for _ in range(3):
                k = random.randint(0, len(stand) - 1)
                writer.writerow([line[0], stand[k], 0])
|
flexible
|
{
"blob_id": "3a09cbd71d23b1320af9b8ddcfc65b223e487b21",
"index": 1811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('final_regroup.csv', 'w', newline='') as train:\n writer = csv.writer(train)\n with open('final_syn_train.csv', 'r') as zhidao:\n reader = csv.reader(zhidao)\n cluster = []\n cur = []\n stand = ''\n for line in reader:\n if line[1] == stand:\n cur.append(line[0])\n else:\n if cur:\n cluster.append(cur)\n stand = line[1]\n cur = [line[0]]\n cluster.append(cur)\n for i in range(len(cluster)):\n for j in range(len(cluster[i])):\n k = random.randint(0, len(cluster[i]) - 1)\n writer.writerow([cluster[i][j], cluster[i][k], 1])\n m = n = 0\n for _ in range(3):\n while m == i:\n m = random.randint(0, len(cluster) - 1)\n n = random.randint(0, len(cluster[m]) - 1)\n writer.writerow([cluster[i][j], cluster[m][n], 0])\nwith open('final_regroup.csv', 'w', newline='') as train:\n writer = csv.writer(train)\n with open('standard.csv', 'r') as standard:\n reader = csv.reader(standard)\n stand = []\n for line in reader:\n stand.append(line[0])\n with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:\n reader = csv.reader(zhidao)\n for line in reader:\n writer.writerow([line[0], line[1], 1])\n for _ in range(3):\n k = random.randint(0, 208)\n writer.writerow([line[0], stand[k], 0])\n",
"step-3": "import random\nimport csv\nwith open('final_regroup.csv', 'w', newline='') as train:\n writer = csv.writer(train)\n with open('final_syn_train.csv', 'r') as zhidao:\n reader = csv.reader(zhidao)\n cluster = []\n cur = []\n stand = ''\n for line in reader:\n if line[1] == stand:\n cur.append(line[0])\n else:\n if cur:\n cluster.append(cur)\n stand = line[1]\n cur = [line[0]]\n cluster.append(cur)\n for i in range(len(cluster)):\n for j in range(len(cluster[i])):\n k = random.randint(0, len(cluster[i]) - 1)\n writer.writerow([cluster[i][j], cluster[i][k], 1])\n m = n = 0\n for _ in range(3):\n while m == i:\n m = random.randint(0, len(cluster) - 1)\n n = random.randint(0, len(cluster[m]) - 1)\n writer.writerow([cluster[i][j], cluster[m][n], 0])\nwith open('final_regroup.csv', 'w', newline='') as train:\n writer = csv.writer(train)\n with open('standard.csv', 'r') as standard:\n reader = csv.reader(standard)\n stand = []\n for line in reader:\n stand.append(line[0])\n with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:\n reader = csv.reader(zhidao)\n for line in reader:\n writer.writerow([line[0], line[1], 1])\n for _ in range(3):\n k = random.randint(0, 208)\n writer.writerow([line[0], stand[k], 0])\n",
"step-4": "import random\r\nimport csv\r\n\r\n\r\n# 提取随机问,同类组成正例,异类组成负例,正:负=1:3\r\nwith open('final_regroup.csv', 'w', newline='') as train:\r\n writer = csv.writer(train)\r\n with open('final_syn_train.csv', 'r') as zhidao:\r\n reader = csv.reader(zhidao)\r\n cluster = []\r\n cur = []\r\n stand = ''\r\n # 将同一标准问的随机问组成一个数组\r\n for line in reader:\r\n if line[1] == stand:\r\n cur.append(line[0])\r\n else:\r\n if cur:\r\n cluster.append(cur)\r\n stand = line[1]\r\n cur = [line[0]]\r\n cluster.append(cur)\r\n\r\n # 遍历每个分类中的每个句子,在同类数组中取一条数据组成正例,在异类数组中取3条数据组成反例\r\n for i in range(len(cluster)):\r\n for j in range(len(cluster[i])):\r\n k = random.randint(0, len(cluster[i])-1)\r\n writer.writerow([cluster[i][j], cluster[i][k], 1])\r\n m = n = 0\r\n for _ in range(3):\r\n while m == i:\r\n m = random.randint(0, len(cluster)-1)\r\n n = random.randint(0, len(cluster[m])-1)\r\n writer.writerow([cluster[i][j], cluster[m][n], 0])\r\n\r\n\r\n# 提取随机问,与正确标准问组成正例,与非正确标准问组成负例,正:负=1:3 (此方法效果更好)\r\nwith open('final_regroup.csv', 'w', newline='') as train:\r\n writer = csv.writer(train)\r\n with open('standard.csv', 'r') as standard:\r\n reader = csv.reader(standard)\r\n stand = []\r\n for line in reader:\r\n stand.append(line[0])\r\n with open('final_syn_train.csv', 'r', encoding='gbk') as zhidao:\r\n reader = csv.reader(zhidao)\r\n for line in reader:\r\n writer.writerow([line[0], line[1], 1])\r\n for _ in range(3):\r\n k = random.randint(0, 208)\r\n writer.writerow([line[0], stand[k], 0])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):
    """Return *prefix* plus a random integer drawn from [start, end].

    For type == 'ip' the upper bound is clamped to 255 so the generated
    last octet stays valid.
    """
    upper = end
    if type == 'ip' and max(start, end) > 255:
        upper = 255
    return '%s%d' % (prefix, random.randint(start, upper))
def get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):
    """Return *num* distinct strings '<prefix><i>' for i in [start, end].

    IP octets are capped at 255; *num* is clamped to the pool size.
    """
    if type == 'ip' and max(start, end) > 255:
        end = 255
    pool = [prefix + str(i) for i in range(start, end + 1)]
    return random.sample(pool, min(num, len(pool)))
def popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=
    'rootCA.cer'):
    """Fire one curl request through the NTLM proxy via a background shell.

    Prints curl's stderr on success (exit status 0), otherwise the failing
    command line. NOTE(review): the trailing '&' backgrounds curl inside
    the shell, so wait()/poll() observe the shell, not curl itself.
    """
    curl_cmd = ('curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 '
                '--proxy-ntlm -x {3} {4} &'.format(cert, eth, user, proxy, url))
    subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, close_fds=True)
    try:
        subp.wait(2)  # give the shell up to 2 s to finish
    except Exception as e:
        print('curl_request_timeout, error: ', e)
        return
    if subp.poll() == 0:
        print(subp.communicate()[1])
    else:
        print('curl_request-失败: ', curl_cmd)
    return
def system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=
    'rootCA.cer', is_http=False, debug=False):
    """Fire one backgrounded curl header request through the NTLM proxy.

    -I: header request
    --no-keepalive: close the connection after the request

    url     -- target URL
    user    -- proxy auth user name (password is hard-coded)
    eth     -- source interface, e.g. 'eth0:3'
    proxy   -- ASWG proxy host:port
    cert    -- CA bundle used for HTTPS requests
    is_http -- build the plain-HTTP command (no --cacert) when True
    debug   -- keep curl output on stdout instead of discarding it
    """
    if is_http:
        basic_cmd = ('curl -I --no-keepalive --interface {0} '
                     '--proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &')
        if not debug:
            # BUG FIX: was '> /dev/ull', which created a stray file named
            # 'ull'; discard output via /dev/null.
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(eth, user, proxy, url)
    else:
        basic_cmd = ('curl -I --cacert {0} --interface {1} '
                     '--proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &')
        if not debug:
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)
    try:
        # BUG FIX: a stray 'debug = False' used to clobber the parameter,
        # silently ignoring the debug flag passed by callers.
        os.system(curl_cmd)
        print('curl_cmd=', curl_cmd)
    except Exception as e:
        print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.
              format(curl_cmd, e, url, user))
    return
def get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',
    pre_www='www.'):
    """Load URLs for classification tests from a text file, one per line.

    from_file -- input file holding one record per line
    url_index -- when >= 0, each record is split on *spliter* and the URL is
                 taken from that field (a 'www.' prefix is added if missing)
    spliter   -- field separator used together with url_index
    pre_www   -- hostname prefix added to bare domains
    Returns the list of URLs; 'https://' is prepended when none of
    http/https/ftp is present.
    """
    # BUG FIX: use 'with' so the handle is closed (the original leaked it).
    with open(from_file, 'r', encoding='utf-8') as txtfile:
        url_list = txtfile.readlines()
    for i in range(0, len(url_list)):
        url_list[i] = url_list[i].replace('\n', '')
        if url_index >= 0:
            url_var = url_list[i].split(spliter)[url_index].replace(' ', '')
            # Only the first 9 chars are checked: long enough for 'https://'.
            protocol_header = url_var[:9].lower()
            if pre_www not in url_var and not ('http://' in protocol_header or
                    'https://' in protocol_header or 'ftp://' in protocol_header):
                url_var = pre_www + url_var
            url_list[i] = url_var
        protocol_header = url_list[i][:9].lower()
        if ('http://' in protocol_header or 'https://' in protocol_header or
                'ftp://' in protocol_header):
            pass
        else:
            url_list[i] = 'https://' + url_list[i]
    return url_list
def get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,
    eth_num=254):
    """Map a flat request number onto a (user_index, eth_index) pair.

    Both indexes wrap around their range so users and sub-interfaces are
    reused cyclically, e.g.:
        inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0
        inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253
    sequence starts at 0, as does the eth numbering.
    """
    user_index = user_start + sequence % user_num
    eth_index = eth_start + sequence % eth_num
    return user_index, eth_index
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):
if type == 'ip' and max(start, end) > 255:
end = 255
i = random.randint(start, end)
return prefix + str(i)
def get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):
if type == 'ip' and max(start, end) > 255:
end = 255
sequences = []
for i in range(start, end + 1):
sequences.append(prefix + str(i))
if num > len(sequences):
num = len(sequences)
choices = random.sample(sequences, num)
return choices
def popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=
'rootCA.cer'):
curl_cmd = (
'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'
.format(cert, eth, user, proxy, url))
subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
try:
subp.wait(2)
except Exception as e:
print('curl_request_timeout, error: ', e)
return
if subp.poll() == 0:
print(subp.communicate()[1])
else:
print('curl_request-失败: ', curl_cmd)
return
def system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=
    'rootCA.cer', is_http=False, debug=False):
    """Fire one backgrounded curl header request through the NTLM proxy.

    -I: header request
    --no-keepalive: close the connection after the request

    url     -- target URL
    user    -- proxy auth user name (password is hard-coded)
    eth     -- source interface, e.g. 'eth0:3'
    proxy   -- ASWG proxy host:port
    cert    -- CA bundle used for HTTPS requests
    is_http -- build the plain-HTTP command (no --cacert) when True
    debug   -- keep curl output on stdout instead of discarding it
    """
    if is_http:
        basic_cmd = ('curl -I --no-keepalive --interface {0} '
                     '--proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &')
        if not debug:
            # BUG FIX: was '> /dev/ull', which created a stray file named
            # 'ull'; discard output via /dev/null.
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(eth, user, proxy, url)
    else:
        basic_cmd = ('curl -I --cacert {0} --interface {1} '
                     '--proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &')
        if not debug:
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)
    try:
        # BUG FIX: a stray 'debug = False' used to clobber the parameter,
        # silently ignoring the debug flag passed by callers.
        os.system(curl_cmd)
        print('curl_cmd=', curl_cmd)
    except Exception as e:
        print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.
              format(curl_cmd, e, url, user))
    return
def get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',
    pre_www='www.'):
    """Load URLs for classification tests from a text file, one per line.

    from_file -- input file holding one record per line
    url_index -- when >= 0, each record is split on *spliter* and the URL is
                 taken from that field (a 'www.' prefix is added if missing)
    spliter   -- field separator used together with url_index
    pre_www   -- hostname prefix added to bare domains
    Returns the list of URLs; 'https://' is prepended when none of
    http/https/ftp is present.
    """
    # BUG FIX: use 'with' so the handle is closed (the original leaked it).
    with open(from_file, 'r', encoding='utf-8') as txtfile:
        url_list = txtfile.readlines()
    for i in range(0, len(url_list)):
        url_list[i] = url_list[i].replace('\n', '')
        if url_index >= 0:
            url_var = url_list[i].split(spliter)[url_index].replace(' ', '')
            # Only the first 9 chars are checked: long enough for 'https://'.
            protocol_header = url_var[:9].lower()
            if pre_www not in url_var and not ('http://' in protocol_header or
                    'https://' in protocol_header or 'ftp://' in protocol_header):
                url_var = pre_www + url_var
            url_list[i] = url_var
        protocol_header = url_list[i][:9].lower()
        if ('http://' in protocol_header or 'https://' in protocol_header or
                'ftp://' in protocol_header):
            pass
        else:
            url_list[i] = 'https://' + url_list[i]
    return url_list
def get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,
eth_num=254):
"""
inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0
inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253
sequence: start with 0
eth_num: eth sequence start with 0
"""
user_index = sequence % user_num + user_start
eth_index = sequence % eth_num + eth_start
"""
user_index = sequence
if sequence>user_num: #循环,复用,取余
user_index = sequence % user_num + user_start
eth_index = sequence
if eth_index>eth_num: #循环,复用,取余
eth_index = eth_index % eth_num + eth_start
"""
return user_index, eth_index
def callback():
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):
if type == 'ip' and max(start, end) > 255:
end = 255
i = random.randint(start, end)
return prefix + str(i)
def get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):
if type == 'ip' and max(start, end) > 255:
end = 255
sequences = []
for i in range(start, end + 1):
sequences.append(prefix + str(i))
if num > len(sequences):
num = len(sequences)
choices = random.sample(sequences, num)
return choices
def popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=
'rootCA.cer'):
curl_cmd = (
'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'
.format(cert, eth, user, proxy, url))
subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
try:
subp.wait(2)
except Exception as e:
print('curl_request_timeout, error: ', e)
return
if subp.poll() == 0:
print(subp.communicate()[1])
else:
print('curl_request-失败: ', curl_cmd)
return
def system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=
    'rootCA.cer', is_http=False, debug=False):
    """Fire one backgrounded curl header request through the NTLM proxy.

    -I: header request
    --no-keepalive: close the connection after the request

    url     -- target URL
    user    -- proxy auth user name (password is hard-coded)
    eth     -- source interface, e.g. 'eth0:3'
    proxy   -- ASWG proxy host:port
    cert    -- CA bundle used for HTTPS requests
    is_http -- build the plain-HTTP command (no --cacert) when True
    debug   -- keep curl output on stdout instead of discarding it
    """
    if is_http:
        basic_cmd = ('curl -I --no-keepalive --interface {0} '
                     '--proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &')
        if not debug:
            # BUG FIX: was '> /dev/ull', which created a stray file named
            # 'ull'; discard output via /dev/null.
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(eth, user, proxy, url)
    else:
        basic_cmd = ('curl -I --cacert {0} --interface {1} '
                     '--proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &')
        if not debug:
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)
    try:
        # BUG FIX: a stray 'debug = False' used to clobber the parameter,
        # silently ignoring the debug flag passed by callers.
        os.system(curl_cmd)
        print('curl_cmd=', curl_cmd)
    except Exception as e:
        print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.
              format(curl_cmd, e, url, user))
    return
def get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',
    pre_www='www.'):
    """Load URLs for classification tests from a text file, one per line.

    from_file -- input file holding one record per line
    url_index -- when >= 0, each record is split on *spliter* and the URL is
                 taken from that field (a 'www.' prefix is added if missing)
    spliter   -- field separator used together with url_index
    pre_www   -- hostname prefix added to bare domains
    Returns the list of URLs; 'https://' is prepended when none of
    http/https/ftp is present.
    """
    # BUG FIX: use 'with' so the handle is closed (the original leaked it).
    with open(from_file, 'r', encoding='utf-8') as txtfile:
        url_list = txtfile.readlines()
    for i in range(0, len(url_list)):
        url_list[i] = url_list[i].replace('\n', '')
        if url_index >= 0:
            url_var = url_list[i].split(spliter)[url_index].replace(' ', '')
            # Only the first 9 chars are checked: long enough for 'https://'.
            protocol_header = url_var[:9].lower()
            if pre_www not in url_var and not ('http://' in protocol_header or
                    'https://' in protocol_header or 'ftp://' in protocol_header):
                url_var = pre_www + url_var
            url_list[i] = url_var
        protocol_header = url_list[i][:9].lower()
        if ('http://' in protocol_header or 'https://' in protocol_header or
                'ftp://' in protocol_header):
            pass
        else:
            url_list[i] = 'https://' + url_list[i]
    return url_list
def get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,
eth_num=254):
"""
inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0
inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253
sequence: start with 0
eth_num: eth sequence start with 0
"""
user_index = sequence % user_num + user_start
eth_index = sequence % eth_num + eth_start
"""
user_index = sequence
if sequence>user_num: #循环,复用,取余
user_index = sequence % user_num + user_start
eth_index = sequence
if eth_index>eth_num: #循环,复用,取余
eth_index = eth_index % eth_num + eth_start
"""
return user_index, eth_index
def callback():
return
def urls_resquests(urls, proxy='172.17.33.23:8080', user_start=300,
user_num=253, sub_eth_start=0, eth_num=253, ip_prefix='172.18.1.', cert
='rootCA.cer', is_same_url=False, is_http=False, debug=False):
"""
one ip/eth<--> one user
"""
i = 0
for i in range(max(user_num, eth_num)):
url = ''
if is_same_url:
if is_http:
url = 'http://172.16.0.1'
else:
url = 'https://www.baidu.com'
user_index = i % user_num + user_start
eth_index = i % eth_num + sub_eth_start
user = 'userg' + str(user_index)
eth = 'eth0:' + str(eth_index)
""" For debug
print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))
print('ip_{0}={1}'.format(i,ip))
print('eth=',eth)
print('user=',user)
print("-" * 50)
"""
system_curl_request(url, user, eth, proxy=proxy, cert=cert, is_http
=is_http, debug=debug)
return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):
if type == 'ip' and max(start, end) > 255:
end = 255
i = random.randint(start, end)
return prefix + str(i)
def get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):
if type == 'ip' and max(start, end) > 255:
end = 255
sequences = []
for i in range(start, end + 1):
sequences.append(prefix + str(i))
if num > len(sequences):
num = len(sequences)
choices = random.sample(sequences, num)
return choices
def popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=
'rootCA.cer'):
curl_cmd = (
'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'
.format(cert, eth, user, proxy, url))
subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True)
try:
subp.wait(2)
except Exception as e:
print('curl_request_timeout, error: ', e)
return
if subp.poll() == 0:
print(subp.communicate()[1])
else:
print('curl_request-失败: ', curl_cmd)
return
def system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=
    'rootCA.cer', is_http=False, debug=False):
    """Fire one backgrounded curl header request through the NTLM proxy.

    -I: header request
    --no-keepalive: close the connection after the request

    url     -- target URL
    user    -- proxy auth user name (password is hard-coded)
    eth     -- source interface, e.g. 'eth0:3'
    proxy   -- ASWG proxy host:port
    cert    -- CA bundle used for HTTPS requests
    is_http -- build the plain-HTTP command (no --cacert) when True
    debug   -- keep curl output on stdout instead of discarding it
    """
    if is_http:
        basic_cmd = ('curl -I --no-keepalive --interface {0} '
                     '--proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &')
        if not debug:
            # BUG FIX: was '> /dev/ull', which created a stray file named
            # 'ull'; discard output via /dev/null.
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(eth, user, proxy, url)
    else:
        basic_cmd = ('curl -I --cacert {0} --interface {1} '
                     '--proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &')
        if not debug:
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)
    try:
        # BUG FIX: a stray 'debug = False' used to clobber the parameter,
        # silently ignoring the debug flag passed by callers.
        os.system(curl_cmd)
        print('curl_cmd=', curl_cmd)
    except Exception as e:
        print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.
              format(curl_cmd, e, url, user))
    return
def get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',
    pre_www='www.'):
    """Load URLs for classification tests from a text file, one per line.

    from_file -- input file holding one record per line
    url_index -- when >= 0, each record is split on *spliter* and the URL is
                 taken from that field (a 'www.' prefix is added if missing)
    spliter   -- field separator used together with url_index
    pre_www   -- hostname prefix added to bare domains
    Returns the list of URLs; 'https://' is prepended when none of
    http/https/ftp is present.
    """
    # BUG FIX: use 'with' so the handle is closed (the original leaked it).
    with open(from_file, 'r', encoding='utf-8') as txtfile:
        url_list = txtfile.readlines()
    for i in range(0, len(url_list)):
        url_list[i] = url_list[i].replace('\n', '')
        if url_index >= 0:
            url_var = url_list[i].split(spliter)[url_index].replace(' ', '')
            # Only the first 9 chars are checked: long enough for 'https://'.
            protocol_header = url_var[:9].lower()
            if pre_www not in url_var and not ('http://' in protocol_header or
                    'https://' in protocol_header or 'ftp://' in protocol_header):
                url_var = pre_www + url_var
            url_list[i] = url_var
        protocol_header = url_list[i][:9].lower()
        if ('http://' in protocol_header or 'https://' in protocol_header or
                'ftp://' in protocol_header):
            pass
        else:
            url_list[i] = 'https://' + url_list[i]
    return url_list
def get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,
eth_num=254):
"""
inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0
inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253
sequence: start with 0
eth_num: eth sequence start with 0
"""
user_index = sequence % user_num + user_start
eth_index = sequence % eth_num + eth_start
"""
user_index = sequence
if sequence>user_num: #循环,复用,取余
user_index = sequence % user_num + user_start
eth_index = sequence
if eth_index>eth_num: #循环,复用,取余
eth_index = eth_index % eth_num + eth_start
"""
return user_index, eth_index
def callback():
return
def urls_resquests(urls, proxy='172.17.33.23:8080', user_start=300,
user_num=253, sub_eth_start=0, eth_num=253, ip_prefix='172.18.1.', cert
='rootCA.cer', is_same_url=False, is_http=False, debug=False):
"""
one ip/eth<--> one user
"""
i = 0
for i in range(max(user_num, eth_num)):
url = ''
if is_same_url:
if is_http:
url = 'http://172.16.0.1'
else:
url = 'https://www.baidu.com'
user_index = i % user_num + user_start
eth_index = i % eth_num + sub_eth_start
user = 'userg' + str(user_index)
eth = 'eth0:' + str(eth_index)
""" For debug
print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))
print('ip_{0}={1}'.format(i,ip))
print('eth=',eth)
print('user=',user)
print("-" * 50)
"""
system_curl_request(url, user, eth, proxy=proxy, cert=cert, is_http
=is_http, debug=debug)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
"""该Python3脚本用于ASWG做并发认证测试。
1、使用方法示例:
python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080"""
)
parser.add_argument('-r', '--round', type=int, default=1, help=
'认证并发测试的测试次数,默认1轮测试即停止')
parser.add_argument('-s', '--starttime', type=str, default='', help=
'首次认证并发测试的时间,如 16:20:60')
parser.add_argument('-t', '--auth-cache-timeout', type=int, default=600,
help='认证缓存过期时间,默认600秒')
parser.add_argument('-p', '--aswg-proxy', type=str, default=
'172.17.33.23:8080', help='ASWG proxy')
parser.add_argument('-i', '--ip-prefix', type=str, default='172.18.1.',
help='客户端IP前缀,默认只支持C段;其他方式自行适配')
parser.add_argument('-u', '--is-same-url', type=bool, default=True,
help='是否使用相同URL测试')
parser.add_argument('-u1', '--is-http', type=bool, default=True, help=
'当指定使用相同URL时,指定是http还是https请求')
parser.add_argument('-f', '--url-file', type=str, default=
'hwurls_top10w.txt', help='urls来源文件')
parser.add_argument('-f1', '--url-index', type=int, default=0, help=
'urls来源文件中字段序号,默认从0开始')
parser.add_argument('-a0', '--start-user-index', type=int, default=0,
help='auth 用户的序号,默认从0开始')
parser.add_argument('-a1', '--user-num', type=int, default=1275, help=
'auth 用户数量')
parser.add_argument('-e0', '--start-eth0-index', type=int, default=0,
help='开始的子网卡序号,默认从0开始')
parser.add_argument('-e1', '--sub-eth0-num', type=int, default=1275,
help='子网卡接口数量,每个接口一个IP地址')
parser.add_argument('-d', '--is-debug', type=bool, default=False, help=
'是否开启curl的打印日志')
args = parser.parse_args()
max_round = args.round
first_schedule_time = args.starttime
now = datetime.datetime.now()
now_str = now.strftime('%H:%M:%S')
if first_schedule_time:
if len(first_schedule_time) == 8 and len(first_schedule_time.split(':')
) == 3 and first_schedule_time > now_str:
pass
else:
print('-s或者--starttime 格式不对,请输入大于当前时间字符串,如:16:20:60 ')
sys.exit()
else:
nexttime = now + datetime.timedelta(seconds=60)
first_schedule_time = nexttime.strftime('%H:%M:%S')
auth_cache_timeout = args.auth_cache_timeout
proxy = args.aswg_proxy
ip_prefix = args.ip_prefix
is_same_url = args.is_same_url
is_same_url = True
url_file = args.url_file
url_index = args.url_index
start_user_index = args.start_user_index
user_num = args.user_num
start_eth0_index = args.start_eth0_index
sub_eth0_num = args.sub_eth0_num
is_debug = args.is_debug
urls = get_urls_from_file(from_file=url_file, url_index=url_index,
spliter=',', pre_www='www.')
print('urls_len=', len(urls))
print('urls_len=', len(urls))
i = 0
user_start = start_user_index
user_num = user_num
sub_eth_start = start_eth0_index
eth_num = sub_eth0_num
cert = 'rootCA.cer'
is_http = True
print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.
format(max_round, first_schedule_time, auth_cache_timeout))
round_num = 0
while True:
now = datetime.datetime.now()
time_now = now.strftime('%H:%M:%S')
if time_now == first_schedule_time:
print('This_schedule_time={0}, round={1}'.format(
first_schedule_time, round_num))
start_time = time.time()
urls_resquests(urls, proxy=proxy, user_start=user_start,
user_num=user_num, sub_eth_start=sub_eth_start, eth_num=
eth_num, ip_prefix=ip_prefix, cert=cert, is_same_url=
is_same_url, is_http=is_http, debug=is_debug)
total_sending_time_seconds = time.time() - start_time
print(
'total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'
.format(total_sending_time_seconds, round_num))
round_num = round_num + 1
if round_num >= max_round:
print('-' * 50)
print('Finished all test with {0} rounds!!!'.format(max_round))
break
else:
print('-' * 50)
print(
'Please make sure clear cache before the next schedule time!!!'
)
last_schedule_time = datetime.datetime.strptime(now.
strftime('%Y-%m-%d ') + first_schedule_time,
'%Y-%m-%d %H:%M:%S')
nexttime = last_schedule_time + datetime.timedelta(seconds=
auth_cache_timeout + 60)
first_schedule_time = nexttime.strftime('%H:%M:%S')
print('Next_schedule_time={0}...'.format(first_schedule_time))
else:
pass
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#allisnone 20200403
#https://github.com/urllib3/urllib3/issues/1434
#https://github.com/dopstar/requests-ntlm2
#https://github.com/requests/requests-ntlm
#base on python3
#if you request https website, you need to add ASWG CA to following file:
#/root/.pyenv/versions/3.5.5/lib/python3.5/site-packages/certifi/cacert.pem
#ulimit –n 2000
#pip install requests_ntlm
import argparse
import re
import os
import csv
import string,sys,time,datetime
import requests
from requests_toolbelt.adapters import source
#from requests_ntlm import HttpNtlmAuth
import random
import subprocess
#import zthreads
def get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):
    """Return *prefix* plus a random integer drawn from [start, end].

    For type == 'ip' the upper bound is clamped to 255 so the generated
    last octet stays valid.
    """
    upper = end
    if type == 'ip' and max(start, end) > 255:
        upper = 255
    return '%s%d' % (prefix, random.randint(start, upper))
def get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):
    """Return *num* distinct strings '<prefix><i>' for i in [start, end].

    IP octets are capped at 255; *num* is clamped to the pool size.
    """
    if type == 'ip' and max(start, end) > 255:
        end = 255
    pool = [prefix + str(i) for i in range(start, end + 1)]
    return random.sample(pool, min(num, len(pool)))
def popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'):
    """Fire one curl request through the NTLM proxy via a background shell.

    url   -- target URL
    user  -- proxy auth user name (password is hard-coded)
    eth   -- source interface passed to curl --interface
    proxy -- ASWG proxy host:port
    cert  -- CA bundle for HTTPS
    Prints curl's stderr on success (exit status 0), otherwise the command.
    NOTE(review): the trailing '&' backgrounds the command inside the
    shell, so wait()/poll() observe the shell rather than curl itself --
    confirm this best-effort behaviour is intended.
    """
    curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format(
        cert,eth,user,proxy,url)
    subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding="utf-8")
    try:
        subp.wait(2) # wait up to 2 s for the shell to exit
    except Exception as e:
        print('curl_request_timeout, error: ',e)
        return
    if subp.poll() == 0:
        print(subp.communicate()[1])
    else:
        print("curl_request-失败: ",curl_cmd)
    return
def system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=
    'rootCA.cer', is_http=False, debug=False):
    """Fire one backgrounded curl header request through the NTLM proxy.

    -I: header request
    --no-keepalive: close the connection after the request

    url     -- target URL
    user    -- proxy auth user name (password is hard-coded)
    eth     -- source interface, e.g. 'eth0:3'
    proxy   -- ASWG proxy host:port
    cert    -- CA bundle used for HTTPS requests
    is_http -- build the plain-HTTP command (no --cacert) when True
    debug   -- keep curl output on stdout instead of discarding it
    """
    if is_http:
        basic_cmd = ('curl -I --no-keepalive --interface {0} '
                     '--proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &')
        if not debug:
            # BUG FIX: was '> /dev/ull', which created a stray file named
            # 'ull'; discard output via /dev/null.
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(eth, user, proxy, url)
    else:
        basic_cmd = ('curl -I --cacert {0} --interface {1} '
                     '--proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &')
        if not debug:
            basic_cmd = basic_cmd[:-1] + ' > /dev/null 2>&1 &'
        curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)
    try:
        # BUG FIX: a stray 'debug = False' used to clobber the parameter,
        # silently ignoring the debug flag passed by callers.
        os.system(curl_cmd)
        print('curl_cmd=', curl_cmd)
    except Exception as e:
        print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.
              format(curl_cmd, e, url, user))
    return
def get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',
    pre_www='www.'):
    """Load URLs for classification tests from a text file, one per line.

    from_file -- input file holding one record per line
    url_index -- when >= 0, each record is split on *spliter* and the URL is
                 taken from that field (a 'www.' prefix is added if missing)
    spliter   -- field separator used together with url_index
    pre_www   -- hostname prefix added to bare domains
    Returns the list of URLs; 'https://' is prepended when none of
    http/https/ftp is present.
    """
    # BUG FIX: use 'with' so the handle is closed (the original leaked it).
    with open(from_file, 'r', encoding='utf-8') as txtfile:
        url_list = txtfile.readlines()
    for i in range(0, len(url_list)):
        url_list[i] = url_list[i].replace('\n', '')
        if url_index >= 0:
            url_var = url_list[i].split(spliter)[url_index].replace(' ', '')
            # Only the first 9 chars are checked: long enough for 'https://'.
            protocol_header = url_var[:9].lower()
            if pre_www not in url_var and not ('http://' in protocol_header or
                    'https://' in protocol_header or 'ftp://' in protocol_header):
                url_var = pre_www + url_var
            url_list[i] = url_var
        protocol_header = url_list[i][:9].lower()
        if ('http://' in protocol_header or 'https://' in protocol_header or
                'ftp://' in protocol_header):
            pass
        else:
            url_list[i] = 'https://' + url_list[i]
    return url_list
def get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,
    eth_num=254):
    """Map a flat request number onto a (user_index, eth_index) pair.

    Both indexes wrap around their range so users and sub-interfaces are
    reused cyclically, e.g.:
        inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0
        inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253
    sequence starts at 0, as does the eth numbering.
    """
    user_index = user_start + sequence % user_num
    eth_index = eth_start + sequence % eth_num
    return user_index, eth_index
def callback():
    """No-op completion hook kept for the (disabled) thread-pool code path."""
    return None
def urls_resquests(urls, proxy='172.17.33.23:8080', user_start=300, user_num=253,
    sub_eth_start=0, eth_num=253, ip_prefix='172.18.1.', cert='rootCA.cer',
    is_same_url=False, is_http=False, debug=False):
    """Send one curl request per (user, sub-interface) pair via the proxy.

    One ip/eth maps to one user. NOTE(review): the *urls* argument is not
    consumed here -- every request uses the fixed test URL (or '' when
    is_same_url is False, inherited from the commented-out per-URL loop);
    confirm whether per-request URLs were intended.
    """
    if is_same_url:
        # Fixed target, chosen by protocol.
        fixed_url = 'http://172.16.0.1' if is_http else 'https://www.baidu.com'
    else:
        fixed_url = ''
    for seq in range(max(user_num, eth_num)):
        # Wrap both counters so users and sub-interfaces are reused.
        user = 'userg%d' % (seq % user_num + user_start)
        eth = 'eth0:%d' % (seq % eth_num + sub_eth_start)
        system_curl_request(fixed_url, user, eth, proxy=proxy, cert=cert,
                            is_http=is_http, debug=debug)
    return
#"""
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='该Python3脚本用于ASWG做并发认证测试。\n 1、使用方法示例:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080')
parser.add_argument('-r','--round', type=int, default=1,help='认证并发测试的测试次数,默认1轮测试即停止')
parser.add_argument('-s','--starttime', type=str, default='',help='首次认证并发测试的时间,如 16:20:60')
# ---------------------------------------------------------------------------
# CLI entry glue: finish building the argparse options, validate the first
# schedule time, then fire one round of concurrent NTLM auth requests per
# schedule slot until --round rounds are done.  Relies on
# get_urls_from_file() / urls_resquests() defined earlier in this file.
# ---------------------------------------------------------------------------

def _parse_bool(text):
    """Parse a CLI boolean flag value.

    argparse's ``type=bool`` is a well-known trap: any non-empty string is
    truthy, so ``--is-http False`` would still yield True.  Accept the usual
    spellings explicitly instead.
    """
    return text.strip().lower() in ('1', 'true', 'yes', 'y', 'on')

parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='认证缓存过期时间,默认600秒')
parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy')
parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='客户端IP前缀,默认只支持C段;其他方式自行适配')
# BUGFIX: the three boolean options below used type=bool, which coerces every
# non-empty value (including the literal string 'False') to True.
parser.add_argument('-u','--is-same-url', type=_parse_bool, default=True,help='是否使用相同URL测试')
parser.add_argument('-u1','--is-http', type=_parse_bool, default=True,help='当指定使用相同URL时,指定是http还是https请求')
parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='urls来源文件')
parser.add_argument('-f1','--url-index', type=int, default=0,help='urls来源文件中字段序号,默认从0开始')
parser.add_argument('-a0','--start-user-index', type=int, default=0,help='auth 用户的序号,默认从0开始')
parser.add_argument('-a1','--user-num', type=int, default=1275,help='auth 用户数量')
parser.add_argument('-e0','--start-eth0-index', type=int, default=0,help='开始的子网卡序号,默认从0开始')
parser.add_argument('-e1','--sub-eth0-num', type=int, default=1275,help='子网卡接口数量,每个接口一个IP地址')
parser.add_argument('-d','--is-debug', type=_parse_bool, default=False,help='是否开启curl的打印日志')
args = parser.parse_args()

max_round = args.round
first_schedule_time = args.starttime
now = datetime.datetime.now()
now_str = now.strftime("%H:%M:%S")
if first_schedule_time:
    # Zero-padded "HH:MM:SS" strings compare correctly as plain strings, so a
    # lexicographic check is enough to require a time later than "now" today.
    if (len(first_schedule_time) == 8
            and len(first_schedule_time.split(':')) == 3
            and first_schedule_time > now_str):
        pass
    else:
        # BUGFIX: the example time used to read "16:20:60" — there is no
        # 60th second, so the suggested sample could never validate.
        print('-s或者--starttime 格式不对,请输入大于当前时间字符串,如:16:20:59 ')
        sys.exit()
else:
    # No start time supplied: schedule the first round one minute from now.
    first_schedule_time = (now + datetime.timedelta(seconds=60)).strftime("%H:%M:%S")

auth_cache_timeout = args.auth_cache_timeout
proxy = args.aswg_proxy
ip_prefix = args.ip_prefix
# BUGFIX: is_same_url was unconditionally overwritten with True right after
# being read from args, and is_http was hard-coded to True while the parsed
# --is-http option was never used.  Honour the CLI values.
is_same_url = args.is_same_url
is_http = args.is_http
url_file = args.url_file
url_index = args.url_index
user_start = args.start_user_index
user_num = args.user_num
sub_eth_start = args.start_eth0_index
eth_num = args.sub_eth0_num
is_debug = args.is_debug
cert = 'rootCA.cer'  # CA bundle handed to curl for the https variant

urls = get_urls_from_file(from_file=url_file, url_index=url_index, spliter=',', pre_www='www.')
print('urls_len=', len(urls))
print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.format(
    max_round, first_schedule_time, auth_cache_timeout))

round_num = 0
while True:
    now = datetime.datetime.now()
    time_now = now.strftime("%H:%M:%S")
    if time_now != first_schedule_time:
        # Poll roughly five times per second: cheap enough not to peg a CPU
        # core (the original loop busy-waited), frequent enough that the
        # scheduled second can never be skipped.
        time.sleep(0.2)
        continue
    print('This_schedule_time={0}, round={1}'.format(first_schedule_time, round_num))
    start_time = time.time()
    urls_resquests(urls, proxy=proxy, user_start=user_start, user_num=user_num,
                   sub_eth_start=sub_eth_start, eth_num=eth_num,
                   ip_prefix=ip_prefix, cert=cert, is_same_url=is_same_url,
                   is_http=is_http, debug=is_debug)
    total_sending_time_seconds = time.time() - start_time
    print('total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'.format(
        total_sending_time_seconds, round_num))
    round_num = round_num + 1
    if round_num >= max_round:
        print("-" * 50)
        print('Finished all test with {0} rounds!!!'.format(max_round))
        break
    print("-" * 50)
    print('Please make sure clear cache before the next schedule time!!!')
    # Next slot: auth-cache timeout plus a 60 s safety margin after the
    # *scheduled* start of this round (not after it finished).
    # NOTE(review): only "%H:%M:%S" is kept, so a slot that crosses midnight
    # wraps silently to the next day — confirm runs are same-day only.
    last_schedule_time = datetime.datetime.strptime(
        now.strftime("%Y-%m-%d ") + first_schedule_time, '%Y-%m-%d %H:%M:%S')
    nexttime = last_schedule_time + datetime.timedelta(seconds=auth_cache_timeout + 60)
    first_schedule_time = nexttime.strftime("%H:%M:%S")
    print('Next_schedule_time={0}...'.format(first_schedule_time))
|
flexible
|
{
"blob_id": "a7fae2da8abba6e05b4fc90dec8826194d189853",
"index": 2758,
"step-1": "<mask token>\n\n\ndef get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n i = random.randint(start, end)\n return prefix + str(i)\n\n\ndef get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n sequences = []\n for i in range(start, end + 1):\n sequences.append(prefix + str(i))\n if num > len(sequences):\n num = len(sequences)\n choices = random.sample(sequences, num)\n return choices\n\n\ndef popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer'):\n curl_cmd = (\n 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n .format(cert, eth, user, proxy, url))\n subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n try:\n subp.wait(2)\n except Exception as e:\n print('curl_request_timeout, error: ', e)\n return\n if subp.poll() == 0:\n print(subp.communicate()[1])\n else:\n print('curl_request-失败: ', curl_cmd)\n return\n\n\ndef system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer', is_http=False, debug=False):\n \"\"\"\n -I: header request\n -k: skip ssl\n --no-keepalive, keepalive=close\n \"\"\"\n curl_cmd = ''\n debug = False\n if is_http:\n basic_cmd = (\n 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(eth, user, proxy, url)\n else:\n basic_cmd = (\n 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)\n try:\n os_p = os.system(curl_cmd)\n print('curl_cmd=', curl_cmd)\n except Exception as e:\n print('curl_request_timeout: 
{0}, error: {1}, url={2}, user={3}'.\n format(curl_cmd, e, url, user))\n return\n\n\ndef get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',\n pre_www='www.'):\n \"\"\"\n 用于url分类测试,测试文件中存放大量的url地址\n :param from_file: str \n :return: list, URL_list(Generator)\n \"\"\"\n txtfile = open(from_file, 'r', encoding='utf-8')\n url_list = txtfile.readlines()\n for i in range(0, len(url_list)):\n url_list[i] = url_list[i].replace('\\n', '')\n if url_index >= 0:\n url_var = url_list[i].split(spliter)[url_index].replace(' ', '')\n protocol_header = url_var[:9].lower()\n if pre_www not in url_var and not ('http://' in protocol_header or\n 'https://' in protocol_header or 'ftp://' in protocol_header):\n url_var = pre_www + url_var\n url_list[i] = url_var\n protocol_header = url_list[i][:9].lower()\n if ('http://' in protocol_header or 'https://' in protocol_header or\n 'ftp://' in protocol_header):\n pass\n else:\n url_list[i] = 'https://' + url_list[i]\n return url_list\n\n\ndef get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,\n eth_num=254):\n \"\"\"\n inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0\n inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253\n sequence: start with 0\n eth_num: eth sequence start with 0\n \"\"\"\n user_index = sequence % user_num + user_start\n eth_index = sequence % eth_num + eth_start\n \"\"\"\n user_index = sequence\n if sequence>user_num: #循环,复用,取余\n user_index = sequence % user_num + user_start\n eth_index = sequence\n if eth_index>eth_num: #循环,复用,取余\n eth_index = eth_index % eth_num + eth_start\n \"\"\"\n return user_index, eth_index\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n i = random.randint(start, end)\n return prefix + str(i)\n\n\ndef get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n sequences = []\n for i in range(start, end + 1):\n sequences.append(prefix + str(i))\n if num > len(sequences):\n num = len(sequences)\n choices = random.sample(sequences, num)\n return choices\n\n\ndef popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer'):\n curl_cmd = (\n 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n .format(cert, eth, user, proxy, url))\n subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n try:\n subp.wait(2)\n except Exception as e:\n print('curl_request_timeout, error: ', e)\n return\n if subp.poll() == 0:\n print(subp.communicate()[1])\n else:\n print('curl_request-失败: ', curl_cmd)\n return\n\n\ndef system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer', is_http=False, debug=False):\n \"\"\"\n -I: header request\n -k: skip ssl\n --no-keepalive, keepalive=close\n \"\"\"\n curl_cmd = ''\n debug = False\n if is_http:\n basic_cmd = (\n 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(eth, user, proxy, url)\n else:\n basic_cmd = (\n 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)\n try:\n os_p = os.system(curl_cmd)\n print('curl_cmd=', curl_cmd)\n except Exception as e:\n print('curl_request_timeout: 
{0}, error: {1}, url={2}, user={3}'.\n format(curl_cmd, e, url, user))\n return\n\n\ndef get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',\n pre_www='www.'):\n \"\"\"\n 用于url分类测试,测试文件中存放大量的url地址\n :param from_file: str \n :return: list, URL_list(Generator)\n \"\"\"\n txtfile = open(from_file, 'r', encoding='utf-8')\n url_list = txtfile.readlines()\n for i in range(0, len(url_list)):\n url_list[i] = url_list[i].replace('\\n', '')\n if url_index >= 0:\n url_var = url_list[i].split(spliter)[url_index].replace(' ', '')\n protocol_header = url_var[:9].lower()\n if pre_www not in url_var and not ('http://' in protocol_header or\n 'https://' in protocol_header or 'ftp://' in protocol_header):\n url_var = pre_www + url_var\n url_list[i] = url_var\n protocol_header = url_list[i][:9].lower()\n if ('http://' in protocol_header or 'https://' in protocol_header or\n 'ftp://' in protocol_header):\n pass\n else:\n url_list[i] = 'https://' + url_list[i]\n return url_list\n\n\ndef get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,\n eth_num=254):\n \"\"\"\n inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0\n inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253\n sequence: start with 0\n eth_num: eth sequence start with 0\n \"\"\"\n user_index = sequence % user_num + user_start\n eth_index = sequence % eth_num + eth_start\n \"\"\"\n user_index = sequence\n if sequence>user_num: #循环,复用,取余\n user_index = sequence % user_num + user_start\n eth_index = sequence\n if eth_index>eth_num: #循环,复用,取余\n eth_index = eth_index % eth_num + eth_start\n \"\"\"\n return user_index, eth_index\n\n\ndef callback():\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n i = random.randint(start, end)\n return prefix + str(i)\n\n\ndef get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n sequences = []\n for i in range(start, end + 1):\n sequences.append(prefix + str(i))\n if num > len(sequences):\n num = len(sequences)\n choices = random.sample(sequences, num)\n return choices\n\n\ndef popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer'):\n curl_cmd = (\n 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n .format(cert, eth, user, proxy, url))\n subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n try:\n subp.wait(2)\n except Exception as e:\n print('curl_request_timeout, error: ', e)\n return\n if subp.poll() == 0:\n print(subp.communicate()[1])\n else:\n print('curl_request-失败: ', curl_cmd)\n return\n\n\ndef system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer', is_http=False, debug=False):\n \"\"\"\n -I: header request\n -k: skip ssl\n --no-keepalive, keepalive=close\n \"\"\"\n curl_cmd = ''\n debug = False\n if is_http:\n basic_cmd = (\n 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(eth, user, proxy, url)\n else:\n basic_cmd = (\n 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)\n try:\n os_p = os.system(curl_cmd)\n print('curl_cmd=', curl_cmd)\n except Exception as e:\n print('curl_request_timeout: 
{0}, error: {1}, url={2}, user={3}'.\n format(curl_cmd, e, url, user))\n return\n\n\ndef get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',\n pre_www='www.'):\n \"\"\"\n 用于url分类测试,测试文件中存放大量的url地址\n :param from_file: str \n :return: list, URL_list(Generator)\n \"\"\"\n txtfile = open(from_file, 'r', encoding='utf-8')\n url_list = txtfile.readlines()\n for i in range(0, len(url_list)):\n url_list[i] = url_list[i].replace('\\n', '')\n if url_index >= 0:\n url_var = url_list[i].split(spliter)[url_index].replace(' ', '')\n protocol_header = url_var[:9].lower()\n if pre_www not in url_var and not ('http://' in protocol_header or\n 'https://' in protocol_header or 'ftp://' in protocol_header):\n url_var = pre_www + url_var\n url_list[i] = url_var\n protocol_header = url_list[i][:9].lower()\n if ('http://' in protocol_header or 'https://' in protocol_header or\n 'ftp://' in protocol_header):\n pass\n else:\n url_list[i] = 'https://' + url_list[i]\n return url_list\n\n\ndef get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,\n eth_num=254):\n \"\"\"\n inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0\n inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253\n sequence: start with 0\n eth_num: eth sequence start with 0\n \"\"\"\n user_index = sequence % user_num + user_start\n eth_index = sequence % eth_num + eth_start\n \"\"\"\n user_index = sequence\n if sequence>user_num: #循环,复用,取余\n user_index = sequence % user_num + user_start\n eth_index = sequence\n if eth_index>eth_num: #循环,复用,取余\n eth_index = eth_index % eth_num + eth_start\n \"\"\"\n return user_index, eth_index\n\n\ndef callback():\n return\n\n\ndef urls_resquests(urls, proxy='172.17.33.23:8080', user_start=300,\n user_num=253, sub_eth_start=0, eth_num=253, ip_prefix='172.18.1.', cert\n ='rootCA.cer', is_same_url=False, is_http=False, debug=False):\n \"\"\"\n one ip/eth<--> one user\n \"\"\"\n i = 0\n for i in range(max(user_num, 
eth_num)):\n url = ''\n if is_same_url:\n if is_http:\n url = 'http://172.16.0.1'\n else:\n url = 'https://www.baidu.com'\n user_index = i % user_num + user_start\n eth_index = i % eth_num + sub_eth_start\n user = 'userg' + str(user_index)\n eth = 'eth0:' + str(eth_index)\n \"\"\" For debug\n print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))\n print('ip_{0}={1}'.format(i,ip))\n print('eth=',eth)\n print('user=',user)\n print(\"-\" * 50)\n \"\"\"\n system_curl_request(url, user, eth, proxy=proxy, cert=cert, is_http\n =is_http, debug=debug)\n return\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef get_random_ip_or_user(start, end, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n i = random.randint(start, end)\n return prefix + str(i)\n\n\ndef get_random_ips_users(start, end, num, prefix='172.16.90.', type='ip'):\n if type == 'ip' and max(start, end) > 255:\n end = 255\n sequences = []\n for i in range(start, end + 1):\n sequences.append(prefix + str(i))\n if num > len(sequences):\n num = len(sequences)\n choices = random.sample(sequences, num)\n return choices\n\n\ndef popen_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer'):\n curl_cmd = (\n 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n .format(cert, eth, user, proxy, url))\n subp = subprocess.Popen(curl_cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, close_fds=True)\n try:\n subp.wait(2)\n except Exception as e:\n print('curl_request_timeout, error: ', e)\n return\n if subp.poll() == 0:\n print(subp.communicate()[1])\n else:\n print('curl_request-失败: ', curl_cmd)\n return\n\n\ndef system_curl_request(url, user, eth, proxy='172.17.33.23:8080', cert=\n 'rootCA.cer', is_http=False, debug=False):\n \"\"\"\n -I: header request\n -k: skip ssl\n --no-keepalive, keepalive=close\n \"\"\"\n curl_cmd = ''\n debug = False\n if is_http:\n basic_cmd = (\n 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(eth, user, proxy, url)\n else:\n basic_cmd = (\n 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\n )\n if debug:\n pass\n else:\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\n curl_cmd = basic_cmd.format(cert, eth, user, proxy, url)\n try:\n os_p = os.system(curl_cmd)\n print('curl_cmd=', curl_cmd)\n except Exception as e:\n print('curl_request_timeout: 
{0}, error: {1}, url={2}, user={3}'.\n format(curl_cmd, e, url, user))\n return\n\n\ndef get_urls_from_file(from_file='url16000.txt', url_index=-1, spliter=',',\n pre_www='www.'):\n \"\"\"\n 用于url分类测试,测试文件中存放大量的url地址\n :param from_file: str \n :return: list, URL_list(Generator)\n \"\"\"\n txtfile = open(from_file, 'r', encoding='utf-8')\n url_list = txtfile.readlines()\n for i in range(0, len(url_list)):\n url_list[i] = url_list[i].replace('\\n', '')\n if url_index >= 0:\n url_var = url_list[i].split(spliter)[url_index].replace(' ', '')\n protocol_header = url_var[:9].lower()\n if pre_www not in url_var and not ('http://' in protocol_header or\n 'https://' in protocol_header or 'ftp://' in protocol_header):\n url_var = pre_www + url_var\n url_list[i] = url_var\n protocol_header = url_list[i][:9].lower()\n if ('http://' in protocol_header or 'https://' in protocol_header or\n 'ftp://' in protocol_header):\n pass\n else:\n url_list[i] = 'https://' + url_list[i]\n return url_list\n\n\ndef get_eth_user_index(sequence=0, user_start=30, user_num=10, eth_start=0,\n eth_num=254):\n \"\"\"\n inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0\n inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253\n sequence: start with 0\n eth_num: eth sequence start with 0\n \"\"\"\n user_index = sequence % user_num + user_start\n eth_index = sequence % eth_num + eth_start\n \"\"\"\n user_index = sequence\n if sequence>user_num: #循环,复用,取余\n user_index = sequence % user_num + user_start\n eth_index = sequence\n if eth_index>eth_num: #循环,复用,取余\n eth_index = eth_index % eth_num + eth_start\n \"\"\"\n return user_index, eth_index\n\n\ndef callback():\n return\n\n\ndef urls_resquests(urls, proxy='172.17.33.23:8080', user_start=300,\n user_num=253, sub_eth_start=0, eth_num=253, ip_prefix='172.18.1.', cert\n ='rootCA.cer', is_same_url=False, is_http=False, debug=False):\n \"\"\"\n one ip/eth<--> one user\n \"\"\"\n i = 0\n for i in range(max(user_num, 
eth_num)):\n url = ''\n if is_same_url:\n if is_http:\n url = 'http://172.16.0.1'\n else:\n url = 'https://www.baidu.com'\n user_index = i % user_num + user_start\n eth_index = i % eth_num + sub_eth_start\n user = 'userg' + str(user_index)\n eth = 'eth0:' + str(eth_index)\n \"\"\" For debug\n print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))\n print('ip_{0}={1}'.format(i,ip))\n print('eth=',eth)\n print('user=',user)\n print(\"-\" * 50)\n \"\"\"\n system_curl_request(url, user, eth, proxy=proxy, cert=cert, is_http\n =is_http, debug=debug)\n return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n \"\"\"该Python3脚本用于ASWG做并发认证测试。\n 1、使用方法示例:\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080\"\"\"\n )\n parser.add_argument('-r', '--round', type=int, default=1, help=\n '认证并发测试的测试次数,默认1轮测试即停止')\n parser.add_argument('-s', '--starttime', type=str, default='', help=\n '首次认证并发测试的时间,如 16:20:60')\n parser.add_argument('-t', '--auth-cache-timeout', type=int, default=600,\n help='认证缓存过期时间,默认600秒')\n parser.add_argument('-p', '--aswg-proxy', type=str, default=\n '172.17.33.23:8080', help='ASWG proxy')\n parser.add_argument('-i', '--ip-prefix', type=str, default='172.18.1.',\n help='客户端IP前缀,默认只支持C段;其他方式自行适配')\n parser.add_argument('-u', '--is-same-url', type=bool, default=True,\n help='是否使用相同URL测试')\n parser.add_argument('-u1', '--is-http', type=bool, default=True, help=\n '当指定使用相同URL时,指定是http还是https请求')\n parser.add_argument('-f', '--url-file', type=str, default=\n 'hwurls_top10w.txt', help='urls来源文件')\n parser.add_argument('-f1', '--url-index', type=int, default=0, help=\n 'urls来源文件中字段序号,默认从0开始')\n parser.add_argument('-a0', '--start-user-index', type=int, default=0,\n help='auth 用户的序号,默认从0开始')\n parser.add_argument('-a1', '--user-num', type=int, default=1275, help=\n 'auth 用户数量')\n parser.add_argument('-e0', '--start-eth0-index', type=int, default=0,\n help='开始的子网卡序号,默认从0开始')\n 
parser.add_argument('-e1', '--sub-eth0-num', type=int, default=1275,\n help='子网卡接口数量,每个接口一个IP地址')\n parser.add_argument('-d', '--is-debug', type=bool, default=False, help=\n '是否开启curl的打印日志')\n args = parser.parse_args()\n max_round = args.round\n first_schedule_time = args.starttime\n now = datetime.datetime.now()\n now_str = now.strftime('%H:%M:%S')\n if first_schedule_time:\n if len(first_schedule_time) == 8 and len(first_schedule_time.split(':')\n ) == 3 and first_schedule_time > now_str:\n pass\n else:\n print('-s或者--starttime 格式不对,请输入大于当前时间字符串,如:16:20:60 ')\n sys.exit()\n else:\n nexttime = now + datetime.timedelta(seconds=60)\n first_schedule_time = nexttime.strftime('%H:%M:%S')\n auth_cache_timeout = args.auth_cache_timeout\n proxy = args.aswg_proxy\n ip_prefix = args.ip_prefix\n is_same_url = args.is_same_url\n is_same_url = True\n url_file = args.url_file\n url_index = args.url_index\n start_user_index = args.start_user_index\n user_num = args.user_num\n start_eth0_index = args.start_eth0_index\n sub_eth0_num = args.sub_eth0_num\n is_debug = args.is_debug\n urls = get_urls_from_file(from_file=url_file, url_index=url_index,\n spliter=',', pre_www='www.')\n print('urls_len=', len(urls))\n print('urls_len=', len(urls))\n i = 0\n user_start = start_user_index\n user_num = user_num\n sub_eth_start = start_eth0_index\n eth_num = sub_eth0_num\n cert = 'rootCA.cer'\n is_http = True\n print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.\n format(max_round, first_schedule_time, auth_cache_timeout))\n round_num = 0\n while True:\n now = datetime.datetime.now()\n time_now = now.strftime('%H:%M:%S')\n if time_now == first_schedule_time:\n print('This_schedule_time={0}, round={1}'.format(\n first_schedule_time, round_num))\n start_time = time.time()\n urls_resquests(urls, proxy=proxy, user_start=user_start,\n user_num=user_num, sub_eth_start=sub_eth_start, eth_num=\n eth_num, ip_prefix=ip_prefix, cert=cert, is_same_url=\n is_same_url, is_http=is_http, 
debug=is_debug)\n total_sending_time_seconds = time.time() - start_time\n print(\n 'total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'\n .format(total_sending_time_seconds, round_num))\n round_num = round_num + 1\n if round_num >= max_round:\n print('-' * 50)\n print('Finished all test with {0} rounds!!!'.format(max_round))\n break\n else:\n print('-' * 50)\n print(\n 'Please make sure clear cache before the next schedule time!!!'\n )\n last_schedule_time = datetime.datetime.strptime(now.\n strftime('%Y-%m-%d ') + first_schedule_time,\n '%Y-%m-%d %H:%M:%S')\n nexttime = last_schedule_time + datetime.timedelta(seconds=\n auth_cache_timeout + 60)\n first_schedule_time = nexttime.strftime('%H:%M:%S')\n print('Next_schedule_time={0}...'.format(first_schedule_time))\n else:\n pass\n",
"step-5": "#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n#allisnone 20200403\r\n#https://github.com/urllib3/urllib3/issues/1434\r\n#https://github.com/dopstar/requests-ntlm2\r\n#https://github.com/requests/requests-ntlm\r\n\r\n#base on python3\r\n#if you request https website, you need to add ASWG CA to following file:\r\n#/root/.pyenv/versions/3.5.5/lib/python3.5/site-packages/certifi/cacert.pem\r\n#ulimit –n 2000\r\n#pip install requests_ntlm\r\nimport argparse\r\nimport re\r\nimport os\r\nimport csv\r\nimport string,sys,time,datetime\r\nimport requests\r\nfrom requests_toolbelt.adapters import source\r\n#from requests_ntlm import HttpNtlmAuth\r\nimport random\r\nimport subprocess\r\n#import zthreads\r\n\r\ndef get_random_ip_or_user(start,end,prefix='172.16.90.',type='ip'):\r\n if type=='ip' and max(start,end)>255:\r\n end = 255\r\n i = random.randint(start,end)\r\n return prefix + str(i)\r\n\r\ndef get_random_ips_users(start,end,num,prefix='172.16.90.',type='ip'):\r\n if type=='ip' and max(start,end)>255:\r\n end = 255\r\n sequences = []\r\n for i in range(start,end+1):\r\n sequences.append(prefix+str(i))\r\n if num> len(sequences):\r\n num = len(sequences)\r\n choices = random.sample(sequences,num)\r\n return choices\r\n\r\n\r\n\r\ndef popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer'):\r\n curl_cmd = 'curl --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'.format(\r\n cert,eth,user,proxy,url)\r\n subp = subprocess.Popen(curl_cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,close_fds=True)#,encoding=\"utf-8\")\r\n try:\r\n subp.wait(2) #等待超时\r\n except Exception as e:\r\n print('curl_request_timeout, error: ',e)\r\n return\r\n if subp.poll() == 0:\r\n print(subp.communicate()[1])\r\n else:\r\n print(\"curl_request-失败: \",curl_cmd)\r\n return\r\n\r\ndef system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer',is_http=False,debug=False):\r\n \"\"\"\r\n -I: header 
request\r\n -k: skip ssl\r\n --no-keepalive, keepalive=close\r\n \"\"\"\r\n curl_cmd = ''\r\n debug = False\r\n if is_http:\r\n basic_cmd = 'curl -I --no-keepalive --interface {0} --proxy-user {1}:Firewall1 --proxy-ntlm -x {2} {3} &'\r\n if debug:\r\n pass\r\n else:\r\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\r\n curl_cmd = basic_cmd.format(eth,user,proxy,url)\r\n else:\r\n basic_cmd = 'curl -I --cacert {0} --interface {1} --proxy-user {2}:Firewall1 --proxy-ntlm -x {3} {4} &'\r\n if debug:\r\n pass\r\n else:\r\n basic_cmd = basic_cmd[:-1] + ' > /dev/ull 2>&1 &'\r\n curl_cmd = basic_cmd.format(cert,eth,user,proxy,url)\r\n try:\r\n os_p = os.system(curl_cmd)\r\n print('curl_cmd=',curl_cmd)\r\n except Exception as e:\r\n print('curl_request_timeout: {0}, error: {1}, url={2}, user={3}'.format(curl_cmd,e,url,user))\r\n return\r\n\r\ndef get_urls_from_file(from_file='url16000.txt',url_index=-1,spliter=',',pre_www='www.'):\r\n \"\"\"\r\n 用于url分类测试,测试文件中存放大量的url地址\r\n :param from_file: str \r\n :return: list, URL_list(Generator)\r\n \"\"\"\r\n txtfile = open(from_file, 'r',encoding='utf-8')\r\n url_list = txtfile.readlines()\r\n for i in range(0,len(url_list)):\r\n url_list[i] = url_list[i].replace('\\n','')\r\n # print(url_list[i])\r\n if url_index>=0:\r\n url_var = url_list[i].split(spliter)[url_index].replace(' ','')\r\n #print('url_var=',url_var)\r\n protocol_header = url_var[:9].lower()\r\n if pre_www not in url_var and not (\"http://\" in protocol_header or \"https://\" in protocol_header or \"ftp://\" in protocol_header):\r\n url_var = pre_www + url_var\r\n url_list[i] = url_var\r\n protocol_header = url_list[i][:9].lower()\r\n #print('protocol_header=',protocol_header)\r\n if \"http://\" in protocol_header or \"https://\" in protocol_header or \"ftp://\" in protocol_header:\r\n pass \r\n else: #无协议头部,默认加http协议\r\n url_list[i] = \"https://\" + url_list[i]\r\n return url_list \r\n\r\n\r\ndef 
get_eth_user_index(sequence=0,user_start=30,user_num=10,eth_start=0,eth_num=254):\r\n \"\"\"\r\n inet 172.18.1.1/16 brd 172.18.255.255 scope global secondary eth0:0\r\n inet 172.18.1.254/16 brd 172.18.255.255 scope global secondary eth0:253\r\n sequence: start with 0\r\n eth_num: eth sequence start with 0\r\n \"\"\"\r\n user_index = sequence % user_num + user_start\r\n eth_index = sequence % eth_num + eth_start\r\n \"\"\"\r\n user_index = sequence\r\n if sequence>user_num: #循环,复用,取余\r\n user_index = sequence % user_num + user_start\r\n eth_index = sequence\r\n if eth_index>eth_num: #循环,复用,取余\r\n eth_index = eth_index % eth_num + eth_start\r\n \"\"\"\r\n return user_index,eth_index\r\n\r\ndef callback():\r\n return\r\n\r\n\r\ndef urls_resquests(urls, proxy='172.17.33.23:8080',user_start=300,user_num=253,sub_eth_start = 0, eth_num=253, \r\n ip_prefix = '172.18.1.', cert='rootCA.cer',is_same_url=False, is_http=False,debug=False):\r\n \"\"\"\r\n one ip/eth<--> one user\r\n \"\"\"\r\n i = 0\r\n #count = max(len(urls),user_num,eth_num)\r\n #for url in urls:\r\n for i in range(max(user_num,eth_num)):\r\n url = ''\r\n if is_same_url:\r\n if is_http:\r\n url = 'http://172.16.0.1' #use the same url for request test\r\n else:\r\n url = 'https://www.baidu.com'\r\n user_index = i % user_num + user_start\r\n eth_index = i % eth_num + sub_eth_start\r\n \r\n #ip = get_random_ip_or_user(start=2,end=254)\r\n \r\n #ip = ip_prefix + str(eth_index + 1)\r\n \r\n #user = get_random_ip_or_user(start=1,end=99,prefix='df64user',type='user')\r\n user = 'userg'+str(user_index)\r\n #eth = get_random_ip_or_user(start=2,end=253,prefix='eth0:',type='user')\r\n eth = 'eth0:'+str(eth_index)\r\n \"\"\" For debug\r\n print('i={0}: user_index={1}, eth_index={2}'.format(i,user_index,eth_index))\r\n print('ip_{0}={1}'.format(i,ip))\r\n print('eth=',eth)\r\n print('user=',user)\r\n print(\"-\" * 50)\r\n \"\"\"\r\n #thread_pool.put(system_curl_request, (url,user,eth,), callback)\r\n 
#popen_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')\r\n #system_curl_request(url,user,eth,proxy='172.17.33.23:8080',cert='rootCA.cer')\r\n system_curl_request(url,user,eth,proxy=proxy,cert=cert,is_http=is_http,debug=debug)\r\n #i = i + 1\r\n return\r\n \r\n \r\n#\"\"\"\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='该Python3脚本用于ASWG做并发认证测试。\\n 1、使用方法示例:\\n python concurrent_ntlm_auth_requests.py -s 17:45:00 -r 2 -t 120 -p 172.17.33.23:8080') \r\n parser.add_argument('-r','--round', type=int, default=1,help='认证并发测试的测试次数,默认1轮测试即停止')\r\n parser.add_argument('-s','--starttime', type=str, default='',help='首次认证并发测试的时间,如 16:20:60')\r\n parser.add_argument('-t','--auth-cache-timeout', type=int, default=600,help='认证缓存过期时间,默认600秒')\r\n parser.add_argument('-p','--aswg-proxy', type=str, default='172.17.33.23:8080',help='ASWG proxy')\r\n parser.add_argument('-i','--ip-prefix', type=str, default='172.18.1.',help='客户端IP前缀,默认只支持C段;其他方式自行适配')\r\n parser.add_argument('-u','--is-same-url', type=bool, default=True,help='是否使用相同URL测试')\r\n parser.add_argument('-u1','--is-http', type=bool, default=True,help='当指定使用相同URL时,指定是http还是https请求')\r\n parser.add_argument('-f','--url-file', type=str, default='hwurls_top10w.txt',help='urls来源文件')\r\n parser.add_argument('-f1','--url-index', type=int, default=0,help='urls来源文件中字段序号,默认从0开始')\r\n parser.add_argument('-a0','--start-user-index', type=int, default=0,help='auth 用户的序号,默认从0开始')\r\n parser.add_argument('-a1','--user-num', type=int, default=1275,help='auth 用户数量')\r\n parser.add_argument('-e0','--start-eth0-index', type=int, default=0,help='开始的子网卡序号,默认从0开始')\r\n parser.add_argument('-e1','--sub-eth0-num', type=int, default=1275,help='子网卡接口数量,每个接口一个IP地址')\r\n parser.add_argument('-d','--is-debug', type=bool, default=False,help='是否开启curl的打印日志')\r\n args = parser.parse_args()\r\n max_round = args.round\r\n first_schedule_time = args.starttime\r\n now = datetime.datetime.now()\r\n now_str = 
now.strftime(\"%H:%M:%S\")\r\n if first_schedule_time:\r\n if len(first_schedule_time)==8 and len(first_schedule_time.split(':'))==3 and first_schedule_time > now_str:\r\n pass\r\n else:\r\n print('-s或者--starttime 格式不对,请输入大于当前时间字符串,如:16:20:60 ')\r\n sys.exit()\r\n else:\r\n nexttime = now + datetime.timedelta(seconds=60)\r\n first_schedule_time = nexttime.strftime(\"%H:%M:%S\")\r\n \r\n auth_cache_timeout = args.auth_cache_timeout\r\n proxy = args.aswg_proxy\r\n ip_prefix = args.ip_prefix\r\n is_same_url = args.is_same_url\r\n is_same_url = True\r\n url_file = args.url_file\r\n url_index = args.url_index\r\n start_user_index = args.start_user_index\r\n user_num = args.user_num\r\n start_eth0_index = args.start_eth0_index\r\n sub_eth0_num = args.sub_eth0_num\r\n is_debug = args.is_debug\r\n urls = get_urls_from_file(from_file=url_file,url_index=url_index,spliter=',',pre_www='www.')\r\n #print('urls=',urls)\r\n #url = 'https://www.baidu.com'\r\n print('urls_len=',len(urls))\r\n \r\n #urls = urls[:300]\r\n print('urls_len=',len(urls))\r\n #from zthreads.threadpools.threadpools import Threadpools\r\n #thread_pool = Threadpools(5)\r\n i = 0\r\n #unique_users = 1275\r\n user_start = start_user_index\r\n user_num = user_num\r\n sub_eth_start = start_eth0_index\r\n eth_num = sub_eth0_num\r\n cert = 'rootCA.cer'\r\n is_http = True\r\n #first_schedule_time = \"16:45:00\"\r\n #auth_cache_timeout = 60\r\n #max_round = 2\r\n print('max_round={0}, first_schedule_time={1}, auth_cache_timeout={2}'.format(max_round,first_schedule_time,auth_cache_timeout))\r\n round_num = 0\r\n while True:\r\n #time_now = time.strftime(\"%H:%M:%S\", time.localtime())\r\n now = datetime.datetime.now()\r\n time_now = now.strftime(\"%H:%M:%S\")\r\n if time_now == first_schedule_time: \r\n print('This_schedule_time={0}, round={1}'.format(first_schedule_time,round_num))\r\n start_time = time.time()\r\n urls_resquests(urls, proxy=proxy,user_start=user_start,user_num=user_num,sub_eth_start=sub_eth_start, 
eth_num=eth_num, \r\n ip_prefix=ip_prefix, cert=cert,is_same_url=is_same_url, is_http=is_http,debug=is_debug)\r\n total_sending_time_seconds = time.time() - start_time \r\n print('total_sending_time_seconds={0}. Finished all url requests for round_{1}!!!'.format(total_sending_time_seconds,round_num))\r\n round_num = round_num + 1\r\n if round_num >= max_round:\r\n print(\"-\" * 50)\r\n print('Finished all test with {0} rounds!!!'.format(max_round))\r\n break\r\n else:\r\n print(\"-\" * 50)\r\n print('Please make sure clear cache before the next schedule time!!!')\r\n #now = datetime.datetime.now()\r\n #date_str = now.strftime(\"%Y-%m-%d \")\r\n #last_schedule_time_str = date_str + first_schedule_time\r\n last_schedule_time = datetime.datetime.strptime(now.strftime(\"%Y-%m-%d \") + first_schedule_time,'%Y-%m-%d %H:%M:%S')\r\n nexttime = last_schedule_time + datetime.timedelta(seconds=auth_cache_timeout+60) # delay 60 seconds\r\n first_schedule_time = nexttime.strftime(\"%H:%M:%S\")\r\n print('Next_schedule_time={0}...'.format(first_schedule_time))\r\n #time.sleep(sleep_time)\r\n else:\r\n #print('time_now=',time_now)\r\n pass\r\n \r\n \r\n #thread_pool.close() \r\n #initial_requests_session(ip=ip,user=ntlm_user)\r\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
def create_training_data():
    """Populate the module-level ``training_data`` list with labeled images.

    Walks each category folder under ``DATADIR``, reads every image in
    color, resizes it to ``IMG_SIZE`` x ``IMG_SIZE``, and appends a
    ``[image, class_index]`` pair to ``training_data``.  Files that cannot
    be read or resized are skipped silently.
    """
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        # Numeric label = position of the category in CATEGORIES
        # (order is defined at module level — not visible here).
        classIndex = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.
                    IMREAD_COLOR)
                resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([resized_img_array, classIndex])
            except Exception as e:
                # Corrupt/unreadable files in the Kaggle dataset are expected;
                # skip them instead of aborting the whole pass.
                pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
plt.imshow(img_array, cmap='gray')
plt.show()
print(img_array)
print(img_array.shape)
break
break
<|reserved_special_token_0|>
plt.imshow(resized_img_array, cmap='gray')
plt.show()
<|reserved_special_token_0|>
def create_training_data():
    """Fill the module-level ``training_data`` with resized, labeled images.

    Every readable image under each ``DATADIR`` category folder is loaded
    in color, scaled to ``IMG_SIZE`` x ``IMG_SIZE``, and stored together
    with its category index; unreadable files are skipped.
    """
    for label, category in enumerate(CATEGORIES):
        folder = os.path.join(DATADIR, category)
        for filename in os.listdir(folder):
            try:
                raw = cv2.imread(os.path.join(folder, filename), cv2.IMREAD_COLOR)
                scaled = cv2.resize(raw, (IMG_SIZE, IMG_SIZE))
            except Exception:
                # Broken image file — ignore and move on.
                continue
            training_data.append([scaled, label])
create_training_data()
print(len(training_data))
<|reserved_special_token_0|>
random.shuffle(training_data)
<|reserved_special_token_0|>
for features, label in training_data:
x.append(features)
y.append(label)
<|reserved_special_token_0|>
pickle.dump(x, pickle_out)
pickle_out.close()
<|reserved_special_token_0|>
pickle.dump(y, pickle_out)
pickle_out.close()
<|reserved_special_token_0|>
for dense_layer in dense_layers:
for layer_size in layer_sizes:
for conv_layer in conv_layers:
NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,
layer_size, dense_layer, int(time.time()))
print(NAME)
model = Sequential()
model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for l in range(conv_layer - 1):
model.add(Conv2D(layer_size, (5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))
model.compile(loss='categorical_crossentropy', optimizer=
OPTIMIZER, metrics=['accuracy'])
history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=
NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,
callbacks=[tensorboard])
if history.history.get('val_acc')[-1] > max:
max = history.history.get('val_acc')[-1]
if accIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)
val_acc_out.close()
accIndex += 1
pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')
p_upload = pickle.load(pickle_upload)
print(p_upload)
if history.history.get('val_loss')[-1] < min:
min = history.history.get('val_loss')[-1]
if lossIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)
val_loss_out.close()
lossIndex += 1
model.save('64x3-CNN.model')
<|reserved_special_token_0|>
def prepare(filepath):
    """Load the image at *filepath* and shape it for ``model.predict``.

    Reads the file in color (3 channels), resizes it to 299x299, and
    reshapes the result to ``(-1, 299, 299, 3)`` — a one-image batch.
    """
    IMG_SIZE = 299
    img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)
    resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
<|reserved_special_token_0|>
print(prediction)
print(prediction[0][0])
print(CATEGORIES[int(prediction[0][0])])
<|reserved_special_token_0|>
print(prediction)
print(CATEGORIES[int(prediction[0][0])])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
DATADIR = 'content/PetImages'
CATEGORIES = ['Cat', 'Dog']
img_array = []
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
plt.imshow(img_array, cmap='gray')
plt.show()
print(img_array)
print(img_array.shape)
break
break
IMG_SIZE = 299
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(resized_img_array, cmap='gray')
plt.show()
training_data = []
def create_training_data():
    """Build the global ``training_data`` list of ``[image, label]`` pairs.

    For each category in ``CATEGORIES`` (here ``['Cat', 'Dog']``), reads
    every image under ``DATADIR/<category>`` in color, resizes it to
    ``IMG_SIZE`` x ``IMG_SIZE``, and appends it with the category's index
    as its label.  Unreadable files are skipped.
    """
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        # Label: 0 for 'Cat', 1 for 'Dog' given the CATEGORIES order above.
        classIndex = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.
                    IMREAD_COLOR)
                resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([resized_img_array, classIndex])
            except Exception as e:
                # Skip corrupt images rather than failing the whole scan.
                pass
create_training_data()
print(len(training_data))
<|reserved_special_token_0|>
random.shuffle(training_data)
x = []
y = []
for features, label in training_data:
x.append(features)
y.append(label)
x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
pickle_out = open('x.pickle', 'wb')
pickle.dump(x, pickle_out)
pickle_out.close()
pickle_out = open('y.pickle', 'wb')
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open('x.pickle', 'rb')
x = pickle.load(pickle_in)
pickle_in = open('y.pickle', 'rb')
y = pickle.load(pickle_in)
x = x / 255.0
INPUT_SHAPE = x.shape[1:]
DROPOUT = 0.2
NB_CLASSES = 10
NB_EPOCHS = 10
BATCH_SIZE = 128
VALIDATION_SPLIT = 0.2
OPTIMIZER = Adam()
max, min, accIndex, lossIndex = 70.0, 4.0, 1, 1
date = datetime.datetime.now()
dense_layers = [2, 1, 0]
layer_sizes = [512, 256, 128, 64]
conv_layers = [3, 2, 1]
for dense_layer in dense_layers:
for layer_size in layer_sizes:
for conv_layer in conv_layers:
NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,
layer_size, dense_layer, int(time.time()))
print(NAME)
model = Sequential()
model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for l in range(conv_layer - 1):
model.add(Conv2D(layer_size, (5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))
model.compile(loss='categorical_crossentropy', optimizer=
OPTIMIZER, metrics=['accuracy'])
history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=
NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,
callbacks=[tensorboard])
if history.history.get('val_acc')[-1] > max:
max = history.history.get('val_acc')[-1]
if accIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)
val_acc_out.close()
accIndex += 1
pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')
p_upload = pickle.load(pickle_upload)
print(p_upload)
if history.history.get('val_loss')[-1] < min:
min = history.history.get('val_loss')[-1]
if lossIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)
val_loss_out.close()
lossIndex += 1
model.save('64x3-CNN.model')
CATEGORIES = ['Dog', 'Cat']
def prepare(filepath):
    """Read *filepath* as a color image and return it as a 1-image batch.

    The image is resized to 299x299 and reshaped to ``(-1, 299, 299, 3)``
    so it can be fed directly to ``model.predict``.
    """
    size = 299
    image = cv2.resize(cv2.imread(filepath, cv2.IMREAD_COLOR), (size, size))
    return image.reshape(-1, size, size, 3)
model = tf.keras.models.load_model('64x3-CNN.model')
prediction = model.predict([prepare('dog.jpg')])
print(prediction)
print(prediction[0][0])
print(CATEGORIES[int(prediction[0][0])])
prediction = model.predict([prepare('cat.jpg')])
print(prediction)
print(CATEGORIES[int(prediction[0][0])])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import pickle
import random
import datetime
import tensorflow as tf
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import TensorBoard
DATADIR = 'content/PetImages'
CATEGORIES = ['Cat', 'Dog']
img_array = []
for category in CATEGORIES:
path = os.path.join(DATADIR, category)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
plt.imshow(img_array, cmap='gray')
plt.show()
print(img_array)
print(img_array.shape)
break
break
IMG_SIZE = 299
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(resized_img_array, cmap='gray')
plt.show()
training_data = []
def create_training_data():
    """Append every readable dataset image to the global ``training_data``.

    Images are loaded with ``cv2.IMREAD_COLOR``, resized to
    ``IMG_SIZE`` x ``IMG_SIZE`` and stored as ``[image, class_index]``,
    where the index is the category's position in ``CATEGORIES``
    (``['Cat', 'Dog']`` here, so 0 = Cat, 1 = Dog).
    """
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category)
        classIndex = CATEGORIES.index(category)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.
                    IMREAD_COLOR)
                resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([resized_img_array, classIndex])
            except Exception as e:
                # The Kaggle PetImages set contains broken files; ignore them.
                pass
create_training_data()
print(len(training_data))
<|reserved_special_token_0|>
random.shuffle(training_data)
x = []
y = []
for features, label in training_data:
x.append(features)
y.append(label)
x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)
pickle_out = open('x.pickle', 'wb')
pickle.dump(x, pickle_out)
pickle_out.close()
pickle_out = open('y.pickle', 'wb')
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open('x.pickle', 'rb')
x = pickle.load(pickle_in)
pickle_in = open('y.pickle', 'rb')
y = pickle.load(pickle_in)
x = x / 255.0
INPUT_SHAPE = x.shape[1:]
DROPOUT = 0.2
NB_CLASSES = 10
NB_EPOCHS = 10
BATCH_SIZE = 128
VALIDATION_SPLIT = 0.2
OPTIMIZER = Adam()
max, min, accIndex, lossIndex = 70.0, 4.0, 1, 1
date = datetime.datetime.now()
dense_layers = [2, 1, 0]
layer_sizes = [512, 256, 128, 64]
conv_layers = [3, 2, 1]
for dense_layer in dense_layers:
for layer_size in layer_sizes:
for conv_layer in conv_layers:
NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,
layer_size, dense_layer, int(time.time()))
print(NAME)
model = Sequential()
model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for l in range(conv_layer - 1):
model.add(Conv2D(layer_size, (5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))
model.compile(loss='categorical_crossentropy', optimizer=
OPTIMIZER, metrics=['accuracy'])
history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=
NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,
callbacks=[tensorboard])
if history.history.get('val_acc')[-1] > max:
max = history.history.get('val_acc')[-1]
if accIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,
round(max, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)
val_acc_out.close()
accIndex += 1
pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')
p_upload = pickle.load(pickle_upload)
print(p_upload)
if history.history.get('val_loss')[-1] < min:
min = history.history.get('val_loss')[-1]
if lossIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,
round(min, 4), CBP[0], CBP[1], CBP[2],
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)
val_loss_out.close()
lossIndex += 1
model.save('64x3-CNN.model')
CATEGORIES = ['Dog', 'Cat']
def prepare(filepath):
    """Return the image at *filepath* shaped as input for the saved model.

    Loads the file in color, resizes to 299x299 (the training size used
    above), and reshapes to ``(-1, 299, 299, 3)`` — one sample, 3 channels.
    """
    IMG_SIZE = 299
    img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)
    resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
    return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)
model = tf.keras.models.load_model('64x3-CNN.model')
prediction = model.predict([prepare('dog.jpg')])
print(prediction)
print(prediction[0][0])
print(CATEGORIES[int(prediction[0][0])])
prediction = model.predict([prepare('cat.jpg')])
print(prediction)
print(CATEGORIES[int(prediction[0][0])])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
'''
!pip install wget
from zipfile import ZipFile
import wget
print('Beginning file downlaod with wget module')
url = 'https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip'
wget.download(url, 'sample_data/')
print('2. Extract all files in ZIP to different directory')
# Create a ZipFile Object and load sample.zip in it
with ZipFile('sample_data/kagglecatsanddogs_3367a.zip', 'r') as zipObj:
# Extract all the contents of zip file in different directory
zipObj.extractall('content/')
'''
import numpy as np
import matplotlib.pyplot as plt
import os
import cv2
import pickle
import random
import datetime
import tensorflow as tf
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout
from tensorflow.python.keras.layers import Conv2D, MaxPooling2D
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import TensorBoard
DATADIR = 'content/PetImages'
CATEGORIES = ['Cat', 'Dog'] #'''categories that we have to deal with'''
img_array= []
for category in CATEGORIES:
path = os.path.join(DATADIR, category) # path to cats and dogs dir
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
plt.imshow(img_array, cmap='gray')
plt.show()
print(img_array)
print(img_array.shape)
break
break
IMG_SIZE = 299 #every image of 299x299
resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
plt.imshow(resized_img_array, cmap='gray') # cmap = hot, plasma, cool,
plt.show()
training_data = []
def create_training_data(): # builds the training dataset into the global list
    """Populate ``training_data`` with ``[resized_image, class_index]`` pairs."""
    for category in CATEGORIES:
        path = os.path.join(DATADIR, category) # path to this category's image dir
        classIndex = CATEGORIES.index(category) # 0 for 'Cat', 1 for 'Dog' (order of CATEGORIES above)
        for img in os.listdir(path):
            try:
                img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
                resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
                training_data.append([resized_img_array, classIndex])
            except Exception as e:
                pass # skip corrupt/unreadable images in the Kaggle dump
create_training_data()
print(len(training_data))
'''shuffle training data'''
random.shuffle(training_data)
# for sample in training_data[:10]:
# print(sample[1])
x=[]
y=[]
for features, label in training_data:
x.append(features)
y.append(label)
x = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3) #we can't pass a list to keras for training
#'''we have to pass here a numpy array '''
# print(x[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))
pickle_out = open("x.pickle", 'wb')
pickle.dump(x, pickle_out)
pickle_out.close()
pickle_out= open('y.pickle', 'wb')
pickle.dump(y, pickle_out)
pickle_out.close()
pickle_in = open('x.pickle', 'rb')
x = pickle.load(pickle_in)
pickle_in = open('y.pickle', 'rb')
y = pickle.load(pickle_in)
x = x / 255.0
INPUT_SHAPE = x.shape[1:]#(224, 224, 3)
DROPOUT=0.2
NB_CLASSES=10
NB_EPOCHS=10
BATCH_SIZE=128
VALIDATION_SPLIT=0.2
OPTIMIZER = Adam()
max, min, accIndex , lossIndex=70.0 , 4.0, 1, 1
date = datetime.datetime.now()
dense_layers = [2, 1, 0] # 0, 1,2
layer_sizes = [512, 256, 128, 64] #32, 64, 128, 256, 512
conv_layers = [3, 2, 1] # 1, 2,3
for dense_layer in dense_layers:
for layer_size in layer_sizes:
for conv_layer in conv_layers:
NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
print(NAME)
model = Sequential()
model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
for l in range(conv_layer-1):
model.add(Conv2D(layer_size, (5, 5)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(DROPOUT))
model.add(Flatten())
for _ in range(dense_layer):
model.add(Dense(layer_size))
model.add(Activation('relu'))
model.add(Dropout(DROPOUT))
model.add(Dense(NB_CLASSES))
model.add(Activation('softmax'))
tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
model.compile(loss='categorical_crossentropy',
optimizer=OPTIMIZER,
metrics=['accuracy'],
)
history = model.fit(x, y,
batch_size=BATCH_SIZE,
epochs=NB_EPOCHS,
validation_split=VALIDATION_SPLIT,
verbose=1,
callbacks=[tensorboard])
if history.history.get('val_acc')[-1] > max:
max = history.history.get('val_acc')[-1]
if accIndex >= 2:
os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex-1, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"), "wb")
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")),
val_acc_out)
val_acc_out.close()
accIndex += 1
pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')
p_upload = pickle.load(pickle_upload)
print(p_upload)
if history.history.get('val_loss')[-1] < min:
min = history.history.get('val_loss')[-1]
if lossIndex>=2:
os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex-1, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}"))
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f":{date:%Y-%m-%d-%Hh%Mm%Ss}")),
val_loss_out)
val_loss_out.close()
lossIndex += 1
model.save('64x3-CNN.model')
CATEGORIES = ["Dog", "Cat"] # will use this to convert prediction num to string value
def prepare(filepath):
    """Load one image and shape it as a single-sample batch for predict()."""
    IMG_SIZE = 299 # must match the training IMG_SIZE (50 in the txt-based tutorial)
    img_array = cv2.imread(filepath, cv2.IMREAD_COLOR) # read the image in color (BGR, 3 channels) — NOT grayscale
    resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize image to match model's expected sizing
    return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3) # return the image with shaping that TF wants.
model = tf.keras.models.load_model("64x3-CNN.model")
prediction = model.predict([prepare('dog.jpg')]) # REMEMBER YOU'RE PASSING A LIST OF THINGS YOU WISH TO PREDICT
print(prediction)
print(prediction[0][0])
print(CATEGORIES[int(prediction[0][0])])
#We can also test our cat example:
prediction = model.predict([prepare('cat.jpg')])
print(prediction) # will be a list in a list.
print(CATEGORIES[int(prediction[0][0])])
'''
alpha. Also referred to as the learning rate or step size. The proportion that weights are updated (e.g. 0.001). Larger values (e.g. 0.3) results in faster initial learning before the rate is updated. Smaller values (e.g. 1.0E-5) slow learning right down during training
beta1. The exponential decay rate for the first moment estimates (e.g. 0.9).
beta2. The exponential decay rate for the second-moment estimates (e.g. 0.999). This value should be set close to 1.0 on problems with a sparse gradient (e.g. NLP and computer vision problems).
epsilon. Is a very small number to prevent any division by zero in the implementation (e.g. 10E-8).
We can see that the popular deep learning libraries generally use the default parameters recommended by the paper.
TensorFlow: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08.
Keras: lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0.
Blocks: learning_rate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-08, decay_factor=1.
Lasagne: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
Caffe: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
MxNet: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8
Torch: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8
'''
|
flexible
|
{
"blob_id": "13c9f0f58ec6da317c3802f594bb0db7c275dee9",
"index": 21,
"step-1": "<mask token>\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n print(img_array)\n print(img_array.shape)\n break\n break\n<mask token>\nplt.imshow(resized_img_array, cmap='gray')\nplt.show()\n<mask token>\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\ncreate_training_data()\nprint(len(training_data))\n<mask token>\nrandom.shuffle(training_data)\n<mask token>\nfor features, label in training_data:\n x.append(features)\n y.append(label)\n<mask token>\npickle.dump(x, pickle_out)\npickle_out.close()\n<mask token>\npickle.dump(y, pickle_out)\npickle_out.close()\n<mask token>\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,\n layer_size, dense_layer, int(time.time()))\n print(NAME)\n model = Sequential()\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n for l in range(conv_layer - 1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n model.add(Flatten())\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))\n 
model.compile(loss='categorical_crossentropy', optimizer=\n OPTIMIZER, metrics=['accuracy'])\n history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=\n NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)\n val_acc_out.close()\n accIndex += 1\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)\n val_loss_out.close()\n lossIndex += 1\nmodel.save('64x3-CNN.model')\n<mask token>\n\n\ndef prepare(filepath):\n IMG_SIZE = 299\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n\n<mask token>\nprint(prediction)\nprint(prediction[0][0])\nprint(CATEGORIES[int(prediction[0][0])])\n<mask token>\nprint(prediction)\nprint(CATEGORIES[int(prediction[0][0])])\n<mask token>\n",
"step-3": "<mask token>\nDATADIR = 'content/PetImages'\nCATEGORIES = ['Cat', 'Dog']\nimg_array = []\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n print(img_array)\n print(img_array.shape)\n break\n break\nIMG_SIZE = 299\nresized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\nplt.imshow(resized_img_array, cmap='gray')\nplt.show()\ntraining_data = []\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\ncreate_training_data()\nprint(len(training_data))\n<mask token>\nrandom.shuffle(training_data)\nx = []\ny = []\nfor features, label in training_data:\n x.append(features)\n y.append(label)\nx = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)\npickle_out = open('x.pickle', 'wb')\npickle.dump(x, pickle_out)\npickle_out.close()\npickle_out = open('y.pickle', 'wb')\npickle.dump(y, pickle_out)\npickle_out.close()\npickle_in = open('x.pickle', 'rb')\nx = pickle.load(pickle_in)\npickle_in = open('y.pickle', 'rb')\ny = pickle.load(pickle_in)\nx = x / 255.0\nINPUT_SHAPE = x.shape[1:]\nDROPOUT = 0.2\nNB_CLASSES = 10\nNB_EPOCHS = 10\nBATCH_SIZE = 128\nVALIDATION_SPLIT = 0.2\nOPTIMIZER = Adam()\nmax, min, accIndex, lossIndex = 70.0, 4.0, 1, 1\ndate = datetime.datetime.now()\ndense_layers = [2, 1, 0]\nlayer_sizes = [512, 256, 128, 64]\nconv_layers = [3, 2, 1]\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,\n layer_size, dense_layer, 
int(time.time()))\n print(NAME)\n model = Sequential()\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n for l in range(conv_layer - 1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n model.add(Flatten())\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))\n model.compile(loss='categorical_crossentropy', optimizer=\n OPTIMIZER, metrics=['accuracy'])\n history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=\n NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)\n val_acc_out.close()\n accIndex += 1\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n 
pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)\n val_loss_out.close()\n lossIndex += 1\nmodel.save('64x3-CNN.model')\nCATEGORIES = ['Dog', 'Cat']\n\n\ndef prepare(filepath):\n IMG_SIZE = 299\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n\nmodel = tf.keras.models.load_model('64x3-CNN.model')\nprediction = model.predict([prepare('dog.jpg')])\nprint(prediction)\nprint(prediction[0][0])\nprint(CATEGORIES[int(prediction[0][0])])\nprediction = model.predict([prepare('cat.jpg')])\nprint(prediction)\nprint(CATEGORIES[int(prediction[0][0])])\n<mask token>\n",
"step-4": "<mask token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nimport pickle\nimport random\nimport datetime\nimport tensorflow as tf\nfrom tensorflow.python.keras.datasets import cifar10\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout\nfrom tensorflow.python.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.python.keras.optimizers import Adam\nfrom tensorflow.python.keras.callbacks import TensorBoard\nDATADIR = 'content/PetImages'\nCATEGORIES = ['Cat', 'Dog']\nimg_array = []\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n print(img_array)\n print(img_array.shape)\n break\n break\nIMG_SIZE = 299\nresized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\nplt.imshow(resized_img_array, cmap='gray')\nplt.show()\ntraining_data = []\n\n\ndef create_training_data():\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category)\n classIndex = CATEGORIES.index(category)\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.\n IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\n\ncreate_training_data()\nprint(len(training_data))\n<mask token>\nrandom.shuffle(training_data)\nx = []\ny = []\nfor features, label in training_data:\n x.append(features)\n y.append(label)\nx = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3)\npickle_out = open('x.pickle', 'wb')\npickle.dump(x, pickle_out)\npickle_out.close()\npickle_out = open('y.pickle', 'wb')\npickle.dump(y, pickle_out)\npickle_out.close()\npickle_in = open('x.pickle', 'rb')\nx = 
pickle.load(pickle_in)\npickle_in = open('y.pickle', 'rb')\ny = pickle.load(pickle_in)\nx = x / 255.0\nINPUT_SHAPE = x.shape[1:]\nDROPOUT = 0.2\nNB_CLASSES = 10\nNB_EPOCHS = 10\nBATCH_SIZE = 128\nVALIDATION_SPLIT = 0.2\nOPTIMIZER = Adam()\nmax, min, accIndex, lossIndex = 70.0, 4.0, 1, 1\ndate = datetime.datetime.now()\ndense_layers = [2, 1, 0]\nlayer_sizes = [512, 256, 128, 64]\nconv_layers = [3, 2, 1]\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n NAME = '{}-conv-{}-nodes-{}-dense-{}'.format(conv_layer,\n layer_size, dense_layer, int(time.time()))\n print(NAME)\n model = Sequential()\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n for l in range(conv_layer - 1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n model.add(Flatten())\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))\n model.compile(loss='categorical_crossentropy', optimizer=\n OPTIMIZER, metrics=['accuracy'])\n history = model.fit(x, y, batch_size=BATCH_SIZE, epochs=\n NB_EPOCHS, validation_split=VALIDATION_SPLIT, verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex - 1,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'), 'wb')\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex,\n round(max, 4), CBP[0], CBP[1], CBP[2],\n 
f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_acc_out)\n val_acc_out.close()\n accIndex += 1\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex - 1,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}'))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex,\n round(min, 4), CBP[0], CBP[1], CBP[2],\n f':{date:%Y-%m-%d-%Hh%Mm%Ss}')), val_loss_out)\n val_loss_out.close()\n lossIndex += 1\nmodel.save('64x3-CNN.model')\nCATEGORIES = ['Dog', 'Cat']\n\n\ndef prepare(filepath):\n IMG_SIZE = 299\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR)\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3)\n\n\nmodel = tf.keras.models.load_model('64x3-CNN.model')\nprediction = model.predict([prepare('dog.jpg')])\nprint(prediction)\nprint(prediction[0][0])\nprint(CATEGORIES[int(prediction[0][0])])\nprediction = model.predict([prepare('cat.jpg')])\nprint(prediction)\nprint(CATEGORIES[int(prediction[0][0])])\n<mask token>\n",
"step-5": "'''\n!pip install wget\nfrom zipfile import ZipFile\nimport wget\nprint('Beginning file downlaod with wget module')\n\nurl = 'https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip'\nwget.download(url, 'sample_data/')\n\n\nprint('2. Extract all files in ZIP to different directory')\n\n # Create a ZipFile Object and load sample.zip in it\nwith ZipFile('sample_data/kagglecatsanddogs_3367a.zip', 'r') as zipObj:\n # Extract all the contents of zip file in different directory\n zipObj.extractall('content/')\n\n'''\n\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport cv2\nimport pickle\nimport random\nimport datetime\nimport tensorflow as tf\nfrom tensorflow.python.keras.datasets import cifar10\nfrom tensorflow.python.keras.preprocessing.image import ImageDataGenerator\n\n\nfrom tensorflow.python.keras.models import Sequential\nfrom tensorflow.python.keras.layers import Activation, Dense, Flatten, Dropout\nfrom tensorflow.python.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.python.keras.optimizers import Adam\n\nfrom tensorflow.python.keras.callbacks import TensorBoard\n\n\nDATADIR = 'content/PetImages'\nCATEGORIES = ['Cat', 'Dog'] #'''categories that we have to deal with'''\nimg_array= []\n\nfor category in CATEGORIES:\n path = os.path.join(DATADIR, category) # path to cats and dogs dir\n for img in os.listdir(path):\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n plt.imshow(img_array, cmap='gray')\n plt.show()\n\n print(img_array)\n print(img_array.shape)\n\n break\n break\n\n\nIMG_SIZE = 299 #every image of 299x299\nresized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\nplt.imshow(resized_img_array, cmap='gray') # cmap = hot, plasma, cool,\nplt.show()\n\n\ntraining_data = []\ndef create_training_data(): # creating training datasets\n for category in CATEGORIES:\n path = os.path.join(DATADIR, category) # path to cats and dogs dir\n\n 
classIndex = CATEGORIES.index(category) # 0 for dog and 1 for cat\n\n for img in os.listdir(path):\n try:\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)\n\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))\n training_data.append([resized_img_array, classIndex])\n except Exception as e:\n pass\n\ncreate_training_data()\n\nprint(len(training_data))\n\n\n\n'''shuffle training data'''\nrandom.shuffle(training_data)\n\n\n\n# for sample in training_data[:10]:\n# print(sample[1])\n\n\n\nx=[]\ny=[]\nfor features, label in training_data:\n x.append(features)\n y.append(label)\nx = np.array(x).reshape(-1, IMG_SIZE, IMG_SIZE, 3) #we can't pass a list to keras for training\n #'''we have to pass here a numpy array '''\n\n# print(x[0].reshape(-1, IMG_SIZE, IMG_SIZE, 1))\n\n\npickle_out = open(\"x.pickle\", 'wb')\npickle.dump(x, pickle_out)\npickle_out.close()\n\npickle_out= open('y.pickle', 'wb')\npickle.dump(y, pickle_out)\npickle_out.close()\n\npickle_in = open('x.pickle', 'rb')\nx = pickle.load(pickle_in)\npickle_in = open('y.pickle', 'rb')\ny = pickle.load(pickle_in)\n\n\nx = x / 255.0\nINPUT_SHAPE = x.shape[1:]#(224, 224, 3)\nDROPOUT=0.2\nNB_CLASSES=10\nNB_EPOCHS=10\nBATCH_SIZE=128\nVALIDATION_SPLIT=0.2\nOPTIMIZER = Adam()\n\n\nmax, min, accIndex , lossIndex=70.0 , 4.0, 1, 1\ndate = datetime.datetime.now()\n\ndense_layers = [2, 1, 0] # 0, 1,2\nlayer_sizes = [512, 256, 128, 64] #32, 64, 128, 256, 512\nconv_layers = [3, 2, 1] # 1, 2,3\n\nfor dense_layer in dense_layers:\n for layer_size in layer_sizes:\n for conv_layer in conv_layers:\n NAME = \"{}-conv-{}-nodes-{}-dense-{}\".format(conv_layer, layer_size, dense_layer, int(time.time()))\n print(NAME)\n\n model = Sequential()\n\n model.add(Conv2D(layer_size, (3, 3), input_shape=INPUT_SHAPE))\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n for l in range(conv_layer-1):\n model.add(Conv2D(layer_size, (5, 5)))\n model.add(Activation('relu'))\n 
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n model.add(Dropout(DROPOUT))\n\n model.add(Flatten())\n\n for _ in range(dense_layer):\n model.add(Dense(layer_size))\n model.add(Activation('relu'))\n model.add(Dropout(DROPOUT))\n\n model.add(Dense(NB_CLASSES))\n model.add(Activation('softmax'))\n\n tensorboard = TensorBoard(log_dir=\"logs/{}\".format(NAME))\n\n model.compile(loss='categorical_crossentropy',\n optimizer=OPTIMIZER,\n metrics=['accuracy'],\n )\n\n history = model.fit(x, y,\n batch_size=BATCH_SIZE,\n epochs=NB_EPOCHS,\n validation_split=VALIDATION_SPLIT,\n verbose=1,\n callbacks=[tensorboard])\n if history.history.get('val_acc')[-1] > max:\n max = history.history.get('val_acc')[-1]\n if accIndex >= 2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(accIndex-1, round(max, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"))\n val_acc_out = open('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"), \"wb\")\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(accIndex, round(max, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\")),\n val_acc_out)\n val_acc_out.close()\n accIndex += 1\n\n pickle_upload = open('{}_pickle'.format(accIndex - 1), 'rb')\n p_upload = pickle.load(pickle_upload)\n print(p_upload)\n\n\n if history.history.get('val_loss')[-1] < min:\n min = history.history.get('val_loss')[-1]\n if lossIndex>=2:\n os.remove('{}_{}_{}_{}_{}_{}'.format(lossIndex-1, round(min, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"))\n val_loss_out = open('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\"))\n pickle.dump(model.save('{}_{}_{}_{}_{}_{}'.format(lossIndex, round(min, 4), CBP[0], CBP[1], CBP[2], f\":{date:%Y-%m-%d-%Hh%Mm%Ss}\")),\n val_loss_out)\n val_loss_out.close()\n lossIndex += 1\n\n\n\n\nmodel.save('64x3-CNN.model')\n\n\nCATEGORIES = [\"Dog\", \"Cat\"] # will use this to convert prediction num to 
string value\n\n\ndef prepare(filepath):\n IMG_SIZE = 299 # 50 in txt-based\n img_array = cv2.imread(filepath, cv2.IMREAD_COLOR) # read in the image, convert to grayscale\n resized_img_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE)) # resize image to match model's expected sizing\n return resized_img_array.reshape(-1, IMG_SIZE, IMG_SIZE, 3) # return the image with shaping that TF wants.\n\n\nmodel = tf.keras.models.load_model(\"64x3-CNN.model\")\nprediction = model.predict([prepare('dog.jpg')]) # REMEMBER YOU'RE PASSING A LIST OF THINGS YOU WISH TO PREDICT\nprint(prediction)\nprint(prediction[0][0])\n\nprint(CATEGORIES[int(prediction[0][0])])\n\n\n#We can also test our cat example:\n\nprediction = model.predict([prepare('cat.jpg')])\nprint(prediction) # will be a list in a list.\nprint(CATEGORIES[int(prediction[0][0])])\n\n\n\n'''\nalpha. Also referred to as the learning rate or step size. The proportion that weights are updated (e.g. 0.001). Larger values (e.g. 0.3) results in faster initial learning before the rate is updated. Smaller values (e.g. 1.0E-5) slow learning right down during training\nbeta1. The exponential decay rate for the first moment estimates (e.g. 0.9).\nbeta2. The exponential decay rate for the second-moment estimates (e.g. 0.999). This value should be set close to 1.0 on problems with a sparse gradient (e.g. NLP and computer vision problems).\nepsilon. Is a very small number to prevent any division by zero in the implementation (e.g. 
10E-8).\n\nWe can see that the popular deep learning libraries generally use the default parameters recommended by the paper.\n\nTensorFlow: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08.\nKeras: lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0.\nBlocks: learning_rate=0.002, beta1=0.9, beta2=0.999, epsilon=1e-08, decay_factor=1.\nLasagne: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08\nCaffe: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08\nMxNet: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8\nTorch: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8\n\n\n\n\n'''",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class DetailView(generic.DetailView):
model = Project
template_name = 'projects/detail.html'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IndexView(generic.ListView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_queryset(self):
"""Return all projects."""
return Project.objects.all()
class DetailView(generic.DetailView):
model = Project
template_name = 'projects/detail.html'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class IndexView(generic.ListView):
template_name = 'projects/index.html'
context_object_name = 'projectz'
def get_queryset(self):
"""Return all projects."""
return Project.objects.all()
class DetailView(generic.DetailView):
model = Project
template_name = 'projects/detail.html'
<|reserved_special_token_1|>
from django.views import generic
from .models import Project
class IndexView(generic.ListView):
template_name = 'projects/index.html'
context_object_name = 'projectz'
def get_queryset(self):
"""Return all projects."""
return Project.objects.all()
class DetailView(generic.DetailView):
model = Project
template_name = 'projects/detail.html'
<|reserved_special_token_1|>
from django.views import generic
from .models import Project
class IndexView(generic.ListView):
    """List all Project objects on the projects index page."""
    template_name = "projects/index.html"
    context_object_name = "projectz"  # name used to access the list in the template
    def get_queryset(self):
        """Return all projects."""
        return Project.objects.all()
class DetailView(generic.DetailView):
    """Display a single Project on the projects detail page."""
    model = Project
    template_name = "projects/detail.html"
|
flexible
|
{
"blob_id": "23d15c719cd26ea67a032a91a3e73f0d8d3bcfd1",
"index": 6662,
"step-1": "<mask token>\n\n\nclass DetailView(generic.DetailView):\n model = Project\n template_name = 'projects/detail.html'\n",
"step-2": "<mask token>\n\n\nclass IndexView(generic.ListView):\n <mask token>\n <mask token>\n\n def get_queryset(self):\n \"\"\"Return all projects.\"\"\"\n return Project.objects.all()\n\n\nclass DetailView(generic.DetailView):\n model = Project\n template_name = 'projects/detail.html'\n",
"step-3": "<mask token>\n\n\nclass IndexView(generic.ListView):\n template_name = 'projects/index.html'\n context_object_name = 'projectz'\n\n def get_queryset(self):\n \"\"\"Return all projects.\"\"\"\n return Project.objects.all()\n\n\nclass DetailView(generic.DetailView):\n model = Project\n template_name = 'projects/detail.html'\n",
"step-4": "from django.views import generic\nfrom .models import Project\n\n\nclass IndexView(generic.ListView):\n template_name = 'projects/index.html'\n context_object_name = 'projectz'\n\n def get_queryset(self):\n \"\"\"Return all projects.\"\"\"\n return Project.objects.all()\n\n\nclass DetailView(generic.DetailView):\n model = Project\n template_name = 'projects/detail.html'\n",
"step-5": "from django.views import generic\n\nfrom .models import Project\n\n\nclass IndexView(generic.ListView):\n template_name = \"projects/index.html\"\n context_object_name = \"projectz\"\n\n def get_queryset(self):\n \"\"\"Return all projects.\"\"\"\n return Project.objects.all()\n\nclass DetailView(generic.DetailView):\n model = Project\n template_name = \"projects/detail.html\"",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
#from getData import getRatings
import numpy as np

# Hyperparameters for the matrix-factorization recommender.
num_factors = 10        # number of latent factors per user / movie
num_iter = 75           # gradient-descent passes over the training ratings
regularization = 0.05   # L2 penalty (lambda) on the factor weights
lr = 0.005              # SGD learning rate
folds=5                 # number of cross-validation folds

#to make sure you are able to repeat results, set the random seed to something:
np.random.seed(17)
def split_matrix(ratings, num_users, num_movies):
    """Convert (userID, movieID, rating) triples into a dense rating matrix.

    Parameters
    ----------
    ratings : int array of shape (n, 3), 1-based user and movie IDs.
    num_users, num_movies : dimensions of the output matrix.

    Returns
    -------
    (num_users x num_movies) float array with the observed ratings and NaN
    in every unrated cell.

    Unrated cells are NaN (not 0) so the downstream RMSE code, which masks
    with np.isnan, really does skip missing entries; filling them with 0
    made that mask a no-op and inflated the reported error.
    """
    X = np.full((num_users, num_movies), np.nan)
    # Vectorized fill: IDs are 1-based, the matrix is 0-based.
    X[ratings[:, 0] - 1, ratings[:, 1] - 1] = ratings[:, 2]
    return X
def mf_gd(ratings, num_users, num_movies):
    """Factor the rating matrix as U @ M with stochastic gradient descent.

    Parameters
    ----------
    ratings : int array of shape (n, 3) with 1-based (userID, movieID, rating).
    num_users, num_movies : dimensions of the full rating matrix.

    Returns
    -------
    The dense (num_users x num_movies) matrix of predicted ratings U @ M.

    Uses the module-level hyperparameters num_factors, num_iter, lr and
    regularization. Prints the RMSE over the observed training entries
    after every pass.

    Bug fix: the original pre-allocated X_hat/err with
    np.zeros(num_users, num_movies), which passes num_movies as a dtype and
    raises TypeError (np.zeros needs a shape *tuple*); both arrays were
    overwritten before use anyway, so they are simply dropped. The original
    also aliased U_prime = U (no copy), so its updates were in-place SGD
    already; the redundant prime matrices are removed, keeping the exact
    same update order (M's update sees the freshly updated U entry).
    """
    X_data = split_matrix(ratings, num_users, num_movies)
    # Randomly initialized factor matrices (seeded at module level).
    U = np.random.rand(num_users, num_factors)
    M = np.random.rand(num_factors, num_movies)
    for nr in np.arange(num_iter):
        for i in np.arange(len(ratings)):
            userID = ratings[i, 0] - 1
            movieID = ratings[i, 1] - 1
            actual = ratings[i, 2]
            prediction = np.sum(U[userID, :] * M[:, movieID])  # SVD-style dot
            error = actual - prediction  # e(i, j)
            # Regularized SGD update, one factor at a time:
            # u(i,k) += lr * (2*e*m(k,j) - lambda*u(i,k))
            # m(k,j) += lr * (2*e*u(i,k) - lambda*m(k,j))
            for k in np.arange(num_factors):
                U[userID, k] += lr * (2 * error * M[k, movieID] - regularization * U[userID, k])
                M[k, movieID] += lr * (2 * error * U[userID, k] - regularization * M[k, movieID])
        # Intermediate RMSE over the observed (non-NaN) entries.
        err = X_data - np.dot(U, M)
        e = err[np.where(np.isnan(err) == False)]
        ir = np.sqrt(np.mean(e ** 2))
        print("Error for iteration #", nr, ":", ir)
    # Return the reconstructed rating matrix.
    return np.dot(U, M)
def mf(ratings_path="D:/Leiden/Semester 1_Sept/Assignment1/AiDM/ml-1m/ratings.dat"):
    """Run k-fold cross-validated matrix factorization on MovieLens ratings.

    Parameters
    ----------
    ratings_path : str, optional
        Location of the MovieLens ratings.dat file. The default keeps the
        original hard-coded path so existing callers are unaffected.

    For each of the module-level `folds` folds, trains via mf_gd on the
    training split and prints train/test RMSE over observed entries.
    """
    # Read dataset: '::'-delimited rows of userID::movieID::rating::timestamp.
    ratings = np.genfromtxt(ratings_path, usecols=(0, 1, 2), delimiter='::', dtype='int')
    # IDs are 1-based and dense enough that the max ID gives the matrix size.
    num_users = np.max(ratings[:, 0])
    num_movies = np.max(ratings[:, 1])
    print(num_users, num_movies)
    print(len(ratings))
    # k-fold cross validation.
    for f in np.arange(folds):
        print("Fold #", f)
        # Shuffle so each fold sees a fresh random train/test split.
        np.random.shuffle(ratings)
        train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if (x % folds) != f])
        test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if (x % folds) == f])
        # Matrix factorization on the training split.
        X_hat = mf_gd(train_set, num_users, num_movies)
        X_train = split_matrix(train_set, num_users, num_movies)
        X_test = split_matrix(test_set, num_users, num_movies)
        err_train = X_train - X_hat
        err_test = X_test - X_hat
        # RMSE over the observed (non-NaN) entries only.
        e_mf = err_train[np.where(np.isnan(err_train) == False)]
        error_train_mf = np.sqrt(np.mean(e_mf ** 2))
        e2_mf = err_test[np.where(np.isnan(err_test) == False)]
        error_test_mf = np.sqrt(np.mean(e2_mf ** 2))
        print('Matrix Factorization Error -> training set: ', error_train_mf)
        print('Matrix Factorization Error -> test set: ', error_test_mf)
# Entry point: runs the full cross-validated experiment on import/execution.
mf()

#Still getting a high error rate, not comparable to the website mentioned in the assignment doc.
# I need to check the logic again.
#https://medium.com/coinmonks/recommendation-engine-python-401c080c583e; followed this blogpost
|
normal
|
{
"blob_id": "b4267612e7939b635542099e1ba31e661720607a",
"index": 3129,
"step-1": "<mask token>\n\n\ndef split_matrix(ratings, num_users, num_movies):\n X = np.zeros((num_users, num_movies))\n for r in np.arange(len(ratings)):\n X[ratings[r, 0] - 1, ratings[r, 1] - 1] = ratings[r, 2]\n return X\n\n\ndef mf_gd(ratings, num_users, num_movies):\n X_data = split_matrix(ratings, num_users, num_movies)\n X_hat = np.zeros(num_users, num_movies)\n err = np.zeros(num_users, num_movies)\n U = np.random.rand(num_users, num_factors)\n M = np.random.rand(num_factors, num_movies)\n U_prime = U\n M_prime = M\n for nr in np.arange(num_iter):\n for i in np.arange(len(ratings)):\n userID = ratings[i, 0] - 1\n movieID = ratings[i, 1] - 1\n actual = ratings[i, 2]\n prediction = np.sum(U[userID, :] * M[:, movieID])\n error = actual - prediction\n for k in np.arange(num_factors):\n U_prime[userID, k] = U[userID, k] + lr * (2 * error * M[k,\n movieID] - regularization * U[userID, k])\n M_prime[k, movieID] = M[k, movieID] + lr * (2 * error * U[\n userID, k] - regularization * M[k, movieID])\n U = U_prime\n M = M_prime\n X_hat = np.dot(U, M)\n err = X_data - X_hat\n e = err[np.where(np.isnan(err) == False)]\n ir = np.sqrt(np.mean(e ** 2))\n print('Error for iteration #', nr, ':', ir)\n X_hat = np.dot(U, M)\n return X_hat\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef split_matrix(ratings, num_users, num_movies):\n X = np.zeros((num_users, num_movies))\n for r in np.arange(len(ratings)):\n X[ratings[r, 0] - 1, ratings[r, 1] - 1] = ratings[r, 2]\n return X\n\n\ndef mf_gd(ratings, num_users, num_movies):\n X_data = split_matrix(ratings, num_users, num_movies)\n X_hat = np.zeros(num_users, num_movies)\n err = np.zeros(num_users, num_movies)\n U = np.random.rand(num_users, num_factors)\n M = np.random.rand(num_factors, num_movies)\n U_prime = U\n M_prime = M\n for nr in np.arange(num_iter):\n for i in np.arange(len(ratings)):\n userID = ratings[i, 0] - 1\n movieID = ratings[i, 1] - 1\n actual = ratings[i, 2]\n prediction = np.sum(U[userID, :] * M[:, movieID])\n error = actual - prediction\n for k in np.arange(num_factors):\n U_prime[userID, k] = U[userID, k] + lr * (2 * error * M[k,\n movieID] - regularization * U[userID, k])\n M_prime[k, movieID] = M[k, movieID] + lr * (2 * error * U[\n userID, k] - regularization * M[k, movieID])\n U = U_prime\n M = M_prime\n X_hat = np.dot(U, M)\n err = X_data - X_hat\n e = err[np.where(np.isnan(err) == False)]\n ir = np.sqrt(np.mean(e ** 2))\n print('Error for iteration #', nr, ':', ir)\n X_hat = np.dot(U, M)\n return X_hat\n\n\ndef mf():\n ratings = np.genfromtxt(\n 'D:/Leiden/Semester 1_Sept/Assignment1/AiDM/ml-1m/ratings.dat',\n usecols=(0, 1, 2), delimiter='::', dtype='int')\n num_users = np.max(ratings[:, 0])\n num_movies = np.max(ratings[:, 1])\n print(num_users, num_movies)\n print(len(ratings))\n for f in np.arange(folds):\n print('Fold #', f)\n np.random.shuffle(ratings)\n train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds != f])\n test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds == f])\n X_hat = mf_gd(train_set, num_users, num_movies)\n X_train = split_matrix(train_set, num_users, num_movies)\n X_test = split_matrix(test_set, num_users, num_movies)\n err_train = X_train - X_hat\n err_test = 
X_test - X_hat\n e_mf = err_train[np.where(np.isnan(err_train) == False)]\n error_train_mf = np.sqrt(np.mean(e_mf ** 2))\n e2_mf = err_test[np.where(np.isnan(err_test) == False)]\n error_test_mf = np.sqrt(np.mean(e2_mf ** 2))\n print('Matrix Factorization Error -> training set: ', error_train_mf)\n print('Matrix Factorization Error -> test set: ', error_test_mf)\n\n\n<mask token>\n",
"step-3": "<mask token>\nnp.random.seed(17)\n\n\ndef split_matrix(ratings, num_users, num_movies):\n X = np.zeros((num_users, num_movies))\n for r in np.arange(len(ratings)):\n X[ratings[r, 0] - 1, ratings[r, 1] - 1] = ratings[r, 2]\n return X\n\n\ndef mf_gd(ratings, num_users, num_movies):\n X_data = split_matrix(ratings, num_users, num_movies)\n X_hat = np.zeros(num_users, num_movies)\n err = np.zeros(num_users, num_movies)\n U = np.random.rand(num_users, num_factors)\n M = np.random.rand(num_factors, num_movies)\n U_prime = U\n M_prime = M\n for nr in np.arange(num_iter):\n for i in np.arange(len(ratings)):\n userID = ratings[i, 0] - 1\n movieID = ratings[i, 1] - 1\n actual = ratings[i, 2]\n prediction = np.sum(U[userID, :] * M[:, movieID])\n error = actual - prediction\n for k in np.arange(num_factors):\n U_prime[userID, k] = U[userID, k] + lr * (2 * error * M[k,\n movieID] - regularization * U[userID, k])\n M_prime[k, movieID] = M[k, movieID] + lr * (2 * error * U[\n userID, k] - regularization * M[k, movieID])\n U = U_prime\n M = M_prime\n X_hat = np.dot(U, M)\n err = X_data - X_hat\n e = err[np.where(np.isnan(err) == False)]\n ir = np.sqrt(np.mean(e ** 2))\n print('Error for iteration #', nr, ':', ir)\n X_hat = np.dot(U, M)\n return X_hat\n\n\ndef mf():\n ratings = np.genfromtxt(\n 'D:/Leiden/Semester 1_Sept/Assignment1/AiDM/ml-1m/ratings.dat',\n usecols=(0, 1, 2), delimiter='::', dtype='int')\n num_users = np.max(ratings[:, 0])\n num_movies = np.max(ratings[:, 1])\n print(num_users, num_movies)\n print(len(ratings))\n for f in np.arange(folds):\n print('Fold #', f)\n np.random.shuffle(ratings)\n train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds != f])\n test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds == f])\n X_hat = mf_gd(train_set, num_users, num_movies)\n X_train = split_matrix(train_set, num_users, num_movies)\n X_test = split_matrix(test_set, num_users, num_movies)\n err_train = X_train - 
X_hat\n err_test = X_test - X_hat\n e_mf = err_train[np.where(np.isnan(err_train) == False)]\n error_train_mf = np.sqrt(np.mean(e_mf ** 2))\n e2_mf = err_test[np.where(np.isnan(err_test) == False)]\n error_test_mf = np.sqrt(np.mean(e2_mf ** 2))\n print('Matrix Factorization Error -> training set: ', error_train_mf)\n print('Matrix Factorization Error -> test set: ', error_test_mf)\n\n\nmf()\n",
"step-4": "<mask token>\nnum_factors = 10\nnum_iter = 75\nregularization = 0.05\nlr = 0.005\nfolds = 5\nnp.random.seed(17)\n\n\ndef split_matrix(ratings, num_users, num_movies):\n X = np.zeros((num_users, num_movies))\n for r in np.arange(len(ratings)):\n X[ratings[r, 0] - 1, ratings[r, 1] - 1] = ratings[r, 2]\n return X\n\n\ndef mf_gd(ratings, num_users, num_movies):\n X_data = split_matrix(ratings, num_users, num_movies)\n X_hat = np.zeros(num_users, num_movies)\n err = np.zeros(num_users, num_movies)\n U = np.random.rand(num_users, num_factors)\n M = np.random.rand(num_factors, num_movies)\n U_prime = U\n M_prime = M\n for nr in np.arange(num_iter):\n for i in np.arange(len(ratings)):\n userID = ratings[i, 0] - 1\n movieID = ratings[i, 1] - 1\n actual = ratings[i, 2]\n prediction = np.sum(U[userID, :] * M[:, movieID])\n error = actual - prediction\n for k in np.arange(num_factors):\n U_prime[userID, k] = U[userID, k] + lr * (2 * error * M[k,\n movieID] - regularization * U[userID, k])\n M_prime[k, movieID] = M[k, movieID] + lr * (2 * error * U[\n userID, k] - regularization * M[k, movieID])\n U = U_prime\n M = M_prime\n X_hat = np.dot(U, M)\n err = X_data - X_hat\n e = err[np.where(np.isnan(err) == False)]\n ir = np.sqrt(np.mean(e ** 2))\n print('Error for iteration #', nr, ':', ir)\n X_hat = np.dot(U, M)\n return X_hat\n\n\ndef mf():\n ratings = np.genfromtxt(\n 'D:/Leiden/Semester 1_Sept/Assignment1/AiDM/ml-1m/ratings.dat',\n usecols=(0, 1, 2), delimiter='::', dtype='int')\n num_users = np.max(ratings[:, 0])\n num_movies = np.max(ratings[:, 1])\n print(num_users, num_movies)\n print(len(ratings))\n for f in np.arange(folds):\n print('Fold #', f)\n np.random.shuffle(ratings)\n train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds != f])\n test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds == f])\n X_hat = mf_gd(train_set, num_users, num_movies)\n X_train = split_matrix(train_set, num_users, num_movies)\n 
X_test = split_matrix(test_set, num_users, num_movies)\n err_train = X_train - X_hat\n err_test = X_test - X_hat\n e_mf = err_train[np.where(np.isnan(err_train) == False)]\n error_train_mf = np.sqrt(np.mean(e_mf ** 2))\n e2_mf = err_test[np.where(np.isnan(err_test) == False)]\n error_test_mf = np.sqrt(np.mean(e2_mf ** 2))\n print('Matrix Factorization Error -> training set: ', error_train_mf)\n print('Matrix Factorization Error -> test set: ', error_test_mf)\n\n\nmf()\n",
"step-5": "#from getData import getRatings\r\nimport numpy as np \r\n\r\n\r\nnum_factors = 10\r\nnum_iter = 75\r\nregularization = 0.05\r\nlr = 0.005\r\nfolds=5\r\n\r\n#to make sure you are able to repeat results, set the random seed to something:\r\nnp.random.seed(17)\r\n\r\n\r\ndef split_matrix(ratings, num_users, num_movies):\r\n #Convert data into (IxJ) matrix\r\n X= np.zeros((num_users, num_movies))\r\n for r in np.arange(len(ratings)):\r\n X[ratings[r,0]-1,ratings[r,1]-1] = ratings[r,2]\r\n\r\n #print(X.shape)\r\n return X\r\n\r\n\r\ndef mf_gd(ratings, num_users, num_movies):\r\n X_data= split_matrix(ratings, num_users, num_movies)\r\n\r\n X_hat = np.zeros(num_users, num_movies) #predicted rating matrix\r\n err = np.zeros(num_users, num_movies) #error values\r\n\r\n # Randomly initialize weights in U and M \r\n U = np.random.rand(num_users, num_factors)\r\n M = np.random.rand(num_factors, num_movies)\r\n U_prime = U\r\n M_prime = M\r\n\r\n for nr in np.arange(num_iter):\r\n for i in np.arange(len(ratings)):\r\n userID = ratings[i,0]-1\r\n movieID = ratings[i,1]-1\r\n actual = ratings[i,2]\r\n prediction = np.sum(U[userID,:]*M[:,movieID]) #SVD\r\n error = actual - prediction #compute e(i,j)\r\n\r\n \r\n #update U and M using following equations:\r\n #Uprime(i,k) = u(i,k) + lr(2e*m(k,j)-lamda.u(i,k))\r\n #Mprime(k,j) = m(k,j) + lr(2e*u(i,k)-lamda.m(k,j))\r\n for k in np.arange(num_factors):\r\n U_prime[userID,k] = U[userID,k]+ lr * (2*error*M[k,movieID] - regularization * U[userID,k])\r\n M_prime[k,movieID] = M[k,movieID] + lr * (2*error*U[userID,k] - regularization * M[k,movieID])\r\n\r\n U = U_prime\r\n M = M_prime\r\n\r\n #Intermediate RMSE\r\n X_hat = np.dot(U,M)\r\n err = X_data-X_hat\r\n e = err[np.where(np.isnan(err)==False)]\r\n ir = np.sqrt(np.mean(e**2))\r\n\r\n print (\"Error for iteration #\", nr, \":\", ir)\r\n\r\n \r\n #Return the result \r\n X_hat = np.dot(U,M)\r\n return X_hat\r\n\r\n\r\ndef mf():\r\n #Read dataset \r\n #ratings = 
getRatings()\r\n ratings = np.genfromtxt(\"D:/Leiden/Semester 1_Sept/Assignment1/AiDM/ml-1m/ratings.dat\", usecols=(0,1,2), delimiter='::',dtype='int')\r\n\r\n #number of users and movies in data. \r\n num_users= np.max(ratings[:,0])\r\n num_movies= np.max(ratings[:,1])\r\n\r\n print(num_users, num_movies)\r\n print(len(ratings))\r\n \r\n #5-fold cross validation\r\n for f in np.arange(folds):\r\n print (\"Fold #\", f)\r\n\r\n #shuffle data for train and test\r\n np.random.shuffle(ratings)\r\n train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if (x%folds) !=f])\r\n test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if (x%folds) == f])\r\n\r\n \r\n #Matrix fact\r\n X_hat = mf_gd(train_set, num_users, num_movies)\r\n X_train = split_matrix(train_set, num_users, num_movies)\r\n X_test = split_matrix(test_set, num_users, num_movies)\r\n\r\n err_train = X_train- X_hat\r\n err_test = X_test - X_hat\r\n\r\n #RMSE\r\n e_mf = err_train[np.where(np.isnan(err_train)==False)]\r\n error_train_mf = np.sqrt(np.mean(e_mf**2))\r\n\r\n e2_mf = err_test[np.where(np.isnan(err_test)==False)]\r\n error_test_mf = np.sqrt(np.mean(e2_mf**2))\r\n \r\n\r\n print ('Matrix Factorization Error -> training set: ', error_train_mf)\r\n print ('Matrix Factorization Error -> test set: ', error_test_mf)\r\n\r\nmf()\r\n\r\n#Still getting a high error rate, not comparable to the website mentioned in the assignment doc. \r\n# I need to check the logic again. \r\n#https://medium.com/coinmonks/recommendation-engine-python-401c080c583e; followed this blogpost ",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
import os
from flask import Flask,request
from flask_restful import Resource,Api,reqparse
from flask_jwt import JWT,jwt_required
from resources.Users import UserRegister
from security import authenticate,identity
from resources.items import Item, ItemList
from resources.stores import Store, StoreList
# Flask application setup: a REST API with JWT authentication and a
# SQLAlchemy-backed store/item/user model.
app = Flask(__name__)
# Use DATABASE_URL when deployed (e.g. Heroku Postgres); fall back to local SQLite.
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL','sqlite:///data.db')
# Disable the Flask-SQLAlchemy event system to avoid extra overhead.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Secret key used by Flask-JWT to sign tokens.
# NOTE(review): should come from an environment variable, not source code.
app.secret_key = 'naveen'
api = Api(app)

# Creates the /auth endpoint; `authenticate` verifies credentials and
# `identity` resolves the user from a decoded token payload.
jwt = JWT(app,authenticate,identity)

# Resource routing: collection endpoints plus name-addressed single resources.
api.add_resource(StoreList,"/stores")
api.add_resource(Store,"/store/<string:name>")
api.add_resource(ItemList,"/items")
api.add_resource(Item,"/item/<string:name>")
api.add_resource(UserRegister,"/register")

# Only bind the database and start the dev server when executed directly,
# not when imported (e.g. by a WSGI server).
if __name__ =="__main__":
    from db import db
    db.init_app(app)
    app.run(port=5000,debug=True)
|
normal
|
{
"blob_id": "bf8f7b51b685f0e9131cb4d8a0bfc16ee5ad1263",
"index": 3281,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napi.add_resource(StoreList, '/stores')\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(port=5000, debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',\n 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'naveen'\napi = Api(app)\njwt = JWT(app, authenticate, identity)\napi.add_resource(StoreList, '/stores')\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(port=5000, debug=True)\n",
"step-4": "import os\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api, reqparse\nfrom flask_jwt import JWT, jwt_required\nfrom resources.Users import UserRegister\nfrom security import authenticate, identity\nfrom resources.items import Item, ItemList\nfrom resources.stores import Store, StoreList\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL',\n 'sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'naveen'\napi = Api(app)\njwt = JWT(app, authenticate, identity)\napi.add_resource(StoreList, '/stores')\napi.add_resource(Store, '/store/<string:name>')\napi.add_resource(ItemList, '/items')\napi.add_resource(Item, '/item/<string:name>')\napi.add_resource(UserRegister, '/register')\nif __name__ == '__main__':\n from db import db\n db.init_app(app)\n app.run(port=5000, debug=True)\n",
"step-5": "import os\nfrom flask import Flask,request\nfrom flask_restful import Resource,Api,reqparse\nfrom flask_jwt import JWT,jwt_required\nfrom resources.Users import UserRegister\nfrom security import authenticate,identity\nfrom resources.items import Item, ItemList\nfrom resources.stores import Store, StoreList\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL','sqlite:///data.db')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.secret_key = 'naveen'\napi = Api(app)\n\n\n\njwt = JWT(app,authenticate,identity)\n\n\n\napi.add_resource(StoreList,\"/stores\")\napi.add_resource(Store,\"/store/<string:name>\")\napi.add_resource(ItemList,\"/items\")\napi.add_resource(Item,\"/item/<string:name>\")\napi.add_resource(UserRegister,\"/register\")\n\nif __name__ ==\"__main__\":\n from db import db\n db.init_app(app)\n app.run(port=5000,debug=True) ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@attr.s
class MusicDB(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@feat.default
def _feat_default(self):
our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False
)
miao = our_feat[[('track', 'genre_top')]]
miao = miao.loc[self.df.index]
miao.columns = ['genre']
le = LabelEncoder()
label_encoders = dict()
column2encode = ['genre']
for col in column2encode:
le = LabelEncoder()
miao['enc_genre'] = le.fit_transform(miao[col])
label_encoders[col] = le
return miao
<|reserved_special_token_0|>
@sax.default
def _saxdf_default(self):
segments = 130
scaler = TimeSeriesScalerMeanVariance()
musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).
reshape(self.df.values.shape[0], self.df.values.shape[1]))
musi_scaled.index = self.df.index
sax = SymbolicAggregateApproximation(n_segments=segments,
alphabet_size_avg=20)
ts_sax = sax.fit_transform(musi_scaled)
miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],
segments))
miaoooooo.index = self.df.index
return miaoooooo
<|reserved_special_token_0|>
def _dataframe_populate(self):
y, sr = librosa.load('data/music/000/000002.mp3', sr=None)
miao = librosa.resample(y, sr, 90)
number_of_feat = len(miao)
print(f'Building a dataframe with {number_of_feat} features.')
dfm = pd.DataFrame(columns=list(range(number_of_feat)))
num_errors = 0
p = Path('data/music').glob('**/*.mp3')
tracks = [x for x in p if x.is_file()]
print(f'Making a Dataframe of len {len(tracks)}.')
progress = Progress('[progress.description]{task.description}',
BarColumn(), '{task.completed} of {task.total}',
'[progress.percentage]{task.percentage:>3.0f}%',
TimeRemainingColumn())
with progress:
task_id = progress.add_task('[cyan]Extracting...', total=len(
tracks))
with multiprocessing.Pool() as pool:
for row in pool.imap_unordered(self._do_one_song, tracks):
if type(row) is not bool:
dfm = dfm.append(row)
else:
num_errors += 1
progress.advance(task_id)
dfm = dfm.sort_index()
dfm = dfm.loc[:, :number_of_feat - 1]
print(
f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'
)
print(f'There also were {num_errors} errors.')
dfm = dfm.fillna(value=0)
dfm.to_pickle('data/picks/small.pkl')
return dfm
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@attr.s
class MusicDB(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@feat.default
def _feat_default(self):
our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False
)
miao = our_feat[[('track', 'genre_top')]]
miao = miao.loc[self.df.index]
miao.columns = ['genre']
le = LabelEncoder()
label_encoders = dict()
column2encode = ['genre']
for col in column2encode:
le = LabelEncoder()
miao['enc_genre'] = le.fit_transform(miao[col])
label_encoders[col] = le
return miao
<|reserved_special_token_0|>
@sax.default
def _saxdf_default(self):
segments = 130
scaler = TimeSeriesScalerMeanVariance()
musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).
reshape(self.df.values.shape[0], self.df.values.shape[1]))
musi_scaled.index = self.df.index
sax = SymbolicAggregateApproximation(n_segments=segments,
alphabet_size_avg=20)
ts_sax = sax.fit_transform(musi_scaled)
miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],
segments))
miaoooooo.index = self.df.index
return miaoooooo
def _dataframe_pickleload(self):
path_to_pickle = Path('data/picks/small.pkl')
try:
pipi = pd.read_pickle(path_to_pickle)
except FileNotFoundError:
return False
return pipi
def _dataframe_populate(self):
y, sr = librosa.load('data/music/000/000002.mp3', sr=None)
miao = librosa.resample(y, sr, 90)
number_of_feat = len(miao)
print(f'Building a dataframe with {number_of_feat} features.')
dfm = pd.DataFrame(columns=list(range(number_of_feat)))
num_errors = 0
p = Path('data/music').glob('**/*.mp3')
tracks = [x for x in p if x.is_file()]
print(f'Making a Dataframe of len {len(tracks)}.')
progress = Progress('[progress.description]{task.description}',
BarColumn(), '{task.completed} of {task.total}',
'[progress.percentage]{task.percentage:>3.0f}%',
TimeRemainingColumn())
with progress:
task_id = progress.add_task('[cyan]Extracting...', total=len(
tracks))
with multiprocessing.Pool() as pool:
for row in pool.imap_unordered(self._do_one_song, tracks):
if type(row) is not bool:
dfm = dfm.append(row)
else:
num_errors += 1
progress.advance(task_id)
dfm = dfm.sort_index()
dfm = dfm.loc[:, :number_of_feat - 1]
print(
f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'
)
print(f'There also were {num_errors} errors.')
dfm = dfm.fillna(value=0)
dfm.to_pickle('data/picks/small.pkl')
return dfm
def _do_one_song(self, song):
try:
y, sr = librosa.load(str(song), sr=None)
miao = librosa.resample(y, sr, 120)
miao = pd.Series(data=miao)
miao.name = int(song.stem)
return miao
except:
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@attr.s
class MusicDB(object):
df = attr.ib()
feat = attr.ib()
sax = attr.ib()
@feat.default
def _feat_default(self):
our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False
)
miao = our_feat[[('track', 'genre_top')]]
miao = miao.loc[self.df.index]
miao.columns = ['genre']
le = LabelEncoder()
label_encoders = dict()
column2encode = ['genre']
for col in column2encode:
le = LabelEncoder()
miao['enc_genre'] = le.fit_transform(miao[col])
label_encoders[col] = le
return miao
@df.default
def _dataframe_default(self):
pick = self._dataframe_pickleload()
if type(pick) is not bool:
return pick
return self._dataframe_populate()
@sax.default
def _saxdf_default(self):
segments = 130
scaler = TimeSeriesScalerMeanVariance()
musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).
reshape(self.df.values.shape[0], self.df.values.shape[1]))
musi_scaled.index = self.df.index
sax = SymbolicAggregateApproximation(n_segments=segments,
alphabet_size_avg=20)
ts_sax = sax.fit_transform(musi_scaled)
miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],
segments))
miaoooooo.index = self.df.index
return miaoooooo
def _dataframe_pickleload(self):
path_to_pickle = Path('data/picks/small.pkl')
try:
pipi = pd.read_pickle(path_to_pickle)
except FileNotFoundError:
return False
return pipi
def _dataframe_populate(self):
y, sr = librosa.load('data/music/000/000002.mp3', sr=None)
miao = librosa.resample(y, sr, 90)
number_of_feat = len(miao)
print(f'Building a dataframe with {number_of_feat} features.')
dfm = pd.DataFrame(columns=list(range(number_of_feat)))
num_errors = 0
p = Path('data/music').glob('**/*.mp3')
tracks = [x for x in p if x.is_file()]
print(f'Making a Dataframe of len {len(tracks)}.')
progress = Progress('[progress.description]{task.description}',
BarColumn(), '{task.completed} of {task.total}',
'[progress.percentage]{task.percentage:>3.0f}%',
TimeRemainingColumn())
with progress:
task_id = progress.add_task('[cyan]Extracting...', total=len(
tracks))
with multiprocessing.Pool() as pool:
for row in pool.imap_unordered(self._do_one_song, tracks):
if type(row) is not bool:
dfm = dfm.append(row)
else:
num_errors += 1
progress.advance(task_id)
dfm = dfm.sort_index()
dfm = dfm.loc[:, :number_of_feat - 1]
print(
f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'
)
print(f'There also were {num_errors} errors.')
dfm = dfm.fillna(value=0)
dfm.to_pickle('data/picks/small.pkl')
return dfm
def _do_one_song(self, song):
try:
y, sr = librosa.load(str(song), sr=None)
miao = librosa.resample(y, sr, 120)
miao = pd.Series(data=miao)
miao.name = int(song.stem)
return miao
except:
return False
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if not sys.warnoptions:
warnings.simplefilter('ignore')
@attr.s
class MusicDB(object):
df = attr.ib()
feat = attr.ib()
sax = attr.ib()
@feat.default
def _feat_default(self):
our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False
)
miao = our_feat[[('track', 'genre_top')]]
miao = miao.loc[self.df.index]
miao.columns = ['genre']
le = LabelEncoder()
label_encoders = dict()
column2encode = ['genre']
for col in column2encode:
le = LabelEncoder()
miao['enc_genre'] = le.fit_transform(miao[col])
label_encoders[col] = le
return miao
@df.default
def _dataframe_default(self):
pick = self._dataframe_pickleload()
if type(pick) is not bool:
return pick
return self._dataframe_populate()
@sax.default
def _saxdf_default(self):
segments = 130
scaler = TimeSeriesScalerMeanVariance()
musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).
reshape(self.df.values.shape[0], self.df.values.shape[1]))
musi_scaled.index = self.df.index
sax = SymbolicAggregateApproximation(n_segments=segments,
alphabet_size_avg=20)
ts_sax = sax.fit_transform(musi_scaled)
miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],
segments))
miaoooooo.index = self.df.index
return miaoooooo
def _dataframe_pickleload(self):
path_to_pickle = Path('data/picks/small.pkl')
try:
pipi = pd.read_pickle(path_to_pickle)
except FileNotFoundError:
return False
return pipi
def _dataframe_populate(self):
y, sr = librosa.load('data/music/000/000002.mp3', sr=None)
miao = librosa.resample(y, sr, 90)
number_of_feat = len(miao)
print(f'Building a dataframe with {number_of_feat} features.')
dfm = pd.DataFrame(columns=list(range(number_of_feat)))
num_errors = 0
p = Path('data/music').glob('**/*.mp3')
tracks = [x for x in p if x.is_file()]
print(f'Making a Dataframe of len {len(tracks)}.')
progress = Progress('[progress.description]{task.description}',
BarColumn(), '{task.completed} of {task.total}',
'[progress.percentage]{task.percentage:>3.0f}%',
TimeRemainingColumn())
with progress:
task_id = progress.add_task('[cyan]Extracting...', total=len(
tracks))
with multiprocessing.Pool() as pool:
for row in pool.imap_unordered(self._do_one_song, tracks):
if type(row) is not bool:
dfm = dfm.append(row)
else:
num_errors += 1
progress.advance(task_id)
dfm = dfm.sort_index()
dfm = dfm.loc[:, :number_of_feat - 1]
print(
f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'
)
print(f'There also were {num_errors} errors.')
dfm = dfm.fillna(value=0)
dfm.to_pickle('data/picks/small.pkl')
return dfm
def _do_one_song(self, song):
try:
y, sr = librosa.load(str(song), sr=None)
miao = librosa.resample(y, sr, 120)
miao = pd.Series(data=miao)
miao.name = int(song.stem)
return miao
except:
return False
if __name__ == '__main__':
music = MusicDB()
print(music.df.info())
print(music.df.head())
<|reserved_special_token_1|>
import multiprocessing
import sys
import warnings
from pathlib import Path
import attr
import librosa
import pandas as pd
from rich.progress import BarColumn, Progress, TimeRemainingColumn
from sklearn.preprocessing import LabelEncoder
from tslearn.piecewise import SymbolicAggregateApproximation
from tslearn.preprocessing import TimeSeriesScalerMeanVariance
import utils
# Silence all warnings unless the user explicitly asked for them via -W.
if not sys.warnoptions:
    warnings.simplefilter("ignore")
@attr.s
class MusicDB(object):
    """Lazy container for the music dataset.

    All three attributes are built on first instantiation via ``attrs``
    default factories:

    - ``df``:   DataFrame of raw resampled waveforms, one row per track
                (index = track id, columns = sample positions).  Loaded from
                ``data/picks/small.pkl`` when present, otherwise extracted
                from the mp3s under ``data/music`` and pickled.
    - ``feat``: per-track top genre plus a label-encoded copy (``enc_genre``).
    - ``sax``:  SAX symbolic approximation of the mean/variance-scaled
                waveforms (130 segments, 20-letter alphabet).
    """

    df = attr.ib()
    feat = attr.ib()
    sax = attr.ib()

    # --- private builders -------------------------------------------------

    @feat.default
    def _feat_default(self):
        """Load the top genre for every track in ``df`` and label-encode it."""
        our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False)
        miao = our_feat[[("track", "genre_top")]]
        # Keep only the tracks we actually have waveforms for, in df order.
        miao = miao.loc[self.df.index]
        miao.columns = ["genre"]
        # Encode genre strings into integers for downstream models.
        label_encoders = dict()
        for col in ["genre"]:
            le = LabelEncoder()
            miao["enc_genre"] = le.fit_transform(miao[col])
            label_encoders[col] = le
        return miao

    @df.default
    def _dataframe_default(self):
        """Return the cached waveform DataFrame, building it if necessary."""
        pick = self._dataframe_pickleload()
        if type(pick) is not bool:
            return pick
        # No pickle on disk yet: extract everything from the mp3s.
        return self._dataframe_populate()

    @sax.default
    def _saxdf_default(self):
        """Build a SAX representation: scale each series, then discretize."""
        segments = 130
        scaler = TimeSeriesScalerMeanVariance()
        musi_scaled = pd.DataFrame(
            scaler.fit_transform(self.df.values).reshape(
                self.df.values.shape[0], self.df.values.shape[1]
            )
        )
        musi_scaled.index = self.df.index
        sax = SymbolicAggregateApproximation(n_segments=segments, alphabet_size_avg=20)
        ts_sax = sax.fit_transform(musi_scaled)
        miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0], segments))
        miaoooooo.index = self.df.index
        return miaoooooo

    def _dataframe_pickleload(self):
        """Return the pickled DataFrame if present, otherwise False."""
        path_to_pickle = Path("data/picks/small.pkl")
        try:
            pipi = pd.read_pickle(path_to_pickle)
        except FileNotFoundError:
            return False
        return pipi

    def _dataframe_populate(self):
        """Extract a resampled waveform from every mp3 (in parallel), pickle it."""
        # Establish the number of features using a reference song.
        # NOTE(review): the reference is resampled at 90 Hz while
        # _do_one_song resamples at 120 Hz, so real rows come back longer
        # and are truncated below -- confirm whether this is intentional.
        y, sr = librosa.load("data/music/000/000002.mp3", sr=None)
        miao = librosa.resample(y, sr, 90)
        number_of_feat = len(miao)

        print(f"Building a dataframe with {number_of_feat} features.")
        dfm = pd.DataFrame(columns=list(range(number_of_feat)))
        num_errors = 0

        # Collect the paths of every mp3 under data/music.
        p = Path("data/music").glob("**/*.mp3")
        tracks = [x for x in p if x.is_file()]
        print(f"Making a Dataframe of len {len(tracks)}.")

        progress = Progress(
            "[progress.description]{task.description}",
            BarColumn(),
            "{task.completed} of {task.total}",
            "[progress.percentage]{task.percentage:>3.0f}%",
            TimeRemainingColumn(),
        )

        # Extract in parallel; a failed song just bumps the error counter.
        with progress:
            task_id = progress.add_task("[cyan]Extracting...", total=len(tracks))
            with multiprocessing.Pool() as pool:
                for row in pool.imap_unordered(self._do_one_song, tracks):
                    if type(row) is not bool:
                        dfm = dfm.append(row)
                    else:
                        num_errors += 1
                    progress.advance(task_id)

        dfm = dfm.sort_index()
        # Force the shape to match the reference song exactly.
        dfm = dfm.loc[:, : number_of_feat - 1]
        print(f"There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.")
        print(f"There also were {num_errors} errors.")
        dfm = dfm.fillna(value=0)
        dfm.to_pickle("data/picks/small.pkl")
        return dfm

    def _do_one_song(self, song):
        """Load one mp3, resample to 120 Hz and return it as a named Series.

        Returns False on any decoding/IO failure so the caller can count it.
        """
        try:
            y, sr = librosa.load(str(song), sr=None)
            miao = librosa.resample(y, sr, 120)
            # Name the series after the numeric track id (the file stem).
            miao = pd.Series(data=miao)
            miao.name = int(song.stem)
            return miao
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # still propagate out of the worker processes.
            return False
if __name__ == "__main__":
    # Smoke test: constructing MusicDB triggers the load-or-populate path,
    # then we inspect the resulting dataframe.
    music_db = MusicDB()
    print(music_db.df.info())
    print(music_db.df.head())
|
flexible
|
{
"blob_id": "0e57e25c11ba97aef5467f61d99065609e127f5b",
"index": 2782,
"step-1": "<mask token>\n\n\n@attr.s\nclass MusicDB(object):\n <mask token>\n <mask token>\n <mask token>\n\n @feat.default\n def _feat_default(self):\n our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False\n )\n miao = our_feat[[('track', 'genre_top')]]\n miao = miao.loc[self.df.index]\n miao.columns = ['genre']\n le = LabelEncoder()\n label_encoders = dict()\n column2encode = ['genre']\n for col in column2encode:\n le = LabelEncoder()\n miao['enc_genre'] = le.fit_transform(miao[col])\n label_encoders[col] = le\n return miao\n <mask token>\n\n @sax.default\n def _saxdf_default(self):\n segments = 130\n scaler = TimeSeriesScalerMeanVariance()\n musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).\n reshape(self.df.values.shape[0], self.df.values.shape[1]))\n musi_scaled.index = self.df.index\n sax = SymbolicAggregateApproximation(n_segments=segments,\n alphabet_size_avg=20)\n ts_sax = sax.fit_transform(musi_scaled)\n miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],\n segments))\n miaoooooo.index = self.df.index\n return miaoooooo\n <mask token>\n\n def _dataframe_populate(self):\n y, sr = librosa.load('data/music/000/000002.mp3', sr=None)\n miao = librosa.resample(y, sr, 90)\n number_of_feat = len(miao)\n print(f'Building a dataframe with {number_of_feat} features.')\n dfm = pd.DataFrame(columns=list(range(number_of_feat)))\n num_errors = 0\n p = Path('data/music').glob('**/*.mp3')\n tracks = [x for x in p if x.is_file()]\n print(f'Making a Dataframe of len {len(tracks)}.')\n progress = Progress('[progress.description]{task.description}',\n BarColumn(), '{task.completed} of {task.total}',\n '[progress.percentage]{task.percentage:>3.0f}%',\n TimeRemainingColumn())\n with progress:\n task_id = progress.add_task('[cyan]Extracting...', total=len(\n tracks))\n with multiprocessing.Pool() as pool:\n for row in pool.imap_unordered(self._do_one_song, tracks):\n if type(row) is not bool:\n dfm = dfm.append(row)\n else:\n 
num_errors += 1\n progress.advance(task_id)\n dfm = dfm.sort_index()\n dfm = dfm.loc[:, :number_of_feat - 1]\n print(\n f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'\n )\n print(f'There also were {num_errors} errors.')\n dfm = dfm.fillna(value=0)\n dfm.to_pickle('data/picks/small.pkl')\n return dfm\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@attr.s\nclass MusicDB(object):\n <mask token>\n <mask token>\n <mask token>\n\n @feat.default\n def _feat_default(self):\n our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False\n )\n miao = our_feat[[('track', 'genre_top')]]\n miao = miao.loc[self.df.index]\n miao.columns = ['genre']\n le = LabelEncoder()\n label_encoders = dict()\n column2encode = ['genre']\n for col in column2encode:\n le = LabelEncoder()\n miao['enc_genre'] = le.fit_transform(miao[col])\n label_encoders[col] = le\n return miao\n <mask token>\n\n @sax.default\n def _saxdf_default(self):\n segments = 130\n scaler = TimeSeriesScalerMeanVariance()\n musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).\n reshape(self.df.values.shape[0], self.df.values.shape[1]))\n musi_scaled.index = self.df.index\n sax = SymbolicAggregateApproximation(n_segments=segments,\n alphabet_size_avg=20)\n ts_sax = sax.fit_transform(musi_scaled)\n miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],\n segments))\n miaoooooo.index = self.df.index\n return miaoooooo\n\n def _dataframe_pickleload(self):\n path_to_pickle = Path('data/picks/small.pkl')\n try:\n pipi = pd.read_pickle(path_to_pickle)\n except FileNotFoundError:\n return False\n return pipi\n\n def _dataframe_populate(self):\n y, sr = librosa.load('data/music/000/000002.mp3', sr=None)\n miao = librosa.resample(y, sr, 90)\n number_of_feat = len(miao)\n print(f'Building a dataframe with {number_of_feat} features.')\n dfm = pd.DataFrame(columns=list(range(number_of_feat)))\n num_errors = 0\n p = Path('data/music').glob('**/*.mp3')\n tracks = [x for x in p if x.is_file()]\n print(f'Making a Dataframe of len {len(tracks)}.')\n progress = Progress('[progress.description]{task.description}',\n BarColumn(), '{task.completed} of {task.total}',\n '[progress.percentage]{task.percentage:>3.0f}%',\n TimeRemainingColumn())\n with progress:\n task_id = progress.add_task('[cyan]Extracting...', total=len(\n 
tracks))\n with multiprocessing.Pool() as pool:\n for row in pool.imap_unordered(self._do_one_song, tracks):\n if type(row) is not bool:\n dfm = dfm.append(row)\n else:\n num_errors += 1\n progress.advance(task_id)\n dfm = dfm.sort_index()\n dfm = dfm.loc[:, :number_of_feat - 1]\n print(\n f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'\n )\n print(f'There also were {num_errors} errors.')\n dfm = dfm.fillna(value=0)\n dfm.to_pickle('data/picks/small.pkl')\n return dfm\n\n def _do_one_song(self, song):\n try:\n y, sr = librosa.load(str(song), sr=None)\n miao = librosa.resample(y, sr, 120)\n miao = pd.Series(data=miao)\n miao.name = int(song.stem)\n return miao\n except:\n return False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@attr.s\nclass MusicDB(object):\n df = attr.ib()\n feat = attr.ib()\n sax = attr.ib()\n\n @feat.default\n def _feat_default(self):\n our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False\n )\n miao = our_feat[[('track', 'genre_top')]]\n miao = miao.loc[self.df.index]\n miao.columns = ['genre']\n le = LabelEncoder()\n label_encoders = dict()\n column2encode = ['genre']\n for col in column2encode:\n le = LabelEncoder()\n miao['enc_genre'] = le.fit_transform(miao[col])\n label_encoders[col] = le\n return miao\n\n @df.default\n def _dataframe_default(self):\n pick = self._dataframe_pickleload()\n if type(pick) is not bool:\n return pick\n return self._dataframe_populate()\n\n @sax.default\n def _saxdf_default(self):\n segments = 130\n scaler = TimeSeriesScalerMeanVariance()\n musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).\n reshape(self.df.values.shape[0], self.df.values.shape[1]))\n musi_scaled.index = self.df.index\n sax = SymbolicAggregateApproximation(n_segments=segments,\n alphabet_size_avg=20)\n ts_sax = sax.fit_transform(musi_scaled)\n miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],\n segments))\n miaoooooo.index = self.df.index\n return miaoooooo\n\n def _dataframe_pickleload(self):\n path_to_pickle = Path('data/picks/small.pkl')\n try:\n pipi = pd.read_pickle(path_to_pickle)\n except FileNotFoundError:\n return False\n return pipi\n\n def _dataframe_populate(self):\n y, sr = librosa.load('data/music/000/000002.mp3', sr=None)\n miao = librosa.resample(y, sr, 90)\n number_of_feat = len(miao)\n print(f'Building a dataframe with {number_of_feat} features.')\n dfm = pd.DataFrame(columns=list(range(number_of_feat)))\n num_errors = 0\n p = Path('data/music').glob('**/*.mp3')\n tracks = [x for x in p if x.is_file()]\n print(f'Making a Dataframe of len {len(tracks)}.')\n progress = Progress('[progress.description]{task.description}',\n BarColumn(), '{task.completed} of {task.total}',\n 
'[progress.percentage]{task.percentage:>3.0f}%',\n TimeRemainingColumn())\n with progress:\n task_id = progress.add_task('[cyan]Extracting...', total=len(\n tracks))\n with multiprocessing.Pool() as pool:\n for row in pool.imap_unordered(self._do_one_song, tracks):\n if type(row) is not bool:\n dfm = dfm.append(row)\n else:\n num_errors += 1\n progress.advance(task_id)\n dfm = dfm.sort_index()\n dfm = dfm.loc[:, :number_of_feat - 1]\n print(\n f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'\n )\n print(f'There also were {num_errors} errors.')\n dfm = dfm.fillna(value=0)\n dfm.to_pickle('data/picks/small.pkl')\n return dfm\n\n def _do_one_song(self, song):\n try:\n y, sr = librosa.load(str(song), sr=None)\n miao = librosa.resample(y, sr, 120)\n miao = pd.Series(data=miao)\n miao.name = int(song.stem)\n return miao\n except:\n return False\n\n\n<mask token>\n",
"step-4": "<mask token>\nif not sys.warnoptions:\n warnings.simplefilter('ignore')\n\n\n@attr.s\nclass MusicDB(object):\n df = attr.ib()\n feat = attr.ib()\n sax = attr.ib()\n\n @feat.default\n def _feat_default(self):\n our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False\n )\n miao = our_feat[[('track', 'genre_top')]]\n miao = miao.loc[self.df.index]\n miao.columns = ['genre']\n le = LabelEncoder()\n label_encoders = dict()\n column2encode = ['genre']\n for col in column2encode:\n le = LabelEncoder()\n miao['enc_genre'] = le.fit_transform(miao[col])\n label_encoders[col] = le\n return miao\n\n @df.default\n def _dataframe_default(self):\n pick = self._dataframe_pickleload()\n if type(pick) is not bool:\n return pick\n return self._dataframe_populate()\n\n @sax.default\n def _saxdf_default(self):\n segments = 130\n scaler = TimeSeriesScalerMeanVariance()\n musi_scaled = pd.DataFrame(scaler.fit_transform(self.df.values).\n reshape(self.df.values.shape[0], self.df.values.shape[1]))\n musi_scaled.index = self.df.index\n sax = SymbolicAggregateApproximation(n_segments=segments,\n alphabet_size_avg=20)\n ts_sax = sax.fit_transform(musi_scaled)\n miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0],\n segments))\n miaoooooo.index = self.df.index\n return miaoooooo\n\n def _dataframe_pickleload(self):\n path_to_pickle = Path('data/picks/small.pkl')\n try:\n pipi = pd.read_pickle(path_to_pickle)\n except FileNotFoundError:\n return False\n return pipi\n\n def _dataframe_populate(self):\n y, sr = librosa.load('data/music/000/000002.mp3', sr=None)\n miao = librosa.resample(y, sr, 90)\n number_of_feat = len(miao)\n print(f'Building a dataframe with {number_of_feat} features.')\n dfm = pd.DataFrame(columns=list(range(number_of_feat)))\n num_errors = 0\n p = Path('data/music').glob('**/*.mp3')\n tracks = [x for x in p if x.is_file()]\n print(f'Making a Dataframe of len {len(tracks)}.')\n progress = 
Progress('[progress.description]{task.description}',\n BarColumn(), '{task.completed} of {task.total}',\n '[progress.percentage]{task.percentage:>3.0f}%',\n TimeRemainingColumn())\n with progress:\n task_id = progress.add_task('[cyan]Extracting...', total=len(\n tracks))\n with multiprocessing.Pool() as pool:\n for row in pool.imap_unordered(self._do_one_song, tracks):\n if type(row) is not bool:\n dfm = dfm.append(row)\n else:\n num_errors += 1\n progress.advance(task_id)\n dfm = dfm.sort_index()\n dfm = dfm.loc[:, :number_of_feat - 1]\n print(\n f'There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.'\n )\n print(f'There also were {num_errors} errors.')\n dfm = dfm.fillna(value=0)\n dfm.to_pickle('data/picks/small.pkl')\n return dfm\n\n def _do_one_song(self, song):\n try:\n y, sr = librosa.load(str(song), sr=None)\n miao = librosa.resample(y, sr, 120)\n miao = pd.Series(data=miao)\n miao.name = int(song.stem)\n return miao\n except:\n return False\n\n\nif __name__ == '__main__':\n music = MusicDB()\n print(music.df.info())\n print(music.df.head())\n",
"step-5": "import multiprocessing\nimport sys\nimport warnings\nfrom pathlib import Path\n\nimport attr\nimport librosa\nimport pandas as pd\nfrom rich.progress import BarColumn, Progress, TimeRemainingColumn\nfrom sklearn.preprocessing import LabelEncoder\nfrom tslearn.piecewise import SymbolicAggregateApproximation\nfrom tslearn.preprocessing import TimeSeriesScalerMeanVariance\n\nimport utils\n\nif not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n\n\n@attr.s\nclass MusicDB(object):\n df = attr.ib()\n feat = attr.ib()\n sax = attr.ib()\n\n # start of private methods\n @feat.default\n def _feat_default(self):\n our_feat = utils.load_tracks(givegenre=True, outliers=False, fill=False)\n miao = our_feat[[(\"track\", \"genre_top\")]]\n miao = miao.loc[self.df.index]\n miao.columns = [\"genre\"]\n\n le = LabelEncoder()\n label_encoders = dict()\n column2encode = [(\"genre\")]\n for col in column2encode:\n le = LabelEncoder()\n miao[\"enc_genre\"] = le.fit_transform(miao[col])\n label_encoders[col] = le\n return miao\n\n @df.default\n def _dataframe_default(self):\n pick = self._dataframe_pickleload()\n if type(pick) is not bool:\n return pick\n # if not, populate\n return self._dataframe_populate()\n\n @sax.default\n def _saxdf_default(self):\n segments = 130\n scaler = TimeSeriesScalerMeanVariance()\n musi_scaled = pd.DataFrame(\n scaler.fit_transform(self.df.values).reshape(\n self.df.values.shape[0], self.df.values.shape[1]\n )\n )\n musi_scaled.index = self.df.index\n sax = SymbolicAggregateApproximation(n_segments=segments, alphabet_size_avg=20)\n ts_sax = sax.fit_transform(musi_scaled)\n miaoooooo = pd.DataFrame(ts_sax.reshape(self.df.values.shape[0], segments))\n miaoooooo.index = self.df.index\n return miaoooooo\n\n def _dataframe_pickleload(self):\n path_to_pickle = Path(\"data/picks/small.pkl\")\n try:\n pipi = pd.read_pickle(path_to_pickle)\n except FileNotFoundError:\n return False\n return pipi\n\n def _dataframe_populate(self):\n # estabilish 
number of features using the main song\n y, sr = librosa.load(\"data/music/000/000002.mp3\", sr=None)\n miao = librosa.resample(y, sr, 90)\n number_of_feat = len(miao)\n\n # make df\n print(f\"Building a dataframe with {number_of_feat} features.\")\n dfm = pd.DataFrame(columns=list(range(number_of_feat)))\n num_errors = 0\n\n # populate collection of paths of mp3s\n p = Path(\"data/music\").glob(\"**/*.mp3\")\n tracks = [x for x in p if x.is_file()]\n print(f\"Making a Dataframe of len {len(tracks)}.\")\n\n # make progress reporting\n progress = Progress(\n \"[progress.description]{task.description}\",\n BarColumn(),\n \"{task.completed} of {task.total}\",\n \"[progress.percentage]{task.percentage:>3.0f}%\",\n TimeRemainingColumn(),\n )\n\n # populate df\n with progress:\n task_id = progress.add_task(\"[cyan]Extracting...\", total=len(tracks))\n with multiprocessing.Pool() as pool:\n for row in pool.imap_unordered(self._do_one_song, tracks):\n if type(row) is not bool:\n dfm = dfm.append(row)\n else:\n num_errors += 1\n progress.advance(task_id)\n\n dfm = dfm.sort_index()\n # ensure the shape is the one of the main song\n dfm = dfm.loc[:, : number_of_feat - 1]\n print(f\"There were {dfm.shape[0] * dfm.shape[1] - dfm.count().sum()} NaN.\")\n print(f\"There also were {num_errors} errors.\")\n dfm = dfm.fillna(value=0)\n dfm.to_pickle(\"data/picks/small.pkl\")\n return dfm\n\n def _do_one_song(self, song):\n # extract waveform and convert\n try:\n y, sr = librosa.load(str(song), sr=None)\n miao = librosa.resample(y, sr, 120)\n # fix the index\n miao = pd.Series(data=miao)\n miao.name = int(song.stem)\n return miao\n except:\n return False\n\n\nif __name__ == \"__main__\":\n music = MusicDB()\n # some printing just to understand how this works\n print(music.df.info())\n print(music.df.head())\n",
"step-ids": [
4,
6,
8,
9,
11
]
}
|
[
4,
6,
8,
9,
11
] |
import time
import pickle
class BayesNetClassifier:
    """Naive Bayes classifier for guessing a tweet's city of origin.

    Each training line holds a city label followed by the tweet's words.
    ``fit`` builds per-city priors and word likelihoods, which can then be
    pickled with ``pickle_probs`` for a separate prediction step.
    """

    def __init__(self, train_file, out_file):
        self.train_file = train_file     # path to labelled training tweets
        self.out_file = out_file         # where pickled tables are written
        self.word_count_loc = {}         # city -> {word: count}
        self.word_probs = {}             # city -> {word: P(word | city)}
        self.l_probs = {}                # city -> P(city) prior
        self.word_counts = {}            # word -> corpus-wide count
        self.common_words = {}           # words seen at least 5 times
        self.cities = []                 # observed city labels
        self.total_words = 0             # vocabulary size

    # Saves probabilities to a pickle file
    def pickle_probs(self):
        """Serialize the fitted probability tables to ``self.out_file``."""
        all_probs = {'type': 'bayes', 'location': self.l_probs, 'words': self.word_probs, 'total': self.total_words, 'cities': self.cities}
        #source: https://stackoverflow.com/questions/11218477/how-can-i-use-pickle-to-save-a-dict
        with open(self.out_file, 'wb') as handle:
            pickle.dump(all_probs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    # Loads training data
    # Some ideas taken from label.py given by Dr. Crandall
    def read_data(self, fname):
        """Return one tuple per line: the label kept verbatim, every other
        token lower-cased.  The file handle is closed deterministically
        (the original version leaked it)."""
        exemplars = []
        with open(fname, 'r') as file:
            for line in file:
                data = tuple(w if i == 0 else w.lower() for i, w in enumerate(line.split()))
                exemplars += [data]
        return exemplars

    # Source: https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary
    def max_val(self, d, c):
        """Pop the ``c`` largest entries out of ``d`` (mutates ``d``!) and
        return them as a dict in descending-value order."""
        top = {}
        for _ in range(c):
            values = list(d.values())
            keys = list(d.keys())
            max_key = keys[values.index(max(values))]
            top[max_key] = max(values)
            del d[max_key]
        return top

    # Pretty print of top 5 words per location
    def print_values(self, d):
        """Print one row per city: the city name then its top words."""
        for key in d.keys():
            print('{:<20}'.format(key.replace('_', ' ')), end='')
            for val in d[key]:
                print('{:<20}'.format(val), end='')
            print()

    # Fits the Bayes Net model
    def fit(self):
        """Compute city priors and per-city word likelihoods from the
        training file, plus the corpus-wide word counts used later to
        pick distinctive words."""
        tweets = self.read_data(self.train_file)
        total_tweets = len(tweets)
        words_per_city = {}
        for twe in tweets:
            city = twe[0]
            # Accumulate the prior incrementally, one tweet at a time.
            if city in self.l_probs:
                self.l_probs[city] += 1 / total_tweets
            else:
                self.l_probs[city] = 1 / total_tweets
                words_per_city[city] = 0
            for word in twe[1:]:
                # Per-city word counts.
                if city in self.word_count_loc:
                    if word in self.word_count_loc[city]:
                        self.word_count_loc[city][word] += 1
                    else:
                        self.word_count_loc[city][word] = 1
                else:
                    self.word_count_loc[city] = {word: 1}
                words_per_city[city] += 1
                # Corpus-wide word counts.
                self.word_counts[word] = self.word_counts.get(word, 0) + 1

        # Keep only words frequent enough to be informative.
        #source: https://stackoverflow.com/questions/17095163/remove-a-dictionary-key-that-has-a-certain-value
        self.common_words = {k: v for k, v in self.word_counts.items() if v >= 5}
        self.cities = list(self.word_count_loc.keys())
        self.total_words = len(self.word_counts)
        for city in self.cities:
            self.word_probs[city] = {w: c / words_per_city[city] for w, c in self.word_count_loc[city].items()}

    # Find top 5 words by location
    def top_words(self):
        """Print the five most location-distinctive common words per city."""
        most_pop = {}
        top_five = {}
        for city in self.cities:
            # Sort the city's words by raw count, descending.
            self.word_count_loc[city] = {k: v for k, v in sorted(self.word_count_loc[city].items(), key=lambda x: x[1], reverse=True)}
            # Score = share of the word's corpus count seen in this city.
            most_pop[city] = {k: v / self.common_words[k] for k, v in self.word_count_loc[city].items() if k in self.common_words}
            top_five[city] = self.max_val(most_pop[city], 5)
        print()
        print('Top 5 Words Per Location')
        print('-------------------------------------------------------------------------------------------------------------------')
        self.print_values(top_five)
|
normal
|
{
"blob_id": "dee7b12862d02837fbb0f2310b136dd768ca7bab",
"index": 3277,
"step-1": "<mask token>\n\n\nclass BayesNetClassifier:\n\n def __init__(self, train_file, out_file):\n self.train_file = train_file\n self.out_file = out_file\n self.word_count_loc = {}\n self.word_probs = {}\n self.l_probs = {}\n self.word_counts = {}\n self.common_words = {}\n self.cities = []\n self.total_words = 0\n <mask token>\n\n def read_data(self, fname):\n exemplars = []\n file = open(fname, 'r')\n for line in file:\n data = tuple([(w if i == 0 else w.lower()) for i, w in\n enumerate(line.split())])\n exemplars += [data]\n return exemplars\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass BayesNetClassifier:\n\n def __init__(self, train_file, out_file):\n self.train_file = train_file\n self.out_file = out_file\n self.word_count_loc = {}\n self.word_probs = {}\n self.l_probs = {}\n self.word_counts = {}\n self.common_words = {}\n self.cities = []\n self.total_words = 0\n\n def pickle_probs(self):\n all_probs = {'type': 'bayes', 'location': self.l_probs, 'words':\n self.word_probs, 'total': self.total_words, 'cities': self.cities}\n with open(self.out_file, 'wb') as handle:\n pickle.dump(all_probs, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def read_data(self, fname):\n exemplars = []\n file = open(fname, 'r')\n for line in file:\n data = tuple([(w if i == 0 else w.lower()) for i, w in\n enumerate(line.split())])\n exemplars += [data]\n return exemplars\n\n def max_val(self, d, c):\n top = {}\n for i in range(c):\n v = list(d.values())\n k = list(d.keys())\n max_key = k[v.index(max(v))]\n top[max_key] = max(v)\n del d[max_key]\n return top\n <mask token>\n\n def fit(self):\n t0 = time.time()\n tweets = self.read_data(self.train_file)\n total_tweets = len(tweets)\n words_per_city = {}\n for twe in tweets:\n city = twe[0]\n if city in self.l_probs.keys():\n self.l_probs[city] += 1 / total_tweets\n else:\n self.l_probs[city] = 1 / total_tweets\n words_per_city[city] = 0\n for word in twe[1:]:\n if city in self.word_count_loc.keys():\n if word in self.word_count_loc[city].keys():\n self.word_count_loc[city][word] += 1\n words_per_city[city] += 1\n else:\n self.word_count_loc[city][word] = 1\n words_per_city[city] += 1\n else:\n self.word_count_loc[city] = {}\n self.word_count_loc[city][word] = 1\n words_per_city[city] += 1\n if word in self.word_counts.keys():\n self.word_counts[word] += 1\n else:\n self.word_counts[word] = 1\n self.common_words = {k: v for k, v in self.word_counts.items() if v >=\n 5}\n self.cities = list(self.word_count_loc.keys())\n self.total_words = len(self.word_counts.keys())\n for city in 
self.cities:\n self.word_probs[city] = {w: (c / words_per_city[city]) for w, c in\n self.word_count_loc[city].items()}\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass BayesNetClassifier:\n\n def __init__(self, train_file, out_file):\n self.train_file = train_file\n self.out_file = out_file\n self.word_count_loc = {}\n self.word_probs = {}\n self.l_probs = {}\n self.word_counts = {}\n self.common_words = {}\n self.cities = []\n self.total_words = 0\n\n def pickle_probs(self):\n all_probs = {'type': 'bayes', 'location': self.l_probs, 'words':\n self.word_probs, 'total': self.total_words, 'cities': self.cities}\n with open(self.out_file, 'wb') as handle:\n pickle.dump(all_probs, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def read_data(self, fname):\n exemplars = []\n file = open(fname, 'r')\n for line in file:\n data = tuple([(w if i == 0 else w.lower()) for i, w in\n enumerate(line.split())])\n exemplars += [data]\n return exemplars\n\n def max_val(self, d, c):\n top = {}\n for i in range(c):\n v = list(d.values())\n k = list(d.keys())\n max_key = k[v.index(max(v))]\n top[max_key] = max(v)\n del d[max_key]\n return top\n <mask token>\n\n def fit(self):\n t0 = time.time()\n tweets = self.read_data(self.train_file)\n total_tweets = len(tweets)\n words_per_city = {}\n for twe in tweets:\n city = twe[0]\n if city in self.l_probs.keys():\n self.l_probs[city] += 1 / total_tweets\n else:\n self.l_probs[city] = 1 / total_tweets\n words_per_city[city] = 0\n for word in twe[1:]:\n if city in self.word_count_loc.keys():\n if word in self.word_count_loc[city].keys():\n self.word_count_loc[city][word] += 1\n words_per_city[city] += 1\n else:\n self.word_count_loc[city][word] = 1\n words_per_city[city] += 1\n else:\n self.word_count_loc[city] = {}\n self.word_count_loc[city][word] = 1\n words_per_city[city] += 1\n if word in self.word_counts.keys():\n self.word_counts[word] += 1\n else:\n self.word_counts[word] = 1\n self.common_words = {k: v for k, v in self.word_counts.items() if v >=\n 5}\n self.cities = list(self.word_count_loc.keys())\n self.total_words = len(self.word_counts.keys())\n for city in 
self.cities:\n self.word_probs[city] = {w: (c / words_per_city[city]) for w, c in\n self.word_count_loc[city].items()}\n\n def top_words(self):\n most_pop = {}\n top_five = {}\n for city in self.cities:\n self.word_count_loc[city] = {k: v for k, v in sorted(self.\n word_count_loc[city].items(), key=lambda x: x[1], reverse=True)\n }\n most_pop[city] = {k: (v / self.common_words[k]) for k, v in\n self.word_count_loc[city].items() if k in self.common_words\n .keys()}\n top_five[city] = self.max_val(most_pop[city], 5)\n print()\n print('Top 5 Words Per Location')\n print(\n '-------------------------------------------------------------------------------------------------------------------'\n )\n self.print_values(top_five)\n",
"step-4": "<mask token>\n\n\nclass BayesNetClassifier:\n\n def __init__(self, train_file, out_file):\n self.train_file = train_file\n self.out_file = out_file\n self.word_count_loc = {}\n self.word_probs = {}\n self.l_probs = {}\n self.word_counts = {}\n self.common_words = {}\n self.cities = []\n self.total_words = 0\n\n def pickle_probs(self):\n all_probs = {'type': 'bayes', 'location': self.l_probs, 'words':\n self.word_probs, 'total': self.total_words, 'cities': self.cities}\n with open(self.out_file, 'wb') as handle:\n pickle.dump(all_probs, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n def read_data(self, fname):\n exemplars = []\n file = open(fname, 'r')\n for line in file:\n data = tuple([(w if i == 0 else w.lower()) for i, w in\n enumerate(line.split())])\n exemplars += [data]\n return exemplars\n\n def max_val(self, d, c):\n top = {}\n for i in range(c):\n v = list(d.values())\n k = list(d.keys())\n max_key = k[v.index(max(v))]\n top[max_key] = max(v)\n del d[max_key]\n return top\n\n def print_values(self, d):\n for key in d.keys():\n print('{:<20}'.format(key.replace('_', ' ')), end='')\n for i, val in enumerate(d[key]):\n print('{:<20}'.format(val), end='')\n print()\n\n def fit(self):\n t0 = time.time()\n tweets = self.read_data(self.train_file)\n total_tweets = len(tweets)\n words_per_city = {}\n for twe in tweets:\n city = twe[0]\n if city in self.l_probs.keys():\n self.l_probs[city] += 1 / total_tweets\n else:\n self.l_probs[city] = 1 / total_tweets\n words_per_city[city] = 0\n for word in twe[1:]:\n if city in self.word_count_loc.keys():\n if word in self.word_count_loc[city].keys():\n self.word_count_loc[city][word] += 1\n words_per_city[city] += 1\n else:\n self.word_count_loc[city][word] = 1\n words_per_city[city] += 1\n else:\n self.word_count_loc[city] = {}\n self.word_count_loc[city][word] = 1\n words_per_city[city] += 1\n if word in self.word_counts.keys():\n self.word_counts[word] += 1\n else:\n self.word_counts[word] = 1\n 
self.common_words = {k: v for k, v in self.word_counts.items() if v >=\n 5}\n self.cities = list(self.word_count_loc.keys())\n self.total_words = len(self.word_counts.keys())\n for city in self.cities:\n self.word_probs[city] = {w: (c / words_per_city[city]) for w, c in\n self.word_count_loc[city].items()}\n\n def top_words(self):\n most_pop = {}\n top_five = {}\n for city in self.cities:\n self.word_count_loc[city] = {k: v for k, v in sorted(self.\n word_count_loc[city].items(), key=lambda x: x[1], reverse=True)\n }\n most_pop[city] = {k: (v / self.common_words[k]) for k, v in\n self.word_count_loc[city].items() if k in self.common_words\n .keys()}\n top_five[city] = self.max_val(most_pop[city], 5)\n print()\n print('Top 5 Words Per Location')\n print(\n '-------------------------------------------------------------------------------------------------------------------'\n )\n self.print_values(top_five)\n",
"step-5": "import time\r\nimport pickle\r\n\r\nclass BayesNetClassifier:\r\n def __init__(self, train_file, out_file):\r\n self.train_file = train_file\r\n self.out_file = out_file\r\n self.word_count_loc = {}\r\n self.word_probs = {}\r\n self.l_probs = {}\r\n self.word_counts = {}\r\n self.common_words = {}\r\n self.cities = []\r\n self.total_words = 0\r\n \r\n # Saves probabilites to a pickle file\r\n def pickle_probs(self):\r\n all_probs = {'type': 'bayes', 'location': self.l_probs, 'words': self.word_probs, 'total': self.total_words, 'cities': self.cities}\r\n #source: https://stackoverflow.com/questions/11218477/how-can-i-use-pickle-to-save-a-dict\r\n with open(self.out_file, 'wb') as handle:\r\n pickle.dump(all_probs, handle, protocol=pickle.HIGHEST_PROTOCOL)\r\n \r\n # Loads training data\r\n # Some ideas taken from label.py given by Dr. Crandall \r\n def read_data(self, fname):\r\n exemplars = []\r\n file = open(fname, 'r');\r\n for line in file:\r\n data = tuple([w if i == 0 else w.lower() for i, w in enumerate(line.split())])\r\n exemplars += [data]\r\n return exemplars\r\n \r\n # Source: https://stackoverflow.com/questions/268272/getting-key-with-maximum-value-in-dictionary\r\n # Fastest way to find max value in dict\r\n def max_val(self, d, c): \r\n top = {}\r\n for i in range(c):\r\n v=list(d.values())\r\n k=list(d.keys())\r\n max_key = k[v.index(max(v))]\r\n top[max_key] = max(v)\r\n del d[max_key]\r\n return top\r\n # Stop Copy\r\n \r\n # Pretty print of top 5 words per location\r\n def print_values(self, d):\r\n for key in d.keys():\r\n print('{:<20}'.format(key.replace('_',' ')), end = '')\r\n for i, val in enumerate(d[key]):\r\n print('{:<20}'.format(val), end = '')\r\n print()\r\n \r\n # Fits the Bayes Net model\r\n def fit(self):\r\n t0 = time.time()\r\n tweets = self.read_data(self.train_file)\r\n total_tweets = len(tweets)\r\n words_per_city = {}\r\n for twe in tweets:\r\n city = twe[0]\r\n if city in self.l_probs.keys():\r\n 
self.l_probs[city] += 1/total_tweets\r\n else:\r\n self.l_probs[city] = 1/total_tweets\r\n words_per_city[city] = 0\r\n for word in twe[1:]:\r\n if city in self.word_count_loc.keys():\r\n if word in self.word_count_loc[city].keys(): \r\n self.word_count_loc[city][word] += 1\r\n words_per_city[city] +=1\r\n else:\r\n self.word_count_loc[city][word] = 1\r\n words_per_city[city] +=1\r\n else:\r\n self.word_count_loc[city] = {}\r\n self.word_count_loc[city][word] = 1\r\n words_per_city[city] +=1\r\n \r\n if word in self.word_counts.keys():\r\n self.word_counts[word] += 1\r\n else:\r\n self.word_counts[word] = 1\r\n \r\n #source: https://stackoverflow.com/questions/17095163/remove-a-dictionary-key-that-has-a-certain-value\r\n self.common_words = {k:v for k,v in self.word_counts.items() if v >= 5}\r\n self.cities = list(self.word_count_loc.keys())\r\n self.total_words = len(self.word_counts.keys())\r\n #print(self.max_val(self.word_counts,30))\r\n for city in self.cities:\r\n self.word_probs[city] = {w: c/words_per_city[city] for w,c in self.word_count_loc[city].items()}\r\n \r\n # Find top 5 words by location\r\n def top_words(self):\r\n most_pop = {}\r\n top_five = {}\r\n for city in self.cities:\r\n self.word_count_loc[city] = {k: v for k, v in sorted(self.word_count_loc[city].items(), key=lambda x: x[1], reverse = True)}\r\n most_pop[city] = {k:v/self.common_words[k] for k,v in self.word_count_loc[city].items() if k in self.common_words.keys()}\r\n top_five[city] = self.max_val(most_pop[city], 5)\r\n print()\r\n print('Top 5 Words Per Location')\r\n print('-------------------------------------------------------------------------------------------------------------------')\r\n self.print_values(top_five)\r\n",
"step-ids": [
3,
6,
7,
8,
10
]
}
|
[
3,
6,
7,
8,
10
] |
'''
Created on 5 Mar 2010
@author: oppianmatt
'''
# Hook to find setuptools if not installed: ez_setup bootstraps a local
# copy of setuptools; when the helper module itself is absent we fall
# through and rely on an already-installed system setuptools.
try:
    from ez_setup import use_setuptools
    use_setuptools()
except ImportError:
    pass
from setuptools import setup, find_packages
# Package metadata for distribution / PyPI upload.
setup(
    name="django-defaultsite",
    version="1.1",
    packages=find_packages('src'),
    package_dir={'': 'src'},
    package_data={'': ['LICENSE']},
    include_package_data=True,
    zip_safe=False,

    # metadata for upload to PyPI
    author="Oppian System Ltd",
    author_email="matt@oppian.com",
    # Fixed the fused package name and the "better then" typo in the summary.
    description="Sets the Site object in django to something better than example.com.",
    # 'license' expects the licence name, not a file path; the licence text
    # itself ships via package_data above.
    license="BSD",
    keywords="django site example.com",
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Framework :: Django',
    ],
    url="http://oppian.com/labs/django-defaultsite/",
    long_description=open('README.txt').read(),
)
|
normal
|
{
"blob_id": "5580e5942370c925b759b09675306cdfbc7dd4f1",
"index": 3633,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n from ez_setup import use_setuptools\n use_setuptools()\nexcept ImportError:\n pass\n<mask token>\nsetup(name='django-defaultsite', version='1.1', packages=find_packages(\n 'src'), package_dir={'': 'src'}, package_data={'': ['LICENSE']},\n include_package_data=True, zip_safe=False, author='Oppian System Ltd',\n author_email='matt@oppian.com', description=\n 'django-defaultsiteSets the Site object in django to something better then example.com.'\n , license='LICENSE.txt', keywords='django site example.com',\n classifiers=['Development Status :: 3 - Alpha',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Framework :: Django'], url=\n 'http://oppian.com/labs/django-defaultsite/', long_description=open(\n 'README.txt').read())\n",
"step-3": "<mask token>\ntry:\n from ez_setup import use_setuptools\n use_setuptools()\nexcept ImportError:\n pass\nfrom setuptools import setup, find_packages\nsetup(name='django-defaultsite', version='1.1', packages=find_packages(\n 'src'), package_dir={'': 'src'}, package_data={'': ['LICENSE']},\n include_package_data=True, zip_safe=False, author='Oppian System Ltd',\n author_email='matt@oppian.com', description=\n 'django-defaultsiteSets the Site object in django to something better then example.com.'\n , license='LICENSE.txt', keywords='django site example.com',\n classifiers=['Development Status :: 3 - Alpha',\n 'Environment :: Web Environment', 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent', 'Programming Language :: Python',\n 'Framework :: Django'], url=\n 'http://oppian.com/labs/django-defaultsite/', long_description=open(\n 'README.txt').read())\n",
"step-4": "'''\nCreated on 5 Mar 2010\n\n@author: oppianmatt\n'''\n\n# hook to find setup tools if not installed\ntry:\n from ez_setup import use_setuptools\n use_setuptools()\nexcept ImportError:\n pass\n\nfrom setuptools import setup, find_packages\nsetup(\n name = \"django-defaultsite\",\n version = \"1.1\",\n packages = find_packages('src'),\n package_dir = {'': 'src'},\n package_data={'': ['LICENSE']},\n include_package_data=True,\n zip_safe=False,\n \n # metadata for upload to PyPI\n author = \"Oppian System Ltd\",\n author_email = \"matt@oppian.com\",\n description = \"django-defaultsiteSets the Site object in django to something better then example.com.\",\n license = 'LICENSE.txt',\n keywords = \"django site example.com\",\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Framework :: Django',\n ],\n url = \"http://oppian.com/labs/django-defaultsite/\",\n long_description=open('README.txt').read(),\n)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import argparse
from data.downloader import *
from data.utils import *
from data.danmaku import *
from utils import *
# Bilibili AppKey passed to GetVideoInfo when querying the video-info API.
key = '03fc8eb101b091fb'

parser = argparse.ArgumentParser(description='Download Video From Bilibili')
parser.add_argument('-d', type=str, help='dataset')
parser.add_argument('-o', type=str, default='dataset', help='output directory')
parser.add_argument('-f', type=str, default='mp4', help='format')
parser.add_argument('-c', type=str, default='', help='country')
parser.add_argument('-q', type=int, default=0, help='quality')
parser.add_argument('-i', action='store_true', default=False, help=
    'ignore download')
args = parser.parse_args()

# Login cookie forwarded to download_list (needed for member-only content).
# NOTE(review): credentials are hard-coded — consider moving them to
# environment variables or a config file.
cookie = dict()
cookie['DedeUserID'] = '347368229'
cookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'
cookie['sid'] = 'ii8ca1k2'
cookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'

# Download every video listed in list/<dataset>.txt into <out>/<dataset>/video.
aids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.
    join(args.o, args.d, 'video'), **cookie, ignore=args.i, quality=args.q,
    debug=True)
print('[*] Video Download Finished')

# Collect per-video metadata keyed by aid, then pickle the whole mapping.
infos = dict()
for aid in aids:
    extra = dict()
    if 'ep' in aid:
        # Episode id (e.g. "ep12345"): resolve it to the season's aid and
        # derive the page index from the base episode number.
        epid = aid
        aid = attr['aid']
        fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid,
            args.f))
        # NOTE(review): `page` is computed but not used below — confirm
        # whether GetVideoInfo should receive it instead of the literal 1.
        page = int(epid[2:]) - int(attr['base'][2:]) + 1
        info = GetVideoInfo(aid.strip('av'), key, 1)
    else:
        fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))
        info = GetVideoInfo(aid.strip('av'), key)
    extra['danmaku'] = request_danmaku(cid=info.cid)
    if 'country' in attr:
        # Region-locked entry: keep only the danmaku and mark it incomplete.
        extra['country'] = attr['country']
        extra['complete'] = False
    else:
        # Open the downloaded file and extract frame-level statistics.
        capture = get_capture(fn)
        print('[*] Capture : {}'.format(fn))
        extra['duration'] = get_duration(capture=capture)
        extra['nframes'] = get_nframes(capture=capture)
        extra['fps'] = get_fps(capture=capture)
        extra['boundary'] = get_boundary(fn, capture, extra['nframes'],
            'hecate')
        extra['positions'] = get_positions(extra['nframes'])
        extra['fpsegment'] = get_fpsegment(extra['boundary'])
        extra['score'] = get_score(**extra)
        extra['summary'] = get_summary(**extra)
        extra['complete'] = True
    # Attach everything gathered above onto the info object.
    for k, v in extra.items():
        setattr(info, k, v)
    infos[aid] = info
save_pickle(infos, '{}.info'.format(args.d))
|
normal
|
{
"blob_id": "479411727de14e8032b6d01cdb844632111af688",
"index": 5275,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('-d', type=str, help='dataset')\nparser.add_argument('-o', type=str, default='dataset', help='output directory')\nparser.add_argument('-f', type=str, default='mp4', help='format')\nparser.add_argument('-c', type=str, default='', help='country')\nparser.add_argument('-q', type=int, default=0, help='quality')\nparser.add_argument('-i', action='store_true', default=False, help=\n 'ignore download')\n<mask token>\nprint('[*] Video Download Finished')\n<mask token>\nfor aid in aids:\n extra = dict()\n if 'ep' in aid:\n epid = aid\n aid = attr['aid']\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)\n )\n page = int(epid[2:]) - int(attr['base'][2:]) + 1\n info = GetVideoInfo(aid.strip('av'), key, 1)\n else:\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))\n info = GetVideoInfo(aid.strip('av'), key)\n extra['danmaku'] = request_danmaku(cid=info.cid)\n if 'country' in attr:\n extra['country'] = attr['country']\n extra['complete'] = False\n else:\n capture = get_capture(fn)\n print('[*] Capture : {}'.format(fn))\n extra['duration'] = get_duration(capture=capture)\n extra['duration'] = get_duration(capture=capture)\n extra['nframes'] = get_nframes(capture=capture)\n extra['fps'] = get_fps(capture=capture)\n extra['boundary'] = get_boundary(fn, capture, extra['nframes'],\n 'hecate')\n extra['positions'] = get_positions(extra['nframes'])\n extra['fpsegment'] = get_fpsegment(extra['boundary'])\n extra['score'] = get_score(**extra)\n extra['summary'] = get_summary(**extra)\n extra['complete'] = True\n for k, v in extra.items():\n setattr(info, k, v)\n infos[aid] = info\nsave_pickle(infos, '{}.info'.format(args.d))\n",
"step-3": "<mask token>\nkey = '03fc8eb101b091fb'\nparser = argparse.ArgumentParser(description='Download Video From Bilibili')\nparser.add_argument('-d', type=str, help='dataset')\nparser.add_argument('-o', type=str, default='dataset', help='output directory')\nparser.add_argument('-f', type=str, default='mp4', help='format')\nparser.add_argument('-c', type=str, default='', help='country')\nparser.add_argument('-q', type=int, default=0, help='quality')\nparser.add_argument('-i', action='store_true', default=False, help=\n 'ignore download')\nargs = parser.parse_args()\ncookie = dict()\ncookie['DedeUserID'] = '347368229'\ncookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'\ncookie['sid'] = 'ii8ca1k2'\ncookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'\naids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.\n join(args.o, args.d, 'video'), **cookie, ignore=args.i, quality=args.q,\n debug=True)\nprint('[*] Video Download Finished')\ninfos = dict()\nfor aid in aids:\n extra = dict()\n if 'ep' in aid:\n epid = aid\n aid = attr['aid']\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)\n )\n page = int(epid[2:]) - int(attr['base'][2:]) + 1\n info = GetVideoInfo(aid.strip('av'), key, 1)\n else:\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))\n info = GetVideoInfo(aid.strip('av'), key)\n extra['danmaku'] = request_danmaku(cid=info.cid)\n if 'country' in attr:\n extra['country'] = attr['country']\n extra['complete'] = False\n else:\n capture = get_capture(fn)\n print('[*] Capture : {}'.format(fn))\n extra['duration'] = get_duration(capture=capture)\n extra['duration'] = get_duration(capture=capture)\n extra['nframes'] = get_nframes(capture=capture)\n extra['fps'] = get_fps(capture=capture)\n extra['boundary'] = get_boundary(fn, capture, extra['nframes'],\n 'hecate')\n extra['positions'] = get_positions(extra['nframes'])\n extra['fpsegment'] = get_fpsegment(extra['boundary'])\n extra['score'] = 
get_score(**extra)\n extra['summary'] = get_summary(**extra)\n extra['complete'] = True\n for k, v in extra.items():\n setattr(info, k, v)\n infos[aid] = info\nsave_pickle(infos, '{}.info'.format(args.d))\n",
"step-4": "import os\nimport argparse\nfrom data.downloader import *\nfrom data.utils import *\nfrom data.danmaku import *\nfrom utils import *\nkey = '03fc8eb101b091fb'\nparser = argparse.ArgumentParser(description='Download Video From Bilibili')\nparser.add_argument('-d', type=str, help='dataset')\nparser.add_argument('-o', type=str, default='dataset', help='output directory')\nparser.add_argument('-f', type=str, default='mp4', help='format')\nparser.add_argument('-c', type=str, default='', help='country')\nparser.add_argument('-q', type=int, default=0, help='quality')\nparser.add_argument('-i', action='store_true', default=False, help=\n 'ignore download')\nargs = parser.parse_args()\ncookie = dict()\ncookie['DedeUserID'] = '347368229'\ncookie['DedeUserID__ckMd5'] = '6e02ca142544e64c'\ncookie['sid'] = 'ii8ca1k2'\ncookie['SESSDATA'] = '1d13f39c%2C1544246349%2Cc62b611b'\naids, attr = download_list(os.path.join('list', args.d + '.txt'), os.path.\n join(args.o, args.d, 'video'), **cookie, ignore=args.i, quality=args.q,\n debug=True)\nprint('[*] Video Download Finished')\ninfos = dict()\nfor aid in aids:\n extra = dict()\n if 'ep' in aid:\n epid = aid\n aid = attr['aid']\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(epid, args.f)\n )\n page = int(epid[2:]) - int(attr['base'][2:]) + 1\n info = GetVideoInfo(aid.strip('av'), key, 1)\n else:\n fn = os.path.join(args.o, args.d, 'video', '{}.{}'.format(aid, args.f))\n info = GetVideoInfo(aid.strip('av'), key)\n extra['danmaku'] = request_danmaku(cid=info.cid)\n if 'country' in attr:\n extra['country'] = attr['country']\n extra['complete'] = False\n else:\n capture = get_capture(fn)\n print('[*] Capture : {}'.format(fn))\n extra['duration'] = get_duration(capture=capture)\n extra['duration'] = get_duration(capture=capture)\n extra['nframes'] = get_nframes(capture=capture)\n extra['fps'] = get_fps(capture=capture)\n extra['boundary'] = get_boundary(fn, capture, extra['nframes'],\n 'hecate')\n extra['positions'] 
= get_positions(extra['nframes'])\n extra['fpsegment'] = get_fpsegment(extra['boundary'])\n extra['score'] = get_score(**extra)\n extra['summary'] = get_summary(**extra)\n extra['complete'] = True\n for k, v in extra.items():\n setattr(info, k, v)\n infos[aid] = info\nsave_pickle(infos, '{}.info'.format(args.d))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import datetime
import logging
import os
import requests
from bs4 import BeautifulSoup
import telebot
from azure.storage.blob import BlobClient
import hashlib
import azure.functions as func
def hash_string(input_string: str) -> str:
    """Return the hex-encoded SHA-256 digest of *input_string* (UTF-8)."""
    digest = hashlib.sha256()
    digest.update(input_string.encode("utf-8"))
    return digest.hexdigest()
def main(mytimer: func.TimerRequest) -> None:
    """Azure Functions timer trigger: watch a web page for changed links.

    Scrapes ``TargetUrl`` for anchor hrefs containing ``SearchTerm``, hashes
    the joined link list, and compares it against the hash stored as
    ``hash.tmp`` in the ``hashstore`` blob container.  When the hash differs,
    a Telegram message is sent to ``TelebotChatId`` and the stored hash is
    replaced.  A timestamped snapshot of the link list is uploaded on every
    run regardless of change.
    """
    utc_timestamp = datetime.datetime.utcnow().replace(
        tzinfo=datetime.timezone.utc).isoformat()
    if mytimer.past_due:
        logging.info('The timer is past due!')
    logging.info('Python timer trigger function ran at %s', utc_timestamp)
    # All configuration comes from app settings (environment variables).
    url = os.environ['TargetUrl']
    search_term = os.environ['SearchTerm']
    reqs = requests.get(url)
    soup = BeautifulSoup(reqs.text, 'html.parser')
    token = telebot.TeleBot(os.environ['TelebotToken'])
    chat_id = os.environ['TelebotChatId']
    urls = []
    for link in soup.find_all('a'):
        # NOTE(review): <a> tags without an href yield None here, which
        # would raise on the `in` test below — confirm the page guarantees
        # hrefs, or guard against None.
        link_url = link.get('href')
        # Add only links that contain the search term
        if search_term in link_url:
            urls.append(link_url)
    logging.info(f"Looking for: {search_term}")
    logging.info(f"Urls conatining the pattern: {urls}")
    # Canonical representation of the link list; its hash is the change key.
    lst_to_str = ';'.join([str(i) for i in urls])
    new_hash = hash_string(lst_to_str)
    now = datetime.datetime.now()
    file_suffix = now.strftime("%Y%m%d%I%M%S")
    year = now.year
    month = now.month
    day = now.day
    # Archive this run's link list under urls/<year>/<month>/<day>/.
    blob = BlobClient.from_connection_string(
        conn_str=os.environ['AzureWebJobsStorage'], container_name="hashstore", blob_name=f'urls/{year}/{month}/{day}/html-{file_suffix}.html')
    blob.upload_blob(lst_to_str, blob_type='BlockBlob')
    logging.info(new_hash)
    # hash.tmp holds the hash recorded by the previous run (if any).
    blob = BlobClient.from_connection_string(
        conn_str=os.environ['AzureWebJobsStorage'], container_name="hashstore", blob_name='hash.tmp')
    blob_hash = ''
    if blob.exists():
        blob_hash = str(blob.download_blob().readall())
    if blob_hash != new_hash:
        message = f'Hash of this page: {url} has changed'
        # NOTE(review): `token` is already a TeleBot instance, yet it is
        # wrapped in TeleBot() again and assigned as the api_key below —
        # confirm against the telebot API that this is intended.
        bot = telebot.TeleBot(token)
        bot.config['api_key'] = token
        bot.send_message(chat_id, message)
        # Remove the stale hash so the fresh one can be uploaded below.
        blob.delete_blob()
    blob.upload_blob(new_hash, blob_type='BlockBlob')
    logging.info(f'Old hash >>>> {blob_hash}')
    logging.info(f'New hash >>>> {new_hash}')
|
normal
|
{
"blob_id": "670a23aa910a6709735281b7e64e5254a19277c6",
"index": 7924,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef hash_string(input_string: str) ->str:\n return hashlib.sha256(input_string.encode('utf-8')).hexdigest()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef hash_string(input_string: str) ->str:\n return hashlib.sha256(input_string.encode('utf-8')).hexdigest()\n\n\ndef main(mytimer: func.TimerRequest) ->None:\n utc_timestamp = datetime.datetime.utcnow().replace(tzinfo=datetime.\n timezone.utc).isoformat()\n if mytimer.past_due:\n logging.info('The timer is past due!')\n logging.info('Python timer trigger function ran at %s', utc_timestamp)\n url = os.environ['TargetUrl']\n search_term = os.environ['SearchTerm']\n reqs = requests.get(url)\n soup = BeautifulSoup(reqs.text, 'html.parser')\n token = telebot.TeleBot(os.environ['TelebotToken'])\n chat_id = os.environ['TelebotChatId']\n urls = []\n for link in soup.find_all('a'):\n link_url = link.get('href')\n if search_term in link_url:\n urls.append(link_url)\n logging.info(f'Looking for: {search_term}')\n logging.info(f'Urls conatining the pattern: {urls}')\n lst_to_str = ';'.join([str(i) for i in urls])\n new_hash = hash_string(lst_to_str)\n now = datetime.datetime.now()\n file_suffix = now.strftime('%Y%m%d%I%M%S')\n year = now.year\n month = now.month\n day = now.day\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n f'urls/{year}/{month}/{day}/html-{file_suffix}.html')\n blob.upload_blob(lst_to_str, blob_type='BlockBlob')\n logging.info(new_hash)\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n 'hash.tmp')\n blob_hash = ''\n if blob.exists():\n blob_hash = str(blob.download_blob().readall())\n if blob_hash != new_hash:\n message = f'Hash of this page: {url} has changed'\n bot = telebot.TeleBot(token)\n bot.config['api_key'] = token\n bot.send_message(chat_id, message)\n blob.delete_blob()\n blob.upload_blob(new_hash, blob_type='BlockBlob')\n logging.info(f'Old hash >>>> {blob_hash}')\n logging.info(f'New hash >>>> {new_hash}')\n",
"step-4": "import datetime\nimport logging\nimport os\nimport requests\nfrom bs4 import BeautifulSoup\nimport telebot\nfrom azure.storage.blob import BlobClient\nimport hashlib\nimport azure.functions as func\n\n\ndef hash_string(input_string: str) ->str:\n return hashlib.sha256(input_string.encode('utf-8')).hexdigest()\n\n\ndef main(mytimer: func.TimerRequest) ->None:\n utc_timestamp = datetime.datetime.utcnow().replace(tzinfo=datetime.\n timezone.utc).isoformat()\n if mytimer.past_due:\n logging.info('The timer is past due!')\n logging.info('Python timer trigger function ran at %s', utc_timestamp)\n url = os.environ['TargetUrl']\n search_term = os.environ['SearchTerm']\n reqs = requests.get(url)\n soup = BeautifulSoup(reqs.text, 'html.parser')\n token = telebot.TeleBot(os.environ['TelebotToken'])\n chat_id = os.environ['TelebotChatId']\n urls = []\n for link in soup.find_all('a'):\n link_url = link.get('href')\n if search_term in link_url:\n urls.append(link_url)\n logging.info(f'Looking for: {search_term}')\n logging.info(f'Urls conatining the pattern: {urls}')\n lst_to_str = ';'.join([str(i) for i in urls])\n new_hash = hash_string(lst_to_str)\n now = datetime.datetime.now()\n file_suffix = now.strftime('%Y%m%d%I%M%S')\n year = now.year\n month = now.month\n day = now.day\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n f'urls/{year}/{month}/{day}/html-{file_suffix}.html')\n blob.upload_blob(lst_to_str, blob_type='BlockBlob')\n logging.info(new_hash)\n blob = BlobClient.from_connection_string(conn_str=os.environ[\n 'AzureWebJobsStorage'], container_name='hashstore', blob_name=\n 'hash.tmp')\n blob_hash = ''\n if blob.exists():\n blob_hash = str(blob.download_blob().readall())\n if blob_hash != new_hash:\n message = f'Hash of this page: {url} has changed'\n bot = telebot.TeleBot(token)\n bot.config['api_key'] = token\n bot.send_message(chat_id, message)\n blob.delete_blob()\n 
blob.upload_blob(new_hash, blob_type='BlockBlob')\n logging.info(f'Old hash >>>> {blob_hash}')\n logging.info(f'New hash >>>> {new_hash}')\n",
"step-5": "import datetime\r\nimport logging\r\nimport os\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport telebot\r\nfrom azure.storage.blob import BlobClient\r\nimport hashlib\r\n\r\nimport azure.functions as func\r\n\r\n\r\ndef hash_string(input_string: str) -> str:\r\n return hashlib.sha256(input_string.encode(\"utf-8\")).hexdigest()\r\n\r\n\r\ndef main(mytimer: func.TimerRequest) -> None:\r\n utc_timestamp = datetime.datetime.utcnow().replace(\r\n tzinfo=datetime.timezone.utc).isoformat()\r\n\r\n if mytimer.past_due:\r\n logging.info('The timer is past due!')\r\n\r\n logging.info('Python timer trigger function ran at %s', utc_timestamp)\r\n\r\n url = os.environ['TargetUrl']\r\n search_term = os.environ['SearchTerm']\r\n reqs = requests.get(url)\r\n soup = BeautifulSoup(reqs.text, 'html.parser')\r\n token = telebot.TeleBot(os.environ['TelebotToken'])\r\n chat_id = os.environ['TelebotChatId']\r\n\r\n urls = []\r\n for link in soup.find_all('a'):\r\n link_url = link.get('href')\r\n # Add only links that contain the search term\r\n if search_term in link_url:\r\n urls.append(link_url)\r\n\r\n logging.info(f\"Looking for: {search_term}\")\r\n logging.info(f\"Urls conatining the pattern: {urls}\")\r\n\r\n lst_to_str = ';'.join([str(i) for i in urls])\r\n new_hash = hash_string(lst_to_str)\r\n now = datetime.datetime.now()\r\n file_suffix = now.strftime(\"%Y%m%d%I%M%S\")\r\n year = now.year\r\n month = now.month\r\n day = now.day\r\n\r\n blob = BlobClient.from_connection_string(\r\n conn_str=os.environ['AzureWebJobsStorage'], container_name=\"hashstore\", blob_name=f'urls/{year}/{month}/{day}/html-{file_suffix}.html')\r\n blob.upload_blob(lst_to_str, blob_type='BlockBlob')\r\n\r\n logging.info(new_hash)\r\n\r\n blob = BlobClient.from_connection_string(\r\n conn_str=os.environ['AzureWebJobsStorage'], container_name=\"hashstore\", blob_name='hash.tmp')\r\n blob_hash = ''\r\n if blob.exists():\r\n blob_hash = str(blob.download_blob().readall())\r\n if 
blob_hash != new_hash:\r\n message = f'Hash of this page: {url} has changed'\r\n bot = telebot.TeleBot(token)\r\n bot.config['api_key'] = token\r\n bot.send_message(chat_id, message)\r\n blob.delete_blob()\r\n\r\n blob.upload_blob(new_hash, blob_type='BlockBlob')\r\n\r\n logging.info(f'Old hash >>>> {blob_hash}')\r\n logging.info(f'New hash >>>> {new_hash}')\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class PredictionQueryToken(Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, session=None, continuation=None, max_count=None,
order_by=None, tags=None, iteration_id=None, start_time=None,
end_time=None, application=None):
super(PredictionQueryToken, self).__init__()
self.session = session
self.continuation = continuation
self.max_count = max_count
self.order_by = order_by
self.tags = tags
self.iteration_id = iteration_id
self.start_time = start_time
self.end_time = end_time
self.application = application
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PredictionQueryToken(Model):
<|reserved_special_token_0|>
_attribute_map = {'session': {'key': 'Session', 'type': 'str'},
'continuation': {'key': 'Continuation', 'type': 'str'}, 'max_count':
{'key': 'MaxCount', 'type': 'int'}, 'order_by': {'key': 'OrderBy',
'type': 'str'}, 'tags': {'key': 'Tags', 'type':
'[PredictionQueryTag]'}, 'iteration_id': {'key': 'IterationId',
'type': 'str'}, 'start_time': {'key': 'StartTime', 'type':
'iso-8601'}, 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},
'application': {'key': 'Application', 'type': 'str'}}
def __init__(self, session=None, continuation=None, max_count=None,
order_by=None, tags=None, iteration_id=None, start_time=None,
end_time=None, application=None):
super(PredictionQueryToken, self).__init__()
self.session = session
self.continuation = continuation
self.max_count = max_count
self.order_by = order_by
self.tags = tags
self.iteration_id = iteration_id
self.start_time = start_time
self.end_time = end_time
self.application = application
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PredictionQueryToken(Model):
"""PredictionQueryToken.
:param session:
:type session: str
:param continuation:
:type continuation: str
:param max_count:
:type max_count: int
:param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'
:type order_by: str or
~azure.cognitiveservices.vision.customvision.training.models.enum
:param tags:
:type tags:
list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]
:param iteration_id:
:type iteration_id: str
:param start_time:
:type start_time: datetime
:param end_time:
:type end_time: datetime
:param application:
:type application: str
"""
_attribute_map = {'session': {'key': 'Session', 'type': 'str'},
'continuation': {'key': 'Continuation', 'type': 'str'}, 'max_count':
{'key': 'MaxCount', 'type': 'int'}, 'order_by': {'key': 'OrderBy',
'type': 'str'}, 'tags': {'key': 'Tags', 'type':
'[PredictionQueryTag]'}, 'iteration_id': {'key': 'IterationId',
'type': 'str'}, 'start_time': {'key': 'StartTime', 'type':
'iso-8601'}, 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},
'application': {'key': 'Application', 'type': 'str'}}
def __init__(self, session=None, continuation=None, max_count=None,
order_by=None, tags=None, iteration_id=None, start_time=None,
end_time=None, application=None):
super(PredictionQueryToken, self).__init__()
self.session = session
self.continuation = continuation
self.max_count = max_count
self.order_by = order_by
self.tags = tags
self.iteration_id = iteration_id
self.start_time = start_time
self.end_time = end_time
self.application = application
<|reserved_special_token_1|>
from msrest.serialization import Model
class PredictionQueryToken(Model):
"""PredictionQueryToken.
:param session:
:type session: str
:param continuation:
:type continuation: str
:param max_count:
:type max_count: int
:param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'
:type order_by: str or
~azure.cognitiveservices.vision.customvision.training.models.enum
:param tags:
:type tags:
list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]
:param iteration_id:
:type iteration_id: str
:param start_time:
:type start_time: datetime
:param end_time:
:type end_time: datetime
:param application:
:type application: str
"""
_attribute_map = {'session': {'key': 'Session', 'type': 'str'},
'continuation': {'key': 'Continuation', 'type': 'str'}, 'max_count':
{'key': 'MaxCount', 'type': 'int'}, 'order_by': {'key': 'OrderBy',
'type': 'str'}, 'tags': {'key': 'Tags', 'type':
'[PredictionQueryTag]'}, 'iteration_id': {'key': 'IterationId',
'type': 'str'}, 'start_time': {'key': 'StartTime', 'type':
'iso-8601'}, 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},
'application': {'key': 'Application', 'type': 'str'}}
def __init__(self, session=None, continuation=None, max_count=None,
order_by=None, tags=None, iteration_id=None, start_time=None,
end_time=None, application=None):
super(PredictionQueryToken, self).__init__()
self.session = session
self.continuation = continuation
self.max_count = max_count
self.order_by = order_by
self.tags = tags
self.iteration_id = iteration_id
self.start_time = start_time
self.end_time = end_time
self.application = application
<|reserved_special_token_1|>
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PredictionQueryToken(Model):
    """Query token describing a prediction-query request.

    :param session:
    :type session: str
    :param continuation:
    :type continuation: str
    :param max_count:
    :type max_count: int
    :param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'
    :type order_by: str or
     ~azure.cognitiveservices.vision.customvision.training.models.enum
    :param tags:
    :type tags:
     list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]
    :param iteration_id:
    :type iteration_id: str
    :param start_time:
    :type start_time: datetime
    :param end_time:
    :type end_time: datetime
    :param application:
    :type application: str
    """

    # Wire-format mapping consumed by msrest's serializer.
    _attribute_map = {
        'session': {'key': 'Session', 'type': 'str'},
        'continuation': {'key': 'Continuation', 'type': 'str'},
        'max_count': {'key': 'MaxCount', 'type': 'int'},
        'order_by': {'key': 'OrderBy', 'type': 'str'},
        'tags': {'key': 'Tags', 'type': '[PredictionQueryTag]'},
        'iteration_id': {'key': 'IterationId', 'type': 'str'},
        'start_time': {'key': 'StartTime', 'type': 'iso-8601'},
        'end_time': {'key': 'EndTime', 'type': 'iso-8601'},
        'application': {'key': 'Application', 'type': 'str'},
    }

    def __init__(self, session=None, continuation=None, max_count=None,
                 order_by=None, tags=None, iteration_id=None, start_time=None,
                 end_time=None, application=None):
        super(PredictionQueryToken, self).__init__()
        # Plain attribute copies; serialization is driven by _attribute_map.
        self.application = application
        self.end_time = end_time
        self.start_time = start_time
        self.iteration_id = iteration_id
        self.tags = tags
        self.order_by = order_by
        self.max_count = max_count
        self.continuation = continuation
        self.session = session
|
flexible
|
{
"blob_id": "0719448e7eb8d48e636be1332c904beebf27e02d",
"index": 4163,
"step-1": "<mask token>\n\n\nclass PredictionQueryToken(Model):\n <mask token>\n <mask token>\n\n def __init__(self, session=None, continuation=None, max_count=None,\n order_by=None, tags=None, iteration_id=None, start_time=None,\n end_time=None, application=None):\n super(PredictionQueryToken, self).__init__()\n self.session = session\n self.continuation = continuation\n self.max_count = max_count\n self.order_by = order_by\n self.tags = tags\n self.iteration_id = iteration_id\n self.start_time = start_time\n self.end_time = end_time\n self.application = application\n",
"step-2": "<mask token>\n\n\nclass PredictionQueryToken(Model):\n <mask token>\n _attribute_map = {'session': {'key': 'Session', 'type': 'str'},\n 'continuation': {'key': 'Continuation', 'type': 'str'}, 'max_count':\n {'key': 'MaxCount', 'type': 'int'}, 'order_by': {'key': 'OrderBy',\n 'type': 'str'}, 'tags': {'key': 'Tags', 'type':\n '[PredictionQueryTag]'}, 'iteration_id': {'key': 'IterationId',\n 'type': 'str'}, 'start_time': {'key': 'StartTime', 'type':\n 'iso-8601'}, 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},\n 'application': {'key': 'Application', 'type': 'str'}}\n\n def __init__(self, session=None, continuation=None, max_count=None,\n order_by=None, tags=None, iteration_id=None, start_time=None,\n end_time=None, application=None):\n super(PredictionQueryToken, self).__init__()\n self.session = session\n self.continuation = continuation\n self.max_count = max_count\n self.order_by = order_by\n self.tags = tags\n self.iteration_id = iteration_id\n self.start_time = start_time\n self.end_time = end_time\n self.application = application\n",
"step-3": "<mask token>\n\n\nclass PredictionQueryToken(Model):\n \"\"\"PredictionQueryToken.\n\n :param session:\n :type session: str\n :param continuation:\n :type continuation: str\n :param max_count:\n :type max_count: int\n :param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'\n :type order_by: str or\n ~azure.cognitiveservices.vision.customvision.training.models.enum\n :param tags:\n :type tags:\n list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]\n :param iteration_id:\n :type iteration_id: str\n :param start_time:\n :type start_time: datetime\n :param end_time:\n :type end_time: datetime\n :param application:\n :type application: str\n \"\"\"\n _attribute_map = {'session': {'key': 'Session', 'type': 'str'},\n 'continuation': {'key': 'Continuation', 'type': 'str'}, 'max_count':\n {'key': 'MaxCount', 'type': 'int'}, 'order_by': {'key': 'OrderBy',\n 'type': 'str'}, 'tags': {'key': 'Tags', 'type':\n '[PredictionQueryTag]'}, 'iteration_id': {'key': 'IterationId',\n 'type': 'str'}, 'start_time': {'key': 'StartTime', 'type':\n 'iso-8601'}, 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},\n 'application': {'key': 'Application', 'type': 'str'}}\n\n def __init__(self, session=None, continuation=None, max_count=None,\n order_by=None, tags=None, iteration_id=None, start_time=None,\n end_time=None, application=None):\n super(PredictionQueryToken, self).__init__()\n self.session = session\n self.continuation = continuation\n self.max_count = max_count\n self.order_by = order_by\n self.tags = tags\n self.iteration_id = iteration_id\n self.start_time = start_time\n self.end_time = end_time\n self.application = application\n",
"step-4": "from msrest.serialization import Model\n\n\nclass PredictionQueryToken(Model):\n \"\"\"PredictionQueryToken.\n\n :param session:\n :type session: str\n :param continuation:\n :type continuation: str\n :param max_count:\n :type max_count: int\n :param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'\n :type order_by: str or\n ~azure.cognitiveservices.vision.customvision.training.models.enum\n :param tags:\n :type tags:\n list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]\n :param iteration_id:\n :type iteration_id: str\n :param start_time:\n :type start_time: datetime\n :param end_time:\n :type end_time: datetime\n :param application:\n :type application: str\n \"\"\"\n _attribute_map = {'session': {'key': 'Session', 'type': 'str'},\n 'continuation': {'key': 'Continuation', 'type': 'str'}, 'max_count':\n {'key': 'MaxCount', 'type': 'int'}, 'order_by': {'key': 'OrderBy',\n 'type': 'str'}, 'tags': {'key': 'Tags', 'type':\n '[PredictionQueryTag]'}, 'iteration_id': {'key': 'IterationId',\n 'type': 'str'}, 'start_time': {'key': 'StartTime', 'type':\n 'iso-8601'}, 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},\n 'application': {'key': 'Application', 'type': 'str'}}\n\n def __init__(self, session=None, continuation=None, max_count=None,\n order_by=None, tags=None, iteration_id=None, start_time=None,\n end_time=None, application=None):\n super(PredictionQueryToken, self).__init__()\n self.session = session\n self.continuation = continuation\n self.max_count = max_count\n self.order_by = order_by\n self.tags = tags\n self.iteration_id = iteration_id\n self.start_time = start_time\n self.end_time = end_time\n self.application = application\n",
"step-5": "# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass PredictionQueryToken(Model):\n \"\"\"PredictionQueryToken.\n\n :param session:\n :type session: str\n :param continuation:\n :type continuation: str\n :param max_count:\n :type max_count: int\n :param order_by: Possible values include: 'Newest', 'Oldest', 'Suggested'\n :type order_by: str or\n ~azure.cognitiveservices.vision.customvision.training.models.enum\n :param tags:\n :type tags:\n list[~azure.cognitiveservices.vision.customvision.training.models.PredictionQueryTag]\n :param iteration_id:\n :type iteration_id: str\n :param start_time:\n :type start_time: datetime\n :param end_time:\n :type end_time: datetime\n :param application:\n :type application: str\n \"\"\"\n\n _attribute_map = {\n 'session': {'key': 'Session', 'type': 'str'},\n 'continuation': {'key': 'Continuation', 'type': 'str'},\n 'max_count': {'key': 'MaxCount', 'type': 'int'},\n 'order_by': {'key': 'OrderBy', 'type': 'str'},\n 'tags': {'key': 'Tags', 'type': '[PredictionQueryTag]'},\n 'iteration_id': {'key': 'IterationId', 'type': 'str'},\n 'start_time': {'key': 'StartTime', 'type': 'iso-8601'},\n 'end_time': {'key': 'EndTime', 'type': 'iso-8601'},\n 'application': {'key': 'Application', 'type': 'str'},\n }\n\n def __init__(self, session=None, continuation=None, max_count=None, order_by=None, tags=None, iteration_id=None, start_time=None, end_time=None, application=None):\n super(PredictionQueryToken, self).__init__()\n self.session = session\n 
self.continuation = continuation\n self.max_count = max_count\n self.order_by = order_by\n self.tags = tags\n self.iteration_id = iteration_id\n self.start_time = start_time\n self.end_time = end_time\n self.application = application\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def assert_number(arg):
    """Raise TypeError unless *arg* is an int or a float.

    Note: bools also pass, since ``bool`` is a subclass of ``int``.
    """
    if isinstance(arg, (int, float)):
        return
    raise TypeError(f'Expected number, got {type(arg)}')
<|reserved_special_token_1|>
def assert_number(arg):
    """Validate that *arg* is numeric (int or float); raise TypeError otherwise."""
    is_numeric = isinstance(arg, (int, float))
    if not is_numeric:
        raise TypeError(f"Expected number, got {type(arg)}")
|
flexible
|
{
"blob_id": "2de62c73507acac597d70557adfe8286e2f28a1f",
"index": 5569,
"step-1": "<mask token>\n",
"step-2": "def assert_number(arg):\n if not isinstance(arg, (int, float)):\n raise TypeError(f'Expected number, got {type(arg)}')\n",
"step-3": "def assert_number(arg):\n if not isinstance(arg, (int, float)):\n raise TypeError(f\"Expected number, got {type(arg)}\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def canny(img):
grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)
canny_filtered = cv2.Canny(blurred_img, 30, 150)
return canny_filtered
<|reserved_special_token_0|>
def dispay_lines(img, lines):
line_img = np.zeros_like(img)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)
return line_img
def get_cords(img, line_slope_int):
slope, intercept = line_slope_int
y1 = img.shape[0]
y2 = int(y1 * (4 / 5))
x1 = int((y1 - intercept) / slope)
x2 = int((y2 - intercept) / slope)
height, width, _ = img.shape
if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <
0 or y2 > height or y2 < 0):
return np.array([0, 0, 0, 0])
return np.array([x1, y1, x2, y2])
def average_slope_intercept(img, lines):
left_fit = []
right_fit = []
for line in lines:
x1, y1, x2, y2 = line.reshape(4)
slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
print(slope, intercept)
if slope >= 0:
left_fit.append((slope, intercept))
else:
right_fit.append((slope, intercept))
if left_fit:
left_fit_avg = np.average(left_fit, axis=0)
left_line = get_cords(img, left_fit_avg)
else:
left_line = np.array([0, 0, 0, 0])
if right_fit:
right_fit_avg = np.average(right_fit, axis=0)
right_line = get_cords(img, right_fit_avg)
else:
right_line = np.array([0, 0, 0, 0])
return np.array([left_line, right_line])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def canny(img):
grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)
canny_filtered = cv2.Canny(blurred_img, 30, 150)
return canny_filtered
def region_of_interest(img):
height, width = img.shape
height -= 60
width -= 10
Polygons = np.array([[(width, height), (50, height), (int(3 / 8 * width
), int(3 / 4 * height)), (int(5 / 8 * width), int(3 / 4 * height))]])
mask = np.zeros_like(img)
cv2.fillConvexPoly(img=mask, points=Polygons, color=255, lineType=cv2.
LINE_AA)
mask_img = cv2.bitwise_and(img, mask)
return mask_img
def dispay_lines(img, lines):
line_img = np.zeros_like(img)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)
return line_img
def get_cords(img, line_slope_int):
slope, intercept = line_slope_int
y1 = img.shape[0]
y2 = int(y1 * (4 / 5))
x1 = int((y1 - intercept) / slope)
x2 = int((y2 - intercept) / slope)
height, width, _ = img.shape
if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <
0 or y2 > height or y2 < 0):
return np.array([0, 0, 0, 0])
return np.array([x1, y1, x2, y2])
def average_slope_intercept(img, lines):
left_fit = []
right_fit = []
for line in lines:
x1, y1, x2, y2 = line.reshape(4)
slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
print(slope, intercept)
if slope >= 0:
left_fit.append((slope, intercept))
else:
right_fit.append((slope, intercept))
if left_fit:
left_fit_avg = np.average(left_fit, axis=0)
left_line = get_cords(img, left_fit_avg)
else:
left_line = np.array([0, 0, 0, 0])
if right_fit:
right_fit_avg = np.average(right_fit, axis=0)
right_line = get_cords(img, right_fit_avg)
else:
right_line = np.array([0, 0, 0, 0])
return np.array([left_line, right_line])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def canny(img):
grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)
canny_filtered = cv2.Canny(blurred_img, 30, 150)
return canny_filtered
def region_of_interest(img):
height, width = img.shape
height -= 60
width -= 10
Polygons = np.array([[(width, height), (50, height), (int(3 / 8 * width
), int(3 / 4 * height)), (int(5 / 8 * width), int(3 / 4 * height))]])
mask = np.zeros_like(img)
cv2.fillConvexPoly(img=mask, points=Polygons, color=255, lineType=cv2.
LINE_AA)
mask_img = cv2.bitwise_and(img, mask)
return mask_img
def dispay_lines(img, lines):
line_img = np.zeros_like(img)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)
return line_img
def get_cords(img, line_slope_int):
slope, intercept = line_slope_int
y1 = img.shape[0]
y2 = int(y1 * (4 / 5))
x1 = int((y1 - intercept) / slope)
x2 = int((y2 - intercept) / slope)
height, width, _ = img.shape
if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <
0 or y2 > height or y2 < 0):
return np.array([0, 0, 0, 0])
return np.array([x1, y1, x2, y2])
def average_slope_intercept(img, lines):
left_fit = []
right_fit = []
for line in lines:
x1, y1, x2, y2 = line.reshape(4)
slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
print(slope, intercept)
if slope >= 0:
left_fit.append((slope, intercept))
else:
right_fit.append((slope, intercept))
if left_fit:
left_fit_avg = np.average(left_fit, axis=0)
left_line = get_cords(img, left_fit_avg)
else:
left_line = np.array([0, 0, 0, 0])
if right_fit:
right_fit_avg = np.average(right_fit, axis=0)
right_line = get_cords(img, right_fit_avg)
else:
right_line = np.array([0, 0, 0, 0])
return np.array([left_line, right_line])
if __name__ == '__main__':
cap = cv2.VideoCapture('./../Downloads/detect_lanes_from.mp4')
lines = np.asarray((np.array([0, 0, 0, 0]), np.array([0, 0, 0, 0])))
estimate = lines
while cap.isOpened():
_, frame = cap.read()
canny_img = canny(frame)
masked_img = region_of_interest(canny_img)
estimate = lines
lines = cv2.HoughLinesP(masked_img, 1, np.pi / 180, 100, np.array([
]), minLineLength=10, maxLineGap=500)
if lines is None:
lines = estimate
avg_lines = average_slope_intercept(frame, lines)
line_img = dispay_lines(frame, avg_lines)
img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)
cv2.imshow('colour_camera_frame', img_frame)
cv2.imshow('contoured', masked_img)
if cv2.waitKey(2) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
import matplotlib.pyplot as plt
<|reserved_special_token_0|>
def canny(img):
grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)
canny_filtered = cv2.Canny(blurred_img, 30, 150)
return canny_filtered
def region_of_interest(img):
height, width = img.shape
height -= 60
width -= 10
Polygons = np.array([[(width, height), (50, height), (int(3 / 8 * width
), int(3 / 4 * height)), (int(5 / 8 * width), int(3 / 4 * height))]])
mask = np.zeros_like(img)
cv2.fillConvexPoly(img=mask, points=Polygons, color=255, lineType=cv2.
LINE_AA)
mask_img = cv2.bitwise_and(img, mask)
return mask_img
def dispay_lines(img, lines):
line_img = np.zeros_like(img)
if lines is not None:
for x1, y1, x2, y2 in lines:
cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)
return line_img
def get_cords(img, line_slope_int):
slope, intercept = line_slope_int
y1 = img.shape[0]
y2 = int(y1 * (4 / 5))
x1 = int((y1 - intercept) / slope)
x2 = int((y2 - intercept) / slope)
height, width, _ = img.shape
if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <
0 or y2 > height or y2 < 0):
return np.array([0, 0, 0, 0])
return np.array([x1, y1, x2, y2])
def average_slope_intercept(img, lines):
left_fit = []
right_fit = []
for line in lines:
x1, y1, x2, y2 = line.reshape(4)
slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
print(slope, intercept)
if slope >= 0:
left_fit.append((slope, intercept))
else:
right_fit.append((slope, intercept))
if left_fit:
left_fit_avg = np.average(left_fit, axis=0)
left_line = get_cords(img, left_fit_avg)
else:
left_line = np.array([0, 0, 0, 0])
if right_fit:
right_fit_avg = np.average(right_fit, axis=0)
right_line = get_cords(img, right_fit_avg)
else:
right_line = np.array([0, 0, 0, 0])
return np.array([left_line, right_line])
if __name__ == '__main__':
cap = cv2.VideoCapture('./../Downloads/detect_lanes_from.mp4')
lines = np.asarray((np.array([0, 0, 0, 0]), np.array([0, 0, 0, 0])))
estimate = lines
while cap.isOpened():
_, frame = cap.read()
canny_img = canny(frame)
masked_img = region_of_interest(canny_img)
estimate = lines
lines = cv2.HoughLinesP(masked_img, 1, np.pi / 180, 100, np.array([
]), minLineLength=10, maxLineGap=500)
if lines is None:
lines = estimate
avg_lines = average_slope_intercept(frame, lines)
line_img = dispay_lines(frame, avg_lines)
img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)
cv2.imshow('colour_camera_frame', img_frame)
cv2.imshow('contoured', masked_img)
if cv2.waitKey(2) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import cv2
import numpy as np
import matplotlib.pyplot as plt
'''
def diff_of_gaussians(img):
grey_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
blur_img_grey = cv2.GaussianBlur(grey_img, (9,9), 0)
blur_img_colour = cv2.GaussianBlur(img, (9,9), 0)
#plt.figure(figsize = (20,2))
#plt.imshow(blur_img_grey, cmap = 'gray')
#plt.show()
#plt.imshow(blur_img_colour)
#plt.show()
fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(nrows = 2, ncols = 2)
edges_grey = cv2.Canny(grey_img,100,200)
edges = cv2.Canny(img, 100, 200)
#plt.subplot(411)
ax1.imshow(edges_grey, cmap = 'gray')
#plt.imshow(edges_grey, cmap = 'gray')
#plt.show()
#plt.subplot(412)
ax2.imshow(edges);
#plt.imshow(edges)
#plt.subplot(421)
ax3.imshow(canny(grey_img), cmap = 'gray')
#plt.show()
#plt.subplot(422)
ax4.imshow(canny(img))
#plt.show()
plt.show()
#plt.imshow(blur_img_grey - grey_img, cmap = 'gray')
#plt.show()
#plt.imshow(blur_img_colour - img)
#plt.show()
return
'''
def canny(img):
    """Return a Canny edge map of *img*.

    The frame is converted to greyscale (edge detection works on intensity
    changes), smoothed with a 9x9 Gaussian kernel to suppress high-frequency
    sensor noise, and then run through the Canny detector with thresholds
    30/150 so only strong gradients are traced as edges.
    """
    grey = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    smoothed = cv2.GaussianBlur(grey, (9, 9), 0)
    return cv2.Canny(smoothed, 30, 150)
def region_of_interest(img):
    """Mask *img* (a single-channel edge map) down to the lane region.

    A trapezoid anchored near the bottom of the frame — trimmed by 60 px at
    the bottom and 10 px on the right so the visible dashboard is excluded —
    is filled white in an otherwise black mask, and everything outside it is
    zeroed with a bitwise AND.
    """
    rows, cols = img.shape
    rows -= 60
    cols -= 10
    # Trapezoid: bottom edge spans x = 50..cols; top edge sits at 3/4 of the
    # height, between 3/8 and 5/8 of the width (image origin is top-left).
    roi = np.array([[(cols, rows), (50, rows),
                     (int(3 / 8 * cols), int(3 / 4 * rows)),
                     (int(5 / 8 * cols), int(3 / 4 * rows))]])
    mask = np.zeros_like(img)
    cv2.fillConvexPoly(img=mask, points=roi, color=255, lineType=cv2.LINE_AA)
    return cv2.bitwise_and(img, mask)
def dispay_lines(img, lines):
    """Draw each (x1, y1, x2, y2) segment in *lines* onto a black canvas.

    Returns an image the same shape as *img* containing only the drawn
    segments (green, 30 px thick), ready to be blended over the frame.

    NOTE(review): the name is a typo for "display_lines"; kept as-is so
    existing callers keep working.
    """
    canvas = np.zeros_like(img)
    if lines is None:
        return canvas
    for start_x, start_y, end_x, end_y in lines:
        cv2.line(canvas, (start_x, start_y), (end_x, end_y), (0, 255, 0), 30)
    return canvas
def get_cords(img, line_slope_int):
    """Convert a (slope, intercept) pair into pixel endpoint coordinates.

    The returned segment starts at the bottom of the frame (y1 = height)
    and ends one fifth of the way up (y2 = 4/5 * height); the x values are
    solved from y = m*x + c. Segments that fall outside the frame — or a
    degenerate horizontal fit (slope == 0), which would otherwise divide
    by zero — are reported as the sentinel np.array([0, 0, 0, 0]).
    """
    slope, intercept = line_slope_int
    height, width, _ = img.shape
    if slope == 0:
        # Horizontal fit: no finite x solves y = m*x + c for a fixed y.
        # Return the same sentinel as the out-of-bounds case instead of
        # crashing with ZeroDivisionError.
        return np.array([0, 0, 0, 0])
    y1 = img.shape[0]        # line starts at the bottom of the frame
    y2 = int(y1 * (4 / 5))   # and extends one fifth of the way up
    x1 = int((y1 - intercept) / slope)
    x2 = int((y2 - intercept) / slope)
    if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or
            y1 > height or y1 < 0 or y2 > height or y2 < 0):
        return np.array([0, 0, 0, 0])
    return np.array([x1, y1, x2, y2])
def average_slope_intercept(img, lines):
    """Average the Hough segments into one line per side of the lane.

    Each segment is fitted as y = m*x + c with a degree-1 polyfit and
    bucketed by the sign of its slope (m >= 0 in one bucket, m < 0 in the
    other; note that in image coordinates y grows downward, so the
    "left"/"right" naming is approximate). Each bucket's average fit is
    converted to pixel endpoints via get_cords; an empty bucket yields the
    [0, 0, 0, 0] sentinel.

    Fixes over the previous revision: the per-segment debug print (which
    ran for every segment of every frame) is removed, and vertical
    segments are skipped — polyfit cannot represent x1 == x2 as y = m*x + c
    and would produce NaNs that poison the averages and later crash in
    get_cords.
    """
    left_fit = []
    right_fit = []
    for line in lines:
        x1, y1, x2, y2 = line.reshape(4)
        if x1 == x2:
            # Vertical segment: no finite slope; skip it.
            continue
        slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)
        if slope >= 0:
            left_fit.append((slope, intercept))
        else:
            right_fit.append((slope, intercept))
    if left_fit:
        left_line = get_cords(img, np.average(left_fit, axis=0))
    else:
        left_line = np.array([0, 0, 0, 0])
    if right_fit:
        right_line = get_cords(img, np.average(right_fit, axis=0))
    else:
        right_line = np.array([0, 0, 0, 0])
    return np.array([left_line, right_line])
if __name__ == "__main__":
    # Play the demo drive video and overlay the detected lane lines frame
    # by frame. `lines` is seeded with the all-zero sentinel so the very
    # first "no detection" frame has something to fall back on.
    cap = cv2.VideoCapture("./../Downloads/detect_lanes_from.mp4")
    lines = np.asarray((np.array([0,0,0,0]), np.array([0,0,0,0])))
    estimate = lines
    while (cap.isOpened()):
        _, frame = cap.read()
        canny_img = canny(frame)
        masked_img = region_of_interest(canny_img)
        # Remember the previous frame's detections before running Hough again.
        estimate = lines
        #print(len(estimate), len(lines))
        # Finding straight lines and therefore the lane lines --> Hough transform
        lines = cv2.HoughLinesP(masked_img, 1, (np.pi / 180), 100, np.array([]), minLineLength = 10, maxLineGap = 500)
        #print(estimate.shape, lines.shape)
        if lines is None:
            # No segments found this frame: reuse the previous frame's lines.
            lines = estimate
        # https://www.youtube.com/watch?v=4zHbI-fFIlI watch at 1.5x lol
        avg_lines = average_slope_intercept(frame, lines)
        #print(avg_lines)
        line_img = dispay_lines(frame, avg_lines)
        # Blend the green lane overlay onto the original frame (overlay at 0.8).
        img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)
        cv2.imshow("colour_camera_frame", img_frame)
        cv2.imshow("contoured", masked_img)
        # Pressing 'q' quits the playback loop.
        if cv2.waitKey(2) == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "c3a7a8a006f717057a7ad2920f19d82842b04a85",
"index": 9510,
"step-1": "<mask token>\n\n\ndef canny(img):\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)\n canny_filtered = cv2.Canny(blurred_img, 30, 150)\n return canny_filtered\n\n\n<mask token>\n\n\ndef dispay_lines(img, lines):\n line_img = np.zeros_like(img)\n if lines is not None:\n for x1, y1, x2, y2 in lines:\n cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)\n return line_img\n\n\ndef get_cords(img, line_slope_int):\n slope, intercept = line_slope_int\n y1 = img.shape[0]\n y2 = int(y1 * (4 / 5))\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n height, width, _ = img.shape\n if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <\n 0 or y2 > height or y2 < 0):\n return np.array([0, 0, 0, 0])\n return np.array([x1, y1, x2, y2])\n\n\ndef average_slope_intercept(img, lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)\n print(slope, intercept)\n if slope >= 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n if left_fit:\n left_fit_avg = np.average(left_fit, axis=0)\n left_line = get_cords(img, left_fit_avg)\n else:\n left_line = np.array([0, 0, 0, 0])\n if right_fit:\n right_fit_avg = np.average(right_fit, axis=0)\n right_line = get_cords(img, right_fit_avg)\n else:\n right_line = np.array([0, 0, 0, 0])\n return np.array([left_line, right_line])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef canny(img):\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)\n canny_filtered = cv2.Canny(blurred_img, 30, 150)\n return canny_filtered\n\n\ndef region_of_interest(img):\n height, width = img.shape\n height -= 60\n width -= 10\n Polygons = np.array([[(width, height), (50, height), (int(3 / 8 * width\n ), int(3 / 4 * height)), (int(5 / 8 * width), int(3 / 4 * height))]])\n mask = np.zeros_like(img)\n cv2.fillConvexPoly(img=mask, points=Polygons, color=255, lineType=cv2.\n LINE_AA)\n mask_img = cv2.bitwise_and(img, mask)\n return mask_img\n\n\ndef dispay_lines(img, lines):\n line_img = np.zeros_like(img)\n if lines is not None:\n for x1, y1, x2, y2 in lines:\n cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)\n return line_img\n\n\ndef get_cords(img, line_slope_int):\n slope, intercept = line_slope_int\n y1 = img.shape[0]\n y2 = int(y1 * (4 / 5))\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n height, width, _ = img.shape\n if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <\n 0 or y2 > height or y2 < 0):\n return np.array([0, 0, 0, 0])\n return np.array([x1, y1, x2, y2])\n\n\ndef average_slope_intercept(img, lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)\n print(slope, intercept)\n if slope >= 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n if left_fit:\n left_fit_avg = np.average(left_fit, axis=0)\n left_line = get_cords(img, left_fit_avg)\n else:\n left_line = np.array([0, 0, 0, 0])\n if right_fit:\n right_fit_avg = np.average(right_fit, axis=0)\n right_line = get_cords(img, right_fit_avg)\n else:\n right_line = np.array([0, 0, 0, 0])\n return np.array([left_line, right_line])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef canny(img):\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)\n canny_filtered = cv2.Canny(blurred_img, 30, 150)\n return canny_filtered\n\n\ndef region_of_interest(img):\n height, width = img.shape\n height -= 60\n width -= 10\n Polygons = np.array([[(width, height), (50, height), (int(3 / 8 * width\n ), int(3 / 4 * height)), (int(5 / 8 * width), int(3 / 4 * height))]])\n mask = np.zeros_like(img)\n cv2.fillConvexPoly(img=mask, points=Polygons, color=255, lineType=cv2.\n LINE_AA)\n mask_img = cv2.bitwise_and(img, mask)\n return mask_img\n\n\ndef dispay_lines(img, lines):\n line_img = np.zeros_like(img)\n if lines is not None:\n for x1, y1, x2, y2 in lines:\n cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)\n return line_img\n\n\ndef get_cords(img, line_slope_int):\n slope, intercept = line_slope_int\n y1 = img.shape[0]\n y2 = int(y1 * (4 / 5))\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n height, width, _ = img.shape\n if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <\n 0 or y2 > height or y2 < 0):\n return np.array([0, 0, 0, 0])\n return np.array([x1, y1, x2, y2])\n\n\ndef average_slope_intercept(img, lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)\n print(slope, intercept)\n if slope >= 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n if left_fit:\n left_fit_avg = np.average(left_fit, axis=0)\n left_line = get_cords(img, left_fit_avg)\n else:\n left_line = np.array([0, 0, 0, 0])\n if right_fit:\n right_fit_avg = np.average(right_fit, axis=0)\n right_line = get_cords(img, right_fit_avg)\n else:\n right_line = np.array([0, 0, 0, 0])\n return np.array([left_line, right_line])\n\n\nif __name__ == '__main__':\n cap = cv2.VideoCapture('./../Downloads/detect_lanes_from.mp4')\n lines 
= np.asarray((np.array([0, 0, 0, 0]), np.array([0, 0, 0, 0])))\n estimate = lines\n while cap.isOpened():\n _, frame = cap.read()\n canny_img = canny(frame)\n masked_img = region_of_interest(canny_img)\n estimate = lines\n lines = cv2.HoughLinesP(masked_img, 1, np.pi / 180, 100, np.array([\n ]), minLineLength=10, maxLineGap=500)\n if lines is None:\n lines = estimate\n avg_lines = average_slope_intercept(frame, lines)\n line_img = dispay_lines(frame, avg_lines)\n img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)\n cv2.imshow('colour_camera_frame', img_frame)\n cv2.imshow('contoured', masked_img)\n if cv2.waitKey(2) == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n<mask token>\n\n\ndef canny(img):\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n blurred_img = cv2.GaussianBlur(grey_img, (9, 9), 0)\n canny_filtered = cv2.Canny(blurred_img, 30, 150)\n return canny_filtered\n\n\ndef region_of_interest(img):\n height, width = img.shape\n height -= 60\n width -= 10\n Polygons = np.array([[(width, height), (50, height), (int(3 / 8 * width\n ), int(3 / 4 * height)), (int(5 / 8 * width), int(3 / 4 * height))]])\n mask = np.zeros_like(img)\n cv2.fillConvexPoly(img=mask, points=Polygons, color=255, lineType=cv2.\n LINE_AA)\n mask_img = cv2.bitwise_and(img, mask)\n return mask_img\n\n\ndef dispay_lines(img, lines):\n line_img = np.zeros_like(img)\n if lines is not None:\n for x1, y1, x2, y2 in lines:\n cv2.line(line_img, (x1, y1), (x2, y2), (0, 255, 0), 30)\n return line_img\n\n\ndef get_cords(img, line_slope_int):\n slope, intercept = line_slope_int\n y1 = img.shape[0]\n y2 = int(y1 * (4 / 5))\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n height, width, _ = img.shape\n if (x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 <\n 0 or y2 > height or y2 < 0):\n return np.array([0, 0, 0, 0])\n return np.array([x1, y1, x2, y2])\n\n\ndef average_slope_intercept(img, lines):\n left_fit = []\n right_fit = []\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n slope, intercept = np.polyfit((x1, x2), (y1, y2), 1)\n print(slope, intercept)\n if slope >= 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n if left_fit:\n left_fit_avg = np.average(left_fit, axis=0)\n left_line = get_cords(img, left_fit_avg)\n else:\n left_line = np.array([0, 0, 0, 0])\n if right_fit:\n right_fit_avg = np.average(right_fit, axis=0)\n right_line = get_cords(img, right_fit_avg)\n else:\n right_line = np.array([0, 0, 0, 0])\n return np.array([left_line, right_line])\n\n\nif __name__ == '__main__':\n cap = 
cv2.VideoCapture('./../Downloads/detect_lanes_from.mp4')\n lines = np.asarray((np.array([0, 0, 0, 0]), np.array([0, 0, 0, 0])))\n estimate = lines\n while cap.isOpened():\n _, frame = cap.read()\n canny_img = canny(frame)\n masked_img = region_of_interest(canny_img)\n estimate = lines\n lines = cv2.HoughLinesP(masked_img, 1, np.pi / 180, 100, np.array([\n ]), minLineLength=10, maxLineGap=500)\n if lines is None:\n lines = estimate\n avg_lines = average_slope_intercept(frame, lines)\n line_img = dispay_lines(frame, avg_lines)\n img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)\n cv2.imshow('colour_camera_frame', img_frame)\n cv2.imshow('contoured', masked_img)\n if cv2.waitKey(2) == ord('q'):\n break\n cap.release()\n cv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n'''\ndef diff_of_gaussians(img):\n\n grey_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n blur_img_grey = cv2.GaussianBlur(grey_img, (9,9), 0)\n blur_img_colour = cv2.GaussianBlur(img, (9,9), 0)\n\n #plt.figure(figsize = (20,2))\n #plt.imshow(blur_img_grey, cmap = 'gray')\n #plt.show()\n #plt.imshow(blur_img_colour)\n #plt.show()\n\n fig, [[ax1, ax2], [ax3, ax4]] = plt.subplots(nrows = 2, ncols = 2)\n edges_grey = cv2.Canny(grey_img,100,200)\n edges = cv2.Canny(img, 100, 200)\n #plt.subplot(411)\n ax1.imshow(edges_grey, cmap = 'gray')\n #plt.imshow(edges_grey, cmap = 'gray')\n #plt.show()\n #plt.subplot(412)\n ax2.imshow(edges);\n #plt.imshow(edges)\n #plt.subplot(421)\n ax3.imshow(canny(grey_img), cmap = 'gray')\n #plt.show()\n #plt.subplot(422)\n ax4.imshow(canny(img))\n #plt.show()\n\n plt.show()\n #plt.imshow(blur_img_grey - grey_img, cmap = 'gray')\n #plt.show()\n #plt.imshow(blur_img_colour - img)\n #plt.show()\n\n return\n'''\n\ndef canny(img):\n # changes in intensity are to be captured.\n # Canny and the Sobel operator work no greyscale images\n grey_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n # Gaussian Blurring to reduce noise, removes high-frequncy components in the image\n # High frequency due to high ISO of the camera, contours that aren't really edges.\n # https://www.youtube.com/watch?v=uihBwtPIBxM\n blurred_img = cv2.GaussianBlur(grey_img, (9,9), 0)\n\n # Canny Edge Detector, identifying any sharp changes in intesity, Uses edge-gradients\n # the strongest gradents are then traced\n # https://www.youtube.com/watch?v=sRFM5IEqR2w\n canny_filtered = cv2.Canny(blurred_img, 30, 150)\n return canny_filtered\n\ndef region_of_interest(img):\n height, width = img.shape\n height -= 60\n width -= 10\n #Reducing the image size to \"focus\" more on the center of the frame (region of interest)\n #These dimensions are later used in the 
generation of the mask\n #The reduction in height enables us to ignore the part of the image corresponding to the dashboard.\n\n #Coordinates marking our \"region of interest\"\n #The top-left of the image is (0,0)\n Polygons = np.array([\n [(width, height),(50, height), (int((3/8) * width), int((3/4) * height)),(int((5/8) * width), int((3/4) * height))]\n ])\n #(width, height),(50,height) removes what's visible of the dash of the car.\n mask = np.zeros_like(img)\n # filling mask\n cv2.fillConvexPoly(img = mask, points = Polygons, color = 255, lineType = cv2.LINE_AA)\n # Uncomment \"return mask\" to see the \"region of interest\" marked in white\n mask_img = cv2.bitwise_and(img, mask)\n # mask_img now has the detected edges in our region of interest.\n #return mask\n return mask_img\n\ndef dispay_lines(img, lines):\n line_img = np.zeros_like(img)\n if lines is not None:\n for x1,y1,x2,y2 in lines:\n cv2.line(line_img, (x1,y1), (x2,y2), (0,255,0), 30)\n return line_img\n\ndef get_cords(img, line_slope_int):\n slope, intercept = line_slope_int\n y1 = img.shape[0]\n #Line starts from the bottom left\n y2 = int(y1 * (4/5))\n # The line goes 1 fifth of the way up\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n #from y = mx + c\n #print(img.shape)\n height, width, _ = img.shape\n if x1 > width or x1 < 0 or x2 > width or x2 < 0 or y1 > height or y1 < 0 or y2 > height or y2 < 0:\n return np.array([0,0,0,0])\n return np.array([x1, y1, x2, y2])\n\ndef average_slope_intercept(img, lines):\n left_fit = []\n right_fit = []\n #if lines is None:\n # return (np.array([0,0,0,0]), np.array([0,0,0,0]))\n for line in lines:\n x1, y1, x2, y2 = line.reshape(4)\n slope, intercept = np.polyfit((x1,x2), (y1,y2), 1)\n #Linear least squares :) (not exactly but it's easy to think of it like this)\n print(slope, intercept)\n #left lines have a positive slope.\n if slope >= 0:\n left_fit.append((slope, intercept))\n else:\n right_fit.append((slope, intercept))\n\n if 
left_fit:\n left_fit_avg = np.average(left_fit, axis = 0)\n left_line = get_cords(img, left_fit_avg)\n else:\n left_line = np.array([0,0,0,0])\n if right_fit:\n right_fit_avg = np.average(right_fit, axis = 0)\n right_line = get_cords(img, right_fit_avg)\n else:\n right_line = np.array([0,0,0,0])\n\n return np.array([left_line, right_line])\n\nif __name__ == \"__main__\":\n cap = cv2.VideoCapture(\"./../Downloads/detect_lanes_from.mp4\")\n lines = np.asarray((np.array([0,0,0,0]), np.array([0,0,0,0])))\n estimate = lines\n while (cap.isOpened()):\n _, frame = cap.read()\n canny_img = canny(frame)\n masked_img = region_of_interest(canny_img)\n estimate = lines\n #print(len(estimate), len(lines))\n # Finding straight lines and therefore the lane lines --> Hough transform\n lines = cv2.HoughLinesP(masked_img, 1, (np.pi / 180), 100, np.array([]), minLineLength = 10, maxLineGap = 500)\n #print(estimate.shape, lines.shape)\n if lines is None:\n lines = estimate\n # https://www.youtube.com/watch?v=4zHbI-fFIlI watch at 1.5x lol\n avg_lines = average_slope_intercept(frame, lines)\n #print(avg_lines)\n line_img = dispay_lines(frame, avg_lines)\n img_frame = cv2.addWeighted(frame, 1, line_img, 0.8, 0)\n\n cv2.imshow(\"colour_camera_frame\", img_frame)\n cv2.imshow(\"contoured\", masked_img)\n if cv2.waitKey(2) == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# This version assumes the labeled/unlabeled domains correspond to the train/test split.
import numpy as np
from ..utils import Dataset
import math
import random
from .interface import TopicModel
from .man_model.models import *
from .man_model import utils
from .man_model.options import opt
import torch.utils.data as data_utils
from tqdm import tqdm
from collections import defaultdict
import itertools
from torchnet.meter import ConfusionMeter
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.optim as optim
from torch.utils.data import ConcatDataset, DataLoader
"""
IMPORTANT: for some reason, the models (self.F_s, etc.) will not work if the inputs are
not float32, so they must be converted. It is unclear whether the same applies to the
targets. Also, domain labels retrieved from get_domain_labels apparently cannot be -1,
and the output size for C HAS TO BE 2 even for a binary classification.
"""
def softmax(x):
    """Row-wise softmax of a 2-D array.

    Each row is shifted by its own maximum before exponentiation for
    numerical stability; the result's rows sum to 1.
    """
    exps = np.exp(x - np.max(x, axis=1, keepdims=True))
    return exps / np.sum(exps, axis=1, keepdims=True)
class MultinomialAdversarialNetwork(TopicModel):
    def __init__(self, k, m, model_params=None, log_params=None):
        """Forward construction straight to the TopicModel base class.

        :param k: presumably the number of topics — confirm against TopicModel.
        :param m: presumably the feature/vocabulary dimension — confirm.
        :param model_params: optional model hyperparameters (passed through).
        :param log_params: optional logging options (passed through).
        """
        super().__init__(k,m,model_params,log_params)
def prepare_data(self,d):
"""
Assume d is a dictionary of dataset where d[domain] = another dataset class
Assume labeled domain = train set, unlabeled = test
"""
train_loaders, train_iters = {}, {}
unlabeled_loaders, unlabeled_iters = {}, {}
for domain in opt.domains:
#CONVERT TO FLOAT32
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))
train = data_utils.TensorDataset(features,target)
train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)
train_iters[domain] = iter(train_loaders[domain])
for domain in opt.unlabeled_domains:
features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))
uset = data_utils.TensorDataset(features,target)
unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)
unlabeled_iters[domain] = iter(unlabeled_loaders[domain])
return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters
def fit(self, d, *args, **kwargs):
#minibatches = create_minibatch(X, y, z, batch_size)
#TODO: make this able to fit consecutively
train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)
#Training
self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)
self.F_d = {}
for domain in opt.domains:
self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)
self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)
self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)
# print("try")
# print(opt.device)
self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)
for f_d in self.F_d.values():
f_d = f_d.to(opt.device)
# print("endtry")
# # optimizers
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)
optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)
loss_d_res = []
l_d_res = []
l_c_res = []
for epoch in range(opt.max_epoch):
self.F_s.train()
self.C.train()
self.D.train()
for f in self.F_d.values():
f.train()
# training accuracy
correct, total = defaultdict(int), defaultdict(int)
# D accuracy
d_correct, d_total = 0, 0
# conceptually view 1 epoch as 1 epoch of the first domain
num_iter = len(train_loaders[opt.domains[0]])
for i in range(num_iter):
# D iterations
utils.freeze_net(self.F_s)
map(utils.freeze_net, self.F_d.values())
utils.freeze_net(self.C)
utils.unfreeze_net(self.D)
# optional WGAN n_critic trick
n_critic = opt.n_critic
for _ in range(n_critic):
self.D.zero_grad()
loss_d = {}
# train on both labeled and unlabeled domains
for domain in opt.unlabeled_domains:
# targets not used
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
# D accuracy
_, pred = torch.max(d_outputs, 1)
d_total += len(d_inputs)
if opt.loss.lower() == 'l2':
_, tgt_indices = torch.max(d_targets, 1)
d_correct += (pred==tgt_indices).sum().item()
l_d = functional.mse_loss(d_outputs, d_targets)
l_d.backward()
else:
d_correct += (pred==d_targets).sum().item()
l_d = functional.nll_loss(d_outputs, d_targets)
l_d.backward()
loss_d[domain] = l_d.item()
optimizerD.step()
# F&C iteration
utils.unfreeze_net(self.F_s)
map(utils.unfreeze_net, self.F_d.values())
utils.unfreeze_net(self.C)
utils.freeze_net(self.D)
#if opt.fix_emb:
# utils.freeze_net(self.F_s.word_emb)
# map(utils.freeze_net, self.F_d.values())
self.F_s.zero_grad()
for f_d in self.F_d.values():
f_d.zero_grad()
self.C.zero_grad()
shared_feats, domain_feats = [], []
for domain in opt.domains:
inputs, targets = utils.endless_get_next_batch(
train_loaders, train_iters, domain)
#target = torch.int64 rn
targets = targets.to(opt.device)
inputs = inputs.to(opt.device)
shared_feat = self.F_s(inputs)
shared_feats.append(shared_feat)
domain_feat = self.F_d[domain](inputs)
domain_feats.append(domain_feat)
features = torch.cat((shared_feat, domain_feat), dim=1)
c_outputs = self.C(features)
#return c_outputs, targets
#DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)
l_c = functional.nll_loss(c_outputs, targets)
l_c.backward(retain_graph=True)
# training accuracy
_, pred = torch.max(c_outputs, 1)
total[domain] += targets.size(0)
correct[domain] += (pred == targets).sum().item()
# update F with D gradients on all domains
for domain in opt.unlabeled_domains:
d_inputs, _ = utils.endless_get_next_batch(
unlabeled_loaders, unlabeled_iters, domain)
d_inputs = d_inputs.to(opt.device)
shared_feat = self.F_s(d_inputs)
d_outputs = self.D(shared_feat)
if opt.loss.lower() == 'gr':
d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))
l_d = functional.nll_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= -opt.lambd
elif opt.loss.lower() == 'l2':
d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))
l_d = functional.mse_loss(d_outputs, d_targets)
if opt.lambd > 0:
l_d *= opt.lambd
l_d.backward()
optimizer.step()
# print(loss_d)
# print('l_d loss: {}'.format(l_d.item()))
# print('l_c loss: {}'.format(l_c.item()))
loss_d_res.append(loss_d['test'])
l_d_res.append(l_d.item())
l_c_res.append(l_c.item())
if (epoch + 1) % kwargs["display_step"] == 0:
print(
"Epoch:", "%04d, done" % (epoch + 1) #"cost=", "{:.9f}"#.format(l_d.data[0])
)
return loss_d_res, l_d_res, l_c_res
def transform(self, d, *args, **kwargs):
F_d = self.F_d[opt.domains[0]]
self.F_s.eval()
F_d.eval()
self.C.eval()
_,_,_,it = self.prepare_data(d)
it = it[opt.unlabeled_domains[0]]
correct = 0
total = 0
confusion = ConfusionMeter(opt.num_labels)
preds = []
for inputs,targets in it:
inputs = inputs.to(opt.device)
targets = targets.to(opt.device)
d_features = F_d(inputs)
features = torch.cat((self.F_s(inputs), d_features), dim=1)
outputs = self.C(features)
_, pred = torch.max(outputs, 1)
#preds.extend(pred.data)
confusion.add(pred.data, targets.data)
total += targets.size(0)
correct += (pred == targets).sum().item()
acc = correct / total
#('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))
return acc, correct
#return preds
def get_name(self):
    """Return the model's display name, computing and caching it once.

    The name has the form ``MAN(k,m,1)`` built from the instance's
    ``k`` and ``m`` attributes.
    """
    if self._name is None:
        # Lazily build and memoize the label on first request.
        self._name = f"MAN({self.k},{self.m},1)"
    return self._name
|
normal
|
{
"blob_id": "8f01934472805b5ad6dca328483a7ac79ae7748a",
"index": 6474,
"step-1": "<mask token>\n\n\nclass MultinomialAdversarialNetwork(TopicModel):\n <mask token>\n\n def prepare_data(self, d):\n \"\"\"\n Assume d is a dictionary of dataset where d[domain] = another dataset class\n Assume labeled domain = train set, unlabeled = test\n \"\"\"\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n train = data_utils.TensorDataset(features, target)\n train_loaders[domain] = DataLoader(train, opt.batch_size,\n shuffle=True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n uset = data_utils.TensorDataset(features, target)\n unlabeled_loaders[domain] = DataLoader(uset, opt.batch_size,\n shuffle=True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters\n\n def fit(self, d, *args, **kwargs):\n train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = (self\n .prepare_data(d))\n self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.\n F_hidden_sizes, opt.shared_hidden_size, opt.dropout)\n self.F_d = {}\n for domain in opt.domains:\n self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1],\n opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)\n self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size +\n opt.domain_hidden_size, opt.shared_hidden_size + opt.\n domain_hidden_size, 2, opt.dropout, opt.C_bn)\n self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt\n .shared_hidden_size, len(opt.all_domains), opt.loss, opt.\n dropout, opt.D_bn)\n self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.\n device), self.D.to(opt.device)\n for f_d in self.F_d.values():\n f_d = f_d.to(opt.device)\n 
optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.\n parameters() if self.F_s else [], self.C.parameters()] + [f.\n parameters() for f in self.F_d.values()])), lr=0.0001)\n optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)\n loss_d_res = []\n l_d_res = []\n l_c_res = []\n for epoch in range(opt.max_epoch):\n self.F_s.train()\n self.C.train()\n self.D.train()\n for f in self.F_d.values():\n f.train()\n correct, total = defaultdict(int), defaultdict(int)\n d_correct, d_total = 0, 0\n num_iter = len(train_loaders[opt.domains[0]])\n for i in range(num_iter):\n utils.freeze_net(self.F_s)\n map(utils.freeze_net, self.F_d.values())\n utils.freeze_net(self.C)\n utils.unfreeze_net(self.D)\n n_critic = opt.n_critic\n for _ in range(n_critic):\n self.D.zero_grad()\n loss_d = {}\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n _, pred = torch.max(d_outputs, 1)\n d_total += len(d_inputs)\n if opt.loss.lower() == 'l2':\n _, tgt_indices = torch.max(d_targets, 1)\n d_correct += (pred == tgt_indices).sum().item()\n l_d = functional.mse_loss(d_outputs, d_targets)\n l_d.backward()\n else:\n d_correct += (pred == d_targets).sum().item()\n l_d = functional.nll_loss(d_outputs, d_targets)\n l_d.backward()\n loss_d[domain] = l_d.item()\n optimizerD.step()\n utils.unfreeze_net(self.F_s)\n map(utils.unfreeze_net, self.F_d.values())\n utils.unfreeze_net(self.C)\n utils.freeze_net(self.D)\n self.F_s.zero_grad()\n for f_d in self.F_d.values():\n f_d.zero_grad()\n self.C.zero_grad()\n shared_feats, domain_feats = [], []\n for domain in opt.domains:\n inputs, targets = utils.endless_get_next_batch(\n train_loaders, train_iters, domain)\n targets = targets.to(opt.device)\n inputs = inputs.to(opt.device)\n shared_feat = 
self.F_s(inputs)\n shared_feats.append(shared_feat)\n domain_feat = self.F_d[domain](inputs)\n domain_feats.append(domain_feat)\n features = torch.cat((shared_feat, domain_feat), dim=1)\n c_outputs = self.C(features)\n l_c = functional.nll_loss(c_outputs, targets)\n l_c.backward(retain_graph=True)\n _, pred = torch.max(c_outputs, 1)\n total[domain] += targets.size(0)\n correct[domain] += (pred == targets).sum().item()\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n if opt.loss.lower() == 'gr':\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n l_d = functional.nll_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= -opt.lambd\n elif opt.loss.lower() == 'l2':\n d_targets = utils.get_random_domain_label(opt.loss,\n len(d_inputs))\n l_d = functional.mse_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= opt.lambd\n l_d.backward()\n optimizer.step()\n loss_d_res.append(loss_d['test'])\n l_d_res.append(l_d.item())\n l_c_res.append(l_c.item())\n if (epoch + 1) % kwargs['display_step'] == 0:\n print('Epoch:', '%04d, done' % (epoch + 1))\n return loss_d_res, l_d_res, l_c_res\n <mask token>\n\n def get_name(self):\n if self._name is None:\n self._name = 'MAN({},{},{})'.format(self.k, self.m, 1)\n return self._name\n",
"step-2": "<mask token>\n\n\nclass MultinomialAdversarialNetwork(TopicModel):\n\n def __init__(self, k, m, model_params=None, log_params=None):\n super().__init__(k, m, model_params, log_params)\n\n def prepare_data(self, d):\n \"\"\"\n Assume d is a dictionary of dataset where d[domain] = another dataset class\n Assume labeled domain = train set, unlabeled = test\n \"\"\"\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n train = data_utils.TensorDataset(features, target)\n train_loaders[domain] = DataLoader(train, opt.batch_size,\n shuffle=True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n uset = data_utils.TensorDataset(features, target)\n unlabeled_loaders[domain] = DataLoader(uset, opt.batch_size,\n shuffle=True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters\n\n def fit(self, d, *args, **kwargs):\n train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = (self\n .prepare_data(d))\n self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.\n F_hidden_sizes, opt.shared_hidden_size, opt.dropout)\n self.F_d = {}\n for domain in opt.domains:\n self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1],\n opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)\n self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size +\n opt.domain_hidden_size, opt.shared_hidden_size + opt.\n domain_hidden_size, 2, opt.dropout, opt.C_bn)\n self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt\n .shared_hidden_size, len(opt.all_domains), opt.loss, opt.\n dropout, opt.D_bn)\n self.F_s, self.C, self.D = self.F_s.to(opt.device), 
self.C.to(opt.\n device), self.D.to(opt.device)\n for f_d in self.F_d.values():\n f_d = f_d.to(opt.device)\n optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.\n parameters() if self.F_s else [], self.C.parameters()] + [f.\n parameters() for f in self.F_d.values()])), lr=0.0001)\n optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)\n loss_d_res = []\n l_d_res = []\n l_c_res = []\n for epoch in range(opt.max_epoch):\n self.F_s.train()\n self.C.train()\n self.D.train()\n for f in self.F_d.values():\n f.train()\n correct, total = defaultdict(int), defaultdict(int)\n d_correct, d_total = 0, 0\n num_iter = len(train_loaders[opt.domains[0]])\n for i in range(num_iter):\n utils.freeze_net(self.F_s)\n map(utils.freeze_net, self.F_d.values())\n utils.freeze_net(self.C)\n utils.unfreeze_net(self.D)\n n_critic = opt.n_critic\n for _ in range(n_critic):\n self.D.zero_grad()\n loss_d = {}\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n _, pred = torch.max(d_outputs, 1)\n d_total += len(d_inputs)\n if opt.loss.lower() == 'l2':\n _, tgt_indices = torch.max(d_targets, 1)\n d_correct += (pred == tgt_indices).sum().item()\n l_d = functional.mse_loss(d_outputs, d_targets)\n l_d.backward()\n else:\n d_correct += (pred == d_targets).sum().item()\n l_d = functional.nll_loss(d_outputs, d_targets)\n l_d.backward()\n loss_d[domain] = l_d.item()\n optimizerD.step()\n utils.unfreeze_net(self.F_s)\n map(utils.unfreeze_net, self.F_d.values())\n utils.unfreeze_net(self.C)\n utils.freeze_net(self.D)\n self.F_s.zero_grad()\n for f_d in self.F_d.values():\n f_d.zero_grad()\n self.C.zero_grad()\n shared_feats, domain_feats = [], []\n for domain in opt.domains:\n inputs, targets = utils.endless_get_next_batch(\n train_loaders, 
train_iters, domain)\n targets = targets.to(opt.device)\n inputs = inputs.to(opt.device)\n shared_feat = self.F_s(inputs)\n shared_feats.append(shared_feat)\n domain_feat = self.F_d[domain](inputs)\n domain_feats.append(domain_feat)\n features = torch.cat((shared_feat, domain_feat), dim=1)\n c_outputs = self.C(features)\n l_c = functional.nll_loss(c_outputs, targets)\n l_c.backward(retain_graph=True)\n _, pred = torch.max(c_outputs, 1)\n total[domain] += targets.size(0)\n correct[domain] += (pred == targets).sum().item()\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n if opt.loss.lower() == 'gr':\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n l_d = functional.nll_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= -opt.lambd\n elif opt.loss.lower() == 'l2':\n d_targets = utils.get_random_domain_label(opt.loss,\n len(d_inputs))\n l_d = functional.mse_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= opt.lambd\n l_d.backward()\n optimizer.step()\n loss_d_res.append(loss_d['test'])\n l_d_res.append(l_d.item())\n l_c_res.append(l_c.item())\n if (epoch + 1) % kwargs['display_step'] == 0:\n print('Epoch:', '%04d, done' % (epoch + 1))\n return loss_d_res, l_d_res, l_c_res\n <mask token>\n\n def get_name(self):\n if self._name is None:\n self._name = 'MAN({},{},{})'.format(self.k, self.m, 1)\n return self._name\n",
"step-3": "<mask token>\n\n\nclass MultinomialAdversarialNetwork(TopicModel):\n\n def __init__(self, k, m, model_params=None, log_params=None):\n super().__init__(k, m, model_params, log_params)\n\n def prepare_data(self, d):\n \"\"\"\n Assume d is a dictionary of dataset where d[domain] = another dataset class\n Assume labeled domain = train set, unlabeled = test\n \"\"\"\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n train = data_utils.TensorDataset(features, target)\n train_loaders[domain] = DataLoader(train, opt.batch_size,\n shuffle=True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n uset = data_utils.TensorDataset(features, target)\n unlabeled_loaders[domain] = DataLoader(uset, opt.batch_size,\n shuffle=True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters\n\n def fit(self, d, *args, **kwargs):\n train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = (self\n .prepare_data(d))\n self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.\n F_hidden_sizes, opt.shared_hidden_size, opt.dropout)\n self.F_d = {}\n for domain in opt.domains:\n self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1],\n opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)\n self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size +\n opt.domain_hidden_size, opt.shared_hidden_size + opt.\n domain_hidden_size, 2, opt.dropout, opt.C_bn)\n self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt\n .shared_hidden_size, len(opt.all_domains), opt.loss, opt.\n dropout, opt.D_bn)\n self.F_s, self.C, self.D = self.F_s.to(opt.device), 
self.C.to(opt.\n device), self.D.to(opt.device)\n for f_d in self.F_d.values():\n f_d = f_d.to(opt.device)\n optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.\n parameters() if self.F_s else [], self.C.parameters()] + [f.\n parameters() for f in self.F_d.values()])), lr=0.0001)\n optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)\n loss_d_res = []\n l_d_res = []\n l_c_res = []\n for epoch in range(opt.max_epoch):\n self.F_s.train()\n self.C.train()\n self.D.train()\n for f in self.F_d.values():\n f.train()\n correct, total = defaultdict(int), defaultdict(int)\n d_correct, d_total = 0, 0\n num_iter = len(train_loaders[opt.domains[0]])\n for i in range(num_iter):\n utils.freeze_net(self.F_s)\n map(utils.freeze_net, self.F_d.values())\n utils.freeze_net(self.C)\n utils.unfreeze_net(self.D)\n n_critic = opt.n_critic\n for _ in range(n_critic):\n self.D.zero_grad()\n loss_d = {}\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n _, pred = torch.max(d_outputs, 1)\n d_total += len(d_inputs)\n if opt.loss.lower() == 'l2':\n _, tgt_indices = torch.max(d_targets, 1)\n d_correct += (pred == tgt_indices).sum().item()\n l_d = functional.mse_loss(d_outputs, d_targets)\n l_d.backward()\n else:\n d_correct += (pred == d_targets).sum().item()\n l_d = functional.nll_loss(d_outputs, d_targets)\n l_d.backward()\n loss_d[domain] = l_d.item()\n optimizerD.step()\n utils.unfreeze_net(self.F_s)\n map(utils.unfreeze_net, self.F_d.values())\n utils.unfreeze_net(self.C)\n utils.freeze_net(self.D)\n self.F_s.zero_grad()\n for f_d in self.F_d.values():\n f_d.zero_grad()\n self.C.zero_grad()\n shared_feats, domain_feats = [], []\n for domain in opt.domains:\n inputs, targets = utils.endless_get_next_batch(\n train_loaders, 
train_iters, domain)\n targets = targets.to(opt.device)\n inputs = inputs.to(opt.device)\n shared_feat = self.F_s(inputs)\n shared_feats.append(shared_feat)\n domain_feat = self.F_d[domain](inputs)\n domain_feats.append(domain_feat)\n features = torch.cat((shared_feat, domain_feat), dim=1)\n c_outputs = self.C(features)\n l_c = functional.nll_loss(c_outputs, targets)\n l_c.backward(retain_graph=True)\n _, pred = torch.max(c_outputs, 1)\n total[domain] += targets.size(0)\n correct[domain] += (pred == targets).sum().item()\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n if opt.loss.lower() == 'gr':\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n l_d = functional.nll_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= -opt.lambd\n elif opt.loss.lower() == 'l2':\n d_targets = utils.get_random_domain_label(opt.loss,\n len(d_inputs))\n l_d = functional.mse_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= opt.lambd\n l_d.backward()\n optimizer.step()\n loss_d_res.append(loss_d['test'])\n l_d_res.append(l_d.item())\n l_c_res.append(l_c.item())\n if (epoch + 1) % kwargs['display_step'] == 0:\n print('Epoch:', '%04d, done' % (epoch + 1))\n return loss_d_res, l_d_res, l_c_res\n\n def transform(self, d, *args, **kwargs):\n F_d = self.F_d[opt.domains[0]]\n self.F_s.eval()\n F_d.eval()\n self.C.eval()\n _, _, _, it = self.prepare_data(d)\n it = it[opt.unlabeled_domains[0]]\n correct = 0\n total = 0\n confusion = ConfusionMeter(opt.num_labels)\n preds = []\n for inputs, targets in it:\n inputs = inputs.to(opt.device)\n targets = targets.to(opt.device)\n d_features = F_d(inputs)\n features = torch.cat((self.F_s(inputs), d_features), dim=1)\n outputs = self.C(features)\n _, pred = torch.max(outputs, 1)\n confusion.add(pred.data, targets.data)\n total += 
targets.size(0)\n correct += (pred == targets).sum().item()\n acc = correct / total\n return acc, correct\n\n def get_name(self):\n if self._name is None:\n self._name = 'MAN({},{},{})'.format(self.k, self.m, 1)\n return self._name\n",
"step-4": "import numpy as np\nfrom ..utils import Dataset\nimport math\nimport random\nfrom .interface import TopicModel\nfrom .man_model.models import *\nfrom .man_model import utils\nfrom .man_model.options import opt\nimport torch.utils.data as data_utils\nfrom tqdm import tqdm\nfrom collections import defaultdict\nimport itertools\nfrom torchnet.meter import ConfusionMeter\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as functional\nimport torch.optim as optim\nfrom torch.utils.data import ConcatDataset, DataLoader\n<mask token>\n\n\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))\n return e_x / np.sum(e_x, axis=1).reshape(-1, 1)\n\n\nclass MultinomialAdversarialNetwork(TopicModel):\n\n def __init__(self, k, m, model_params=None, log_params=None):\n super().__init__(k, m, model_params, log_params)\n\n def prepare_data(self, d):\n \"\"\"\n Assume d is a dictionary of dataset where d[domain] = another dataset class\n Assume labeled domain = train set, unlabeled = test\n \"\"\"\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n train = data_utils.TensorDataset(features, target)\n train_loaders[domain] = DataLoader(train, opt.batch_size,\n shuffle=True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = torch.from_numpy(d[domain].X.todense().\n astype('float32')), torch.from_numpy(d[domain].y)\n uset = data_utils.TensorDataset(features, target)\n unlabeled_loaders[domain] = DataLoader(uset, opt.batch_size,\n shuffle=True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters\n\n def fit(self, d, *args, **kwargs):\n train_loaders, train_iters, 
unlabeled_loaders, unlabeled_iters = (self\n .prepare_data(d))\n self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.\n F_hidden_sizes, opt.shared_hidden_size, opt.dropout)\n self.F_d = {}\n for domain in opt.domains:\n self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1],\n opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)\n self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size +\n opt.domain_hidden_size, opt.shared_hidden_size + opt.\n domain_hidden_size, 2, opt.dropout, opt.C_bn)\n self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt\n .shared_hidden_size, len(opt.all_domains), opt.loss, opt.\n dropout, opt.D_bn)\n self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.\n device), self.D.to(opt.device)\n for f_d in self.F_d.values():\n f_d = f_d.to(opt.device)\n optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.\n parameters() if self.F_s else [], self.C.parameters()] + [f.\n parameters() for f in self.F_d.values()])), lr=0.0001)\n optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)\n loss_d_res = []\n l_d_res = []\n l_c_res = []\n for epoch in range(opt.max_epoch):\n self.F_s.train()\n self.C.train()\n self.D.train()\n for f in self.F_d.values():\n f.train()\n correct, total = defaultdict(int), defaultdict(int)\n d_correct, d_total = 0, 0\n num_iter = len(train_loaders[opt.domains[0]])\n for i in range(num_iter):\n utils.freeze_net(self.F_s)\n map(utils.freeze_net, self.F_d.values())\n utils.freeze_net(self.C)\n utils.unfreeze_net(self.D)\n n_critic = opt.n_critic\n for _ in range(n_critic):\n self.D.zero_grad()\n loss_d = {}\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n _, pred = torch.max(d_outputs, 1)\n d_total += 
len(d_inputs)\n if opt.loss.lower() == 'l2':\n _, tgt_indices = torch.max(d_targets, 1)\n d_correct += (pred == tgt_indices).sum().item()\n l_d = functional.mse_loss(d_outputs, d_targets)\n l_d.backward()\n else:\n d_correct += (pred == d_targets).sum().item()\n l_d = functional.nll_loss(d_outputs, d_targets)\n l_d.backward()\n loss_d[domain] = l_d.item()\n optimizerD.step()\n utils.unfreeze_net(self.F_s)\n map(utils.unfreeze_net, self.F_d.values())\n utils.unfreeze_net(self.C)\n utils.freeze_net(self.D)\n self.F_s.zero_grad()\n for f_d in self.F_d.values():\n f_d.zero_grad()\n self.C.zero_grad()\n shared_feats, domain_feats = [], []\n for domain in opt.domains:\n inputs, targets = utils.endless_get_next_batch(\n train_loaders, train_iters, domain)\n targets = targets.to(opt.device)\n inputs = inputs.to(opt.device)\n shared_feat = self.F_s(inputs)\n shared_feats.append(shared_feat)\n domain_feat = self.F_d[domain](inputs)\n domain_feats.append(domain_feat)\n features = torch.cat((shared_feat, domain_feat), dim=1)\n c_outputs = self.C(features)\n l_c = functional.nll_loss(c_outputs, targets)\n l_c.backward(retain_graph=True)\n _, pred = torch.max(c_outputs, 1)\n total[domain] += targets.size(0)\n correct[domain] += (pred == targets).sum().item()\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n if opt.loss.lower() == 'gr':\n d_targets = utils.get_domain_label(opt.loss, domain,\n len(d_inputs))\n l_d = functional.nll_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= -opt.lambd\n elif opt.loss.lower() == 'l2':\n d_targets = utils.get_random_domain_label(opt.loss,\n len(d_inputs))\n l_d = functional.mse_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= opt.lambd\n l_d.backward()\n optimizer.step()\n loss_d_res.append(loss_d['test'])\n l_d_res.append(l_d.item())\n 
l_c_res.append(l_c.item())\n if (epoch + 1) % kwargs['display_step'] == 0:\n print('Epoch:', '%04d, done' % (epoch + 1))\n return loss_d_res, l_d_res, l_c_res\n\n def transform(self, d, *args, **kwargs):\n F_d = self.F_d[opt.domains[0]]\n self.F_s.eval()\n F_d.eval()\n self.C.eval()\n _, _, _, it = self.prepare_data(d)\n it = it[opt.unlabeled_domains[0]]\n correct = 0\n total = 0\n confusion = ConfusionMeter(opt.num_labels)\n preds = []\n for inputs, targets in it:\n inputs = inputs.to(opt.device)\n targets = targets.to(opt.device)\n d_features = F_d(inputs)\n features = torch.cat((self.F_s(inputs), d_features), dim=1)\n outputs = self.C(features)\n _, pred = torch.max(outputs, 1)\n confusion.add(pred.data, targets.data)\n total += targets.size(0)\n correct += (pred == targets).sum().item()\n acc = correct / total\n return acc, correct\n\n def get_name(self):\n if self._name is None:\n self._name = 'MAN({},{},{})'.format(self.k, self.m, 1)\n return self._name\n",
"step-5": "#This version assumes domains = train/test set\nimport numpy as np\nfrom ..utils import Dataset\nimport math\nimport random\nfrom .interface import TopicModel\nfrom .man_model.models import *\nfrom .man_model import utils\nfrom .man_model.options import opt\nimport torch.utils.data as data_utils\nfrom tqdm import tqdm\nfrom collections import defaultdict\nimport itertools\nfrom torchnet.meter import ConfusionMeter\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as functional\nimport torch.optim as optim\nfrom torch.utils.data import ConcatDataset, DataLoader\n\n\"\"\"\nIMPORTANT: for some reason, Model (self.F_s,etc) will not work if inputs are not float32\n=> need to convert. Dont know if same thing for target tho?\nAlso apparently, domain labels retrieved from get_domain_labels cannot be -1?\nOutput size for C HAS TO BE 2 even if it's a binary classification\n\"\"\"\ndef softmax(x):\n \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n e_x = np.exp(x - np.max(x, axis=1).reshape(-1, 1))\n return e_x / np.sum(e_x, axis=1).reshape(-1, 1)\n\nclass MultinomialAdversarialNetwork(TopicModel):\n def __init__(self, k, m, model_params=None, log_params=None):\n super().__init__(k,m,model_params,log_params)\n \n def prepare_data(self,d):\n \"\"\"\n Assume d is a dictionary of dataset where d[domain] = another dataset class\n Assume labeled domain = train set, unlabeled = test\n \"\"\"\n train_loaders, train_iters = {}, {}\n unlabeled_loaders, unlabeled_iters = {}, {}\n for domain in opt.domains:\n #CONVERT TO FLOAT32\n features, target = torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape((-1,1))\n train = data_utils.TensorDataset(features,target)\n train_loaders[domain] = DataLoader(train, opt.batch_size, shuffle = True)\n train_iters[domain] = iter(train_loaders[domain])\n for domain in opt.unlabeled_domains:\n features, target = 
torch.from_numpy(d[domain].X.todense().astype('float32')), torch.from_numpy(d[domain].y)#.reshape(-1,1))\n uset = data_utils.TensorDataset(features,target)\n unlabeled_loaders[domain] = DataLoader(uset,opt.batch_size, shuffle = True)\n unlabeled_iters[domain] = iter(unlabeled_loaders[domain])\n \n return train_loaders, train_iters, unlabeled_loaders, unlabeled_iters\n \n \n def fit(self, d, *args, **kwargs):\n #minibatches = create_minibatch(X, y, z, batch_size)\n #TODO: make this able to fit consecutively\n train_loaders, train_iters, unlabeled_loaders, unlabeled_iters = self.prepare_data(d)\n #Training\n self.F_s = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes,opt.shared_hidden_size, opt.dropout)\n self.F_d = {}\n for domain in opt.domains:\n self.F_d[domain] = MlpFeatureExtractor(d['train'].X.shape[1], opt.F_hidden_sizes, opt.domain_hidden_size, opt.dropout)\n self.C = SentimentClassifier(opt.C_layers, opt.shared_hidden_size + opt.domain_hidden_size, opt.shared_hidden_size + opt.domain_hidden_size, 2,opt.dropout, opt.C_bn)\n self.D = DomainClassifier(opt.D_layers, opt.shared_hidden_size, opt.shared_hidden_size,len(opt.all_domains), opt.loss, opt.dropout, opt.D_bn)\n# print(\"try\")\n# print(opt.device)\n self.F_s, self.C, self.D = self.F_s.to(opt.device), self.C.to(opt.device), self.D.to(opt.device)\n for f_d in self.F_d.values():\n f_d = f_d.to(opt.device)\n# print(\"endtry\")\n# # optimizers\n optimizer = optim.Adam(itertools.chain(*map(list, [self.F_s.parameters() if self.F_s else [], self.C.parameters()] + [f.parameters() for f in self.F_d.values()])), lr=0.0001)\n optimizerD = optim.Adam(self.D.parameters(), lr=0.0001)\n loss_d_res = []\n l_d_res = []\n l_c_res = []\n for epoch in range(opt.max_epoch):\n self.F_s.train()\n self.C.train()\n self.D.train()\n for f in self.F_d.values():\n f.train()\n\n # training accuracy\n correct, total = defaultdict(int), defaultdict(int)\n # D accuracy\n d_correct, d_total = 0, 0\n # conceptually view 1 
epoch as 1 epoch of the first domain\n num_iter = len(train_loaders[opt.domains[0]])\n for i in range(num_iter):\n # D iterations\n utils.freeze_net(self.F_s)\n map(utils.freeze_net, self.F_d.values())\n utils.freeze_net(self.C)\n utils.unfreeze_net(self.D)\n # optional WGAN n_critic trick\n n_critic = opt.n_critic\n\n for _ in range(n_critic):\n self.D.zero_grad()\n loss_d = {}\n # train on both labeled and unlabeled domains\n for domain in opt.unlabeled_domains:\n # targets not used\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n # D accuracy\n _, pred = torch.max(d_outputs, 1)\n d_total += len(d_inputs)\n if opt.loss.lower() == 'l2':\n _, tgt_indices = torch.max(d_targets, 1)\n d_correct += (pred==tgt_indices).sum().item()\n l_d = functional.mse_loss(d_outputs, d_targets)\n l_d.backward()\n else:\n d_correct += (pred==d_targets).sum().item()\n l_d = functional.nll_loss(d_outputs, d_targets)\n l_d.backward()\n loss_d[domain] = l_d.item()\n optimizerD.step()\n # F&C iteration\n utils.unfreeze_net(self.F_s)\n map(utils.unfreeze_net, self.F_d.values())\n utils.unfreeze_net(self.C)\n utils.freeze_net(self.D)\n #if opt.fix_emb:\n # utils.freeze_net(self.F_s.word_emb)\n # map(utils.freeze_net, self.F_d.values())\n self.F_s.zero_grad()\n for f_d in self.F_d.values():\n f_d.zero_grad()\n self.C.zero_grad()\n shared_feats, domain_feats = [], []\n for domain in opt.domains:\n inputs, targets = utils.endless_get_next_batch(\n train_loaders, train_iters, domain)\n #target = torch.int64 rn\n targets = targets.to(opt.device)\n inputs = inputs.to(opt.device)\n shared_feat = self.F_s(inputs)\n shared_feats.append(shared_feat)\n domain_feat = self.F_d[domain](inputs)\n domain_feats.append(domain_feat)\n features = torch.cat((shared_feat, domain_feat), dim=1)\n 
c_outputs = self.C(features)\n #return c_outputs, targets\n #DEVICE SIDE TRIGGERED ERROR OCCUR HERE (l_c=...)\n l_c = functional.nll_loss(c_outputs, targets)\n l_c.backward(retain_graph=True)\n # training accuracy\n _, pred = torch.max(c_outputs, 1)\n total[domain] += targets.size(0)\n correct[domain] += (pred == targets).sum().item()\n # update F with D gradients on all domains\n for domain in opt.unlabeled_domains:\n d_inputs, _ = utils.endless_get_next_batch(\n unlabeled_loaders, unlabeled_iters, domain)\n d_inputs = d_inputs.to(opt.device)\n shared_feat = self.F_s(d_inputs)\n d_outputs = self.D(shared_feat)\n if opt.loss.lower() == 'gr':\n d_targets = utils.get_domain_label(opt.loss, domain, len(d_inputs))\n l_d = functional.nll_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= -opt.lambd\n elif opt.loss.lower() == 'l2':\n d_targets = utils.get_random_domain_label(opt.loss, len(d_inputs))\n l_d = functional.mse_loss(d_outputs, d_targets)\n if opt.lambd > 0:\n l_d *= opt.lambd\n l_d.backward()\n \n\n optimizer.step()\n \n\n# print(loss_d)\n# print('l_d loss: {}'.format(l_d.item()))\n# print('l_c loss: {}'.format(l_c.item()))\n loss_d_res.append(loss_d['test'])\n l_d_res.append(l_d.item())\n l_c_res.append(l_c.item())\n if (epoch + 1) % kwargs[\"display_step\"] == 0:\n print(\n \"Epoch:\", \"%04d, done\" % (epoch + 1) #\"cost=\", \"{:.9f}\"#.format(l_d.data[0])\n )\n return loss_d_res, l_d_res, l_c_res\n \n def transform(self, d, *args, **kwargs):\n F_d = self.F_d[opt.domains[0]]\n self.F_s.eval()\n F_d.eval()\n self.C.eval()\n _,_,_,it = self.prepare_data(d)\n it = it[opt.unlabeled_domains[0]]\n correct = 0\n total = 0\n confusion = ConfusionMeter(opt.num_labels)\n preds = []\n for inputs,targets in it:\n inputs = inputs.to(opt.device)\n targets = targets.to(opt.device)\n d_features = F_d(inputs)\n features = torch.cat((self.F_s(inputs), d_features), dim=1)\n outputs = self.C(features)\n _, pred = torch.max(outputs, 1)\n #preds.extend(pred.data)\n 
confusion.add(pred.data, targets.data)\n total += targets.size(0)\n correct += (pred == targets).sum().item()\n acc = correct / total\n #('{}: Accuracy on {} samples: {}%'.format(name, total, 100.0*acc))\n return acc, correct\n #return preds\n \n def get_name(self):\n if self._name is None:\n self._name = \"MAN({},{},{})\".format(self.k,self.m,1)\n return self._name",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
# Experiment configuration: SE-ResNeXt50-32x4d on 768px inputs,
# 2-fold training for 50 epochs with AdamW + BCE-with-logits.
workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep'
seed = 300
n_fold = 2
epoch = 50
resume_from = None  # path to a checkpoint, or None to start fresh
batch_size = 32
num_workers = 32
imgsize = (768, 768)  # (height, width)
# Loss: binary cross entropy on raw logits.
loss = {
    'name': 'BCEWithLogitsLoss',
    'params': {},
}
# Optimizer: AdamW (decoupled weight decay).
optim = {
    'name': 'AdamW',
    'params': {
        'lr': 0.0003,
        'betas': (0.9, 0.999),
        'eps': 1e-08,
        'weight_decay': 0.01,
    },
}
# Backbone architecture identifier, resolved by the model factory.
model = dict(
    name='se_resnext50_32x4d'
)
# ImageNet channel statistics used for input normalization.
normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225],}
# Transform specs in name/params form; `imgsize` is defined above.
totensor = dict(name='ToTensor', params=dict(normalize=normalize))
# Train and test use the same crop; test-time rotation is slightly milder
# (limit=25 vs. the train-time rotate defined below).
crop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))
crop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))
rotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7))
hflip = dict(name='HorizontalFlip', params=dict(p=0.5))
# The string below is inert reference material (kept-for-later augmentation
# specs), not executed configuration.
'''
Additional augmentarions
------------------------
vflip = dict(name='VerticalFlip', params=dict(p=0.5,))
random_brightness_contrast = dict(name='RandomBrightnessContrast', params=dict(brightness_limit=0.2, contrast_limit=0.2, p=0.5))
#gaussian_blur = dict(name='GaussianBlur', params=dict(blur_limit=7, always_apply=False, p=0.5))
#iaa_emboss = dict(name='IAAEmboss', params=dict(alpha=(0.2, 0.5), strength=(0.2, 0.7), always_apply=False, p=0.5))
#iaa_sharpen = dict(name='IAASharpen', params=dict(alpha=(0.2, 0.5), lightness=(0.5, 1.0), always_apply=False, p=0.5))
hue_saturation_value = dict(name='HueSaturationValue', params=dict(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.4))
cut_out = dict(name='Cutout', params=dict(num_holes=8, max_h_size=546//8, max_w_size=546//8, fill_value=0, p=0.3))
blur = dict(name='Blur', params=dict(blur_limit=4, p=.25))
shift_scale_rotate = dict(name='ShiftScaleRotate', params=dict(shift_limit=0.2, scale_limit=0.2, rotate_limit=20, p=1))
'''
rotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7))
dicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9))
dicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.05, p=0.7))
elastic_transform = dict(name='ElasticTransform', params=dict(alpha=1, sigma=50, p=0.5))
# NOTE(review): every other augmentation spec in this file places the
# probability `p` inside `params`; the original put p=0.5 as a top-level
# key of this dict.  Moved into params for consistency.  (grid_distortion
# is currently not referenced by any transforms list in this file.)
grid_distortion = dict(name='GridDistortion', params=dict(p=0.5))
# Windowing policy flag consumed by the dataset loader -- exact semantics
# not visible in this file; presumably selects a DICOM windowing scheme.
window_policy = 1

# Dataset / dataloader configuration for the three splits.  train and valid
# share the same fold annotations and image directory; only shuffling,
# gradient accumulation and the transform pipeline differ.
data = dict(
    train=dict(
        dataset_type='CustomDataset',
        annotations='./cache/train-runmila_2folds_seed123.pkl',
        imgdir='./input/runmila_i768',
        imgsize=imgsize,
        # Gradient accumulation steps (effective batch = batch_size * 2).
        n_grad_acc=2,
        loader=dict(
            shuffle=True,
            batch_size=batch_size,
            drop_last=True,
            num_workers=num_workers,
            pin_memory=False,
        ),
        transforms=[crop, hflip, rotate, dicomnoise, totensor],
        dataset_policy=1,
        window_policy=window_policy,
    ),
    valid = dict(
        dataset_type='CustomDataset',
        annotations='./cache/train-runmila_2folds_seed123.pkl',
        imgdir='./input/runmila_i768',
        imgsize=imgsize,
        loader=dict(
            shuffle=False,
            batch_size=batch_size,
            drop_last=False,
            num_workers=num_workers,
            pin_memory=False,
        ),
        # Validation keeps light augmentation (TTA-style) rather than a
        # deterministic pipeline.
        transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],
        dataset_policy=1,
        window_policy=window_policy,
    ),
    test = dict(
        dataset_type='CustomDataset',
        annotations='./cache/test.pkl',
        imgdir='./input/test_runmila_i768',
        imgsize=imgsize,
        loader=dict(
            shuffle=False,
            batch_size=batch_size,
            drop_last=False,
            num_workers=num_workers,
            pin_memory=False,
        ),
        transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],
        dataset_policy=1,
        window_policy=window_policy,
    ),
)
|
normal
|
{
"blob_id": "8030bdb6c9f0b7114916d7abc245ff680d1fc917",
"index": 6790,
"step-1": "<mask token>\n",
"step-2": "workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep'\nseed = 300\nn_fold = 2\nepoch = 50\nresume_from = None\nbatch_size = 32\nnum_workers = 32\nimgsize = 768, 768\nloss = dict(name='BCEWithLogitsLoss', params=dict())\noptim = dict(name='AdamW', params=dict(lr=0.0003, betas=(0.9, 0.999), eps=\n 1e-08, weight_decay=0.01))\nmodel = dict(name='se_resnext50_32x4d')\nnormalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}\ntotensor = dict(name='ToTensor', params=dict(normalize=normalize))\ncrop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=\n imgsize[1], scale=(0.7, 1.0), p=1.0))\ncrop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0],\n width=imgsize[1], scale=(0.7, 1.0), p=1.0))\nrotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7))\nhflip = dict(name='HorizontalFlip', params=dict(p=0.5))\n<mask token>\nrotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7))\ndicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9)\n )\ndicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio=\n 0.05, p=0.7))\nelastic_transform = dict(name='ElasticTransform', params=dict(alpha=1,\n sigma=50, p=0.5))\ngrid_distortion = dict(name='GridDistortion', params=dict(), p=0.5)\nwindow_policy = 1\ndata = dict(train=dict(dataset_type='CustomDataset', annotations=\n './cache/train-runmila_2folds_seed123.pkl', imgdir=\n './input/runmila_i768', imgsize=imgsize, n_grad_acc=2, loader=dict(\n shuffle=True, batch_size=batch_size, drop_last=True, num_workers=\n num_workers, pin_memory=False), transforms=[crop, hflip, rotate,\n dicomnoise, totensor], dataset_policy=1, window_policy=window_policy),\n valid=dict(dataset_type='CustomDataset', annotations=\n './cache/train-runmila_2folds_seed123.pkl', imgdir=\n './input/runmila_i768', imgsize=imgsize, loader=dict(shuffle=False,\n batch_size=batch_size, drop_last=False, 
num_workers=num_workers,\n pin_memory=False), transforms=[crop_test, hflip, rotate_test,\n dicomnoise_test, totensor], dataset_policy=1, window_policy=\n window_policy), test=dict(dataset_type='CustomDataset', annotations=\n './cache/test.pkl', imgdir='./input/test_runmila_i768', imgsize=imgsize,\n loader=dict(shuffle=False, batch_size=batch_size, drop_last=False,\n num_workers=num_workers, pin_memory=False), transforms=[crop_test,\n hflip, rotate_test, dicomnoise_test, totensor], dataset_policy=1,\n window_policy=window_policy))\n",
"step-3": "workdir = './model/adamW-BCE/model_seresnext50_32x4d_i768_runmila_2fold_50ep'\nseed = 300\n\nn_fold = 2\nepoch = 50\nresume_from = None\n\nbatch_size = 32\nnum_workers = 32\nimgsize = (768, 768) #(height, width)\n\nloss = dict(\n name='BCEWithLogitsLoss',\n params=dict(),\n)\n\noptim = dict(\n name='AdamW',\n params=dict(\n lr=0.0003,\n betas=(0.9, 0.999),\n eps=1e-08,\n weight_decay=0.01,\n ),\n)\n\nmodel = dict(\n name='se_resnext50_32x4d'\n)\n\n\nnormalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225],}\ntotensor = dict(name='ToTensor', params=dict(normalize=normalize))\ncrop = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))\ncrop_test = dict(name='RandomResizedCrop', params=dict(height=imgsize[0], width=imgsize[1], scale=(0.7,1.0), p=1.0))\nrotate_test = dict(name='Rotate', params=dict(limit=25, border_mode=0, p=0.7))\nhflip = dict(name='HorizontalFlip', params=dict(p=0.5))\n\n'''\nAdditional augmentarions\n------------------------\n\nvflip = dict(name='VerticalFlip', params=dict(p=0.5,))\nrandom_brightness_contrast = dict(name='RandomBrightnessContrast', params=dict(brightness_limit=0.2, contrast_limit=0.2, p=0.5))\n#gaussian_blur = dict(name='GaussianBlur', params=dict(blur_limit=7, always_apply=False, p=0.5))\n#iaa_emboss = dict(name='IAAEmboss', params=dict(alpha=(0.2, 0.5), strength=(0.2, 0.7), always_apply=False, p=0.5))\n#iaa_sharpen = dict(name='IAASharpen', params=dict(alpha=(0.2, 0.5), lightness=(0.5, 1.0), always_apply=False, p=0.5))\nhue_saturation_value = dict(name='HueSaturationValue', params=dict(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.4))\ncut_out = dict(name='Cutout', params=dict(num_holes=8, max_h_size=546//8, max_w_size=546//8, fill_value=0, p=0.3))\nblur = dict(name='Blur', params=dict(blur_limit=4, p=.25))\nshift_scale_rotate = dict(name='ShiftScaleRotate', params=dict(shift_limit=0.2, scale_limit=0.2, rotate_limit=20, 
p=1))\n'''\nrotate = dict(name='Rotate', params=dict(limit=30, border_mode=0, p=0.7))\ndicomnoise = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.06, p=0.9))\ndicomnoise_test = dict(name='RandomDicomNoise', params=dict(limit_ratio=0.05, p=0.7))\nelastic_transform = dict(name='ElasticTransform', params=dict(alpha=1, sigma=50, p=0.5))\ngrid_distortion = dict(name='GridDistortion', params=dict(), p=0.5)\n\n\nwindow_policy = 1\n\ndata = dict(\n train=dict(\n dataset_type='CustomDataset',\n annotations='./cache/train-runmila_2folds_seed123.pkl',\n imgdir='./input/runmila_i768',\n imgsize=imgsize,\n n_grad_acc=2,\n loader=dict(\n shuffle=True,\n batch_size=batch_size,\n drop_last=True,\n num_workers=num_workers,\n pin_memory=False,\n ),\n transforms=[crop, hflip, rotate, dicomnoise, totensor],\n dataset_policy=1,\n window_policy=window_policy,\n ),\n valid = dict(\n dataset_type='CustomDataset',\n annotations='./cache/train-runmila_2folds_seed123.pkl',\n imgdir='./input/runmila_i768',\n imgsize=imgsize,\n loader=dict(\n shuffle=False,\n batch_size=batch_size,\n drop_last=False,\n num_workers=num_workers,\n pin_memory=False,\n ),\n transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],\n dataset_policy=1,\n window_policy=window_policy,\n ),\n test = dict(\n dataset_type='CustomDataset',\n annotations='./cache/test.pkl',\n imgdir='./input/test_runmila_i768',\n imgsize=imgsize,\n loader=dict(\n shuffle=False,\n batch_size=batch_size,\n drop_last=False,\n num_workers=num_workers,\n pin_memory=False,\n ),\n transforms=[crop_test, hflip, rotate_test, dicomnoise_test, totensor],\n dataset_policy=1,\n window_policy=window_policy,\n ),\n)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from itertools import chain

from utils import *
# Problem identifier passed to Input() below -- presumably selects the
# input dataset file; confirm against utils.Input.
name = 'topological'
def topological(above):
    """Topologically sort a DAG by removing a layer of sources until empty.

    ``above`` maps each node to the list of nodes it has edges to; it is
    consumed (emptied) as layers are removed, matching the original
    behavior.  Returns the nodes in a valid topological order.

    Raises ValueError if the graph contains a cycle -- the original
    implementation looped forever in that case, since no source could be
    removed and ``above`` never shrank.
    """
    result = []
    while above:
        # A source is a key that never appears as the target of any edge.
        # chain.from_iterable replaces the project-local flatten() helper.
        sources = set(above) - set(chain.from_iterable(above.values()))
        if not sources:
            raise ValueError('graph contains a cycle; no topological order exists')
        result.extend(sources)
        for node in sources:
            del above[node]
    return result
# Build the adjacency map from the input rows, skipping the first row.
above = defaultdict(list)
for edge in Array(Input(name))[1:]:
    parent, child = edge[0], edge[1]
    above[parent].append(child)
    above[child]  # touch the child so sink nodes also appear as keys
print(rosalind_pretty(topological(above)))
|
normal
|
{
"blob_id": "a8ea91797942616779ae0acc884db1e521c7ad28",
"index": 3927,
"step-1": "<mask token>\n\n\ndef topological(above):\n \"\"\"Topologically sort a DAG by removing a layer of sources until empty.\"\"\"\n result = []\n while above:\n sources = set(above) - set(flatten(above.values()))\n result.extend(sources)\n for node in sources:\n del above[node]\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef topological(above):\n \"\"\"Topologically sort a DAG by removing a layer of sources until empty.\"\"\"\n result = []\n while above:\n sources = set(above) - set(flatten(above.values()))\n result.extend(sources)\n for node in sources:\n del above[node]\n return result\n\n\n<mask token>\nfor edge in Array(Input(name))[1:]:\n above[edge[0]].append(edge[1])\n above[edge[1]]\nprint(rosalind_pretty(topological(above)))\n",
"step-3": "<mask token>\nname = 'topological'\n\n\ndef topological(above):\n \"\"\"Topologically sort a DAG by removing a layer of sources until empty.\"\"\"\n result = []\n while above:\n sources = set(above) - set(flatten(above.values()))\n result.extend(sources)\n for node in sources:\n del above[node]\n return result\n\n\nabove = defaultdict(list)\nfor edge in Array(Input(name))[1:]:\n above[edge[0]].append(edge[1])\n above[edge[1]]\nprint(rosalind_pretty(topological(above)))\n",
"step-4": "from utils import *\nname = 'topological'\n\n\ndef topological(above):\n \"\"\"Topologically sort a DAG by removing a layer of sources until empty.\"\"\"\n result = []\n while above:\n sources = set(above) - set(flatten(above.values()))\n result.extend(sources)\n for node in sources:\n del above[node]\n return result\n\n\nabove = defaultdict(list)\nfor edge in Array(Input(name))[1:]:\n above[edge[0]].append(edge[1])\n above[edge[1]]\nprint(rosalind_pretty(topological(above)))\n",
"step-5": "from utils import *\n\nname = 'topological'\n\ndef topological(above):\n \"Topologically sort a DAG by removing a layer of sources until empty.\"\n result = []\n while above:\n sources = set(above) - set(flatten(above.values()))\n result.extend(sources)\n for node in sources:\n del above[node]\n return result\n\nabove = defaultdict(list)\nfor edge in Array(Input(name))[1:]:\n above[edge[0]].append(edge[1])\n above[edge[1]]\nprint(rosalind_pretty(topological(above)))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def PlotUndirectedGraph(A, color):
    """Draw the weighted graph encoded by square adjacency matrix A.

    Only the upper triangle of A is read (j > i), so A is treated as
    symmetric.  Nodes are named with consecutive uppercase letters, which
    limits this to at most 26 nodes.  `color` is either an empty sequence
    (every node drawn 'skyblue') or one value per node, mapped through the
    Pastel1 colormap.  Shows the plot window (plt.show) before returning.
    """
    NodesNames = list(string.ascii_uppercase)
    NNodes = A.shape[0]
    # NOTE(review): a DiGraph is used even though the function plots an
    # undirected graph; arrowheads are suppressed below with arrows=False.
    G = nx.DiGraph()
    for i in range(NNodes):
        G.add_node(NodesNames[i])
    for i in range(NNodes):
        for j in range(i + 1, NNodes):
            if A[i, j] != 0:
                G.add_edge(NodesNames[i], NodesNames[j], weight=A[i, j])
    pos = nx.spring_layout(G)
    edge_labels = dict([((u, v), d['weight']) for u, v, d in G.edges(data=
        True)])
    if len(color) == 0:
        nx.draw_networkx_nodes(G, pos, node_size=400, node_color='skyblue')
    else:
        nx.draw_networkx_nodes(G, pos, node_size=400, node_color=color,
            cmap=plt.cm.Pastel1)
    nx.draw_networkx_labels(G, pos, edge_labels=edge_labels)
    nx.draw_networkx_edges(G, pos, arrows=False)
    nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
    plt.show()
<|reserved_special_token_1|>
import numpy as np
import string
import networkx as nx
import matplotlib.pyplot as plt
def PlotUndirectedGraph(A, color):
    """Plot the weighted graph described by adjacency matrix A.

    Reads only the upper triangle of A (treated as symmetric); nodes are
    labelled with consecutive uppercase letters.  An empty `color` draws
    every node 'skyblue'; otherwise `color` supplies one value per node,
    mapped through the Pastel1 colormap.  Ends by calling plt.show().
    """
    labels = list(string.ascii_uppercase)
    n = A.shape[0]
    G = nx.DiGraph()
    for idx in range(n):
        G.add_node(labels[idx])
    for row in range(n):
        for col in range(row + 1, n):
            weight = A[row, col]
            if weight != 0:
                G.add_edge(labels[row], labels[col], weight=weight)
    layout = nx.spring_layout(G)
    edge_labels = {(u, v): d['weight'] for u, v, d in G.edges(data=True)}
    if len(color) == 0:
        nx.draw_networkx_nodes(G, layout, node_size=400, node_color='skyblue')
    else:
        nx.draw_networkx_nodes(G, layout, node_size=400, node_color=color,
                               cmap=plt.cm.Pastel1)
    nx.draw_networkx_labels(G, layout, edge_labels=edge_labels)
    nx.draw_networkx_edges(G, layout, arrows=False)
    nx.draw_networkx_edge_labels(G, layout, edge_labels=edge_labels)
    plt.show()
<|reserved_special_token_1|>
import numpy as np
import string
import networkx as nx
import matplotlib.pyplot as plt
def PlotUndirectedGraph(A, color):
    """Render the weighted undirected graph given by adjacency matrix A.

    A     : square (n, n) matrix; A[i, j] != 0 adds an edge of that weight.
            Only the upper triangle is read, so A is treated as symmetric.
    color : per-node values mapped through the Pastel1 colormap, or an
            empty sequence to draw all nodes in plain 'skyblue'.
    Nodes are named with consecutive uppercase letters (so n <= 26).
    Shows the figure (plt.show) before returning.
    """
    NodesNames = list(string.ascii_uppercase)
    NNodes = A.shape[0]
    # The function draws an undirected graph, so build an nx.Graph rather
    # than the original DiGraph; this also makes arrows=False unnecessary.
    G = nx.Graph()
    for i in range(NNodes):
        G.add_node(NodesNames[i])
    for i in range(NNodes):
        for j in range(i + 1, NNodes):
            if A[i, j] != 0:
                G.add_edge(NodesNames[i], NodesNames[j], weight=A[i, j])
    pos = nx.spring_layout(G)
    edge_labels = {(u, v): d['weight'] for u, v, d in G.edges(data=True)}
    if len(color) == 0:
        nx.draw_networkx_nodes(G, pos, node_size=400, node_color='skyblue')
    else:
        nx.draw_networkx_nodes(G, pos, node_size=400, node_color=color,
                               cmap=plt.cm.Pastel1)
    # draw_networkx_labels draws node labels; the original passed a bogus
    # edge_labels kwarg here, which is not part of its interface.
    nx.draw_networkx_labels(G, pos)
    nx.draw_networkx_edges(G, pos)
    nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
    plt.show()
|
flexible
|
{
"blob_id": "61388b2edb35055cccbdc98ed52caedcd0b02983",
"index": 5624,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef PlotUndirectedGraph(A, color):\n NodesNames = list(string.ascii_uppercase)\n NNodes = A.shape[0]\n G = nx.DiGraph()\n for i in range(NNodes):\n G.add_node(NodesNames[i])\n for i in range(NNodes):\n for j in range(i + 1, NNodes):\n if A[i, j] != 0:\n G.add_edge(NodesNames[i], NodesNames[j], weight=A[i, j])\n pos = nx.spring_layout(G)\n edge_labels = dict([((u, v), d['weight']) for u, v, d in G.edges(data=\n True)])\n if len(color) == 0:\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color='skyblue')\n else:\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color=color,\n cmap=plt.cm.Pastel1)\n nx.draw_networkx_labels(G, pos, edge_labels=edge_labels)\n nx.draw_networkx_edges(G, pos, arrows=False)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\n plt.show()\n",
"step-3": "import numpy as np\nimport string\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\ndef PlotUndirectedGraph(A, color):\n NodesNames = list(string.ascii_uppercase)\n NNodes = A.shape[0]\n G = nx.DiGraph()\n for i in range(NNodes):\n G.add_node(NodesNames[i])\n for i in range(NNodes):\n for j in range(i + 1, NNodes):\n if A[i, j] != 0:\n G.add_edge(NodesNames[i], NodesNames[j], weight=A[i, j])\n pos = nx.spring_layout(G)\n edge_labels = dict([((u, v), d['weight']) for u, v, d in G.edges(data=\n True)])\n if len(color) == 0:\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color='skyblue')\n else:\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color=color,\n cmap=plt.cm.Pastel1)\n nx.draw_networkx_labels(G, pos, edge_labels=edge_labels)\n nx.draw_networkx_edges(G, pos, arrows=False)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\n plt.show()\n",
"step-4": "import numpy as np \nimport string \nimport networkx as nx\nimport matplotlib.pyplot as plt\n\n\n\ndef PlotUndirectedGraph(A,color):\n NodesNames = list(string.ascii_uppercase);\n NNodes = A.shape[0]\n G = nx.DiGraph()\n for i in range(NNodes):\n G.add_node(NodesNames[i])\n for i in range(NNodes):\n for j in range(i+1,NNodes):\n if A[i,j] != 0:\n G.add_edge(NodesNames[i],NodesNames[j],weight=A[i,j])\n pos = nx.spring_layout(G)\n edge_labels=dict([((u,v,),d['weight'])\n for u,v,d in G.edges(data=True)])\n if len(color)==0:\n #edge_colors = ['black' if not edge in red_edges else 'red' for edge in G.edges()]\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color = 'skyblue')\n else:\n nx.draw_networkx_nodes(G, pos, node_size=400, node_color = color, cmap=plt.cm.Pastel1)\n #nx.draw(G,pos, node_color = values, node_size=1500,edge_color=edge_colors,edge_cmap=plt.cm.Reds)\n\n nx.draw_networkx_labels(G, pos, edge_labels=edge_labels)\n nx.draw_networkx_edges(G, pos, arrows = False)\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\n plt.show()\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#
# Copyright 2021 Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Concurrent executor provides concurrent executing function either in
a thread pool or a process pool
"""
import splunktalib.concurrent.process_pool as pp
import splunktalib.concurrent.thread_pool as tp
class ConcurrentExecutor:
    """Run callables concurrently: I/O-bound work goes to a thread pool,
    compute-bound work to an optional process pool (created only when a
    positive ``process_size`` is configured)."""

    def __init__(self, config):
        """
        :param config: dict like object, contains thread_min_size (int),
                       thread_max_size (int), daemonize_thread (bool),
                       process_size (int)
        """

        self._io_executor = tp.ThreadPool(
            config.get("thread_min_size", 0),
            config.get("thread_max_size", 0),
            config.get("task_queue_size", 1024),
            config.get("daemonize_thread", True),
        )
        # The process pool is optional; compute methods assert on it.
        self._compute_executor = None
        if config.get("process_size", 0):
            self._compute_executor = pp.ProcessPool(config.get("process_size", 0))

    def start(self):
        """Start the thread pool (the process pool needs no explicit start)."""
        self._io_executor.start()

    def tear_down(self):
        """Tear down the thread pool and, when present, the process pool."""
        self._io_executor.tear_down()
        if self._compute_executor is not None:
            self._compute_executor.tear_down()

    def run_io_func_sync(self, func, args=(), kwargs=None):
        """
        :param func: callable
        :param args: free params
        :param kwargs: named params
        :return whatever the func returns
        """

        return self._io_executor.apply(func, args, kwargs)

    def run_io_func_async(self, func, args=(), kwargs=None, callback=None):
        """
        :param func: callable
        :param args: free params
        :param kwargs: named params
        :param callback: when func is done and without exception, call the callback
        :return whatever the func returns
        """

        return self._io_executor.apply_async(func, args, kwargs, callback)

    def enqueue_io_funcs(self, funcs, block=True):
        """
        run jobs in a fire and forget way, no result will be handled
        over to clients
        :param funcs: tuple/list-like or generator like object, func shall be
                      callable
        """

        return self._io_executor.enqueue_funcs(funcs, block)

    def run_compute_func_sync(self, func, args=(), kwargs=None):
        """
        :param func: callable
        :param args: free params
        :param kwargs: named params
        :return whatever the func returns
        """

        # kwargs defaulted to a mutable {} shared across calls (classic
        # anti-pattern); use None and build a fresh dict, which also makes
        # the signature consistent with the run_io_func_* methods.
        if kwargs is None:
            kwargs = {}
        assert self._compute_executor is not None
        return self._compute_executor.apply(func, args, kwargs)

    def run_compute_func_async(self, func, args=(), kwargs=None, callback=None):
        """
        :param func: callable
        :param args: free params
        :param kwargs: named params
        :param callback: when func is done and without exception, call the callback
        :return whatever the func returns
        """

        if kwargs is None:
            kwargs = {}
        assert self._compute_executor is not None
        return self._compute_executor.apply_async(func, args, kwargs, callback)
|
normal
|
{
"blob_id": "24b1afb18e1cfdc8d5a62f5ee0147b2d73bc10d8",
"index": 7492,
"step-1": "<mask token>\n\n\nclass ConcurrentExecutor:\n <mask token>\n <mask token>\n <mask token>\n\n def run_io_func_sync(self, func, args=(), kwargs=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n return self._io_executor.apply(func, args, kwargs)\n <mask token>\n <mask token>\n <mask token>\n\n def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply_async(func, args, kwargs, callback)\n",
"step-2": "<mask token>\n\n\nclass ConcurrentExecutor:\n <mask token>\n\n def start(self):\n self._io_executor.start()\n <mask token>\n\n def run_io_func_sync(self, func, args=(), kwargs=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n return self._io_executor.apply(func, args, kwargs)\n <mask token>\n\n def enqueue_io_funcs(self, funcs, block=True):\n \"\"\"\n run jobs in a fire and forget way, no result will be handled\n over to clients\n :param funcs: tuple/list-like or generator like object, func shall be\n callable\n \"\"\"\n return self._io_executor.enqueue_funcs(funcs, block)\n\n def run_compute_func_sync(self, func, args=(), kwargs={}):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply(func, args, kwargs)\n\n def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply_async(func, args, kwargs, callback)\n",
"step-3": "<mask token>\n\n\nclass ConcurrentExecutor:\n\n def __init__(self, config):\n \"\"\"\n :param config: dict like object, contains thread_min_size (int),\n thread_max_size (int), daemonize_thread (bool),\n process_size (int)\n \"\"\"\n self._io_executor = tp.ThreadPool(config.get('thread_min_size', 0),\n config.get('thread_max_size', 0), config.get('task_queue_size',\n 1024), config.get('daemonize_thread', True))\n self._compute_executor = None\n if config.get('process_size', 0):\n self._compute_executor = pp.ProcessPool(config.get(\n 'process_size', 0))\n\n def start(self):\n self._io_executor.start()\n\n def tear_down(self):\n self._io_executor.tear_down()\n if self._compute_executor is not None:\n self._compute_executor.tear_down()\n\n def run_io_func_sync(self, func, args=(), kwargs=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n return self._io_executor.apply(func, args, kwargs)\n <mask token>\n\n def enqueue_io_funcs(self, funcs, block=True):\n \"\"\"\n run jobs in a fire and forget way, no result will be handled\n over to clients\n :param funcs: tuple/list-like or generator like object, func shall be\n callable\n \"\"\"\n return self._io_executor.enqueue_funcs(funcs, block)\n\n def run_compute_func_sync(self, func, args=(), kwargs={}):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply(func, args, kwargs)\n\n def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply_async(func, args, kwargs, 
callback)\n",
"step-4": "<mask token>\nimport splunktalib.concurrent.process_pool as pp\nimport splunktalib.concurrent.thread_pool as tp\n\n\nclass ConcurrentExecutor:\n\n def __init__(self, config):\n \"\"\"\n :param config: dict like object, contains thread_min_size (int),\n thread_max_size (int), daemonize_thread (bool),\n process_size (int)\n \"\"\"\n self._io_executor = tp.ThreadPool(config.get('thread_min_size', 0),\n config.get('thread_max_size', 0), config.get('task_queue_size',\n 1024), config.get('daemonize_thread', True))\n self._compute_executor = None\n if config.get('process_size', 0):\n self._compute_executor = pp.ProcessPool(config.get(\n 'process_size', 0))\n\n def start(self):\n self._io_executor.start()\n\n def tear_down(self):\n self._io_executor.tear_down()\n if self._compute_executor is not None:\n self._compute_executor.tear_down()\n\n def run_io_func_sync(self, func, args=(), kwargs=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n return self._io_executor.apply(func, args, kwargs)\n\n def run_io_func_async(self, func, args=(), kwargs=None, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n return self._io_executor.apply_async(func, args, kwargs, callback)\n\n def enqueue_io_funcs(self, funcs, block=True):\n \"\"\"\n run jobs in a fire and forget way, no result will be handled\n over to clients\n :param funcs: tuple/list-like or generator like object, func shall be\n callable\n \"\"\"\n return self._io_executor.enqueue_funcs(funcs, block)\n\n def run_compute_func_sync(self, func, args=(), kwargs={}):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return 
self._compute_executor.apply(func, args, kwargs)\n\n def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n assert self._compute_executor is not None\n return self._compute_executor.apply_async(func, args, kwargs, callback)\n",
"step-5": "#\n# Copyright 2021 Splunk Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nConcurrent executor provides concurrent executing function either in\na thread pool or a process pool\n\"\"\"\n\nimport splunktalib.concurrent.process_pool as pp\nimport splunktalib.concurrent.thread_pool as tp\n\n\nclass ConcurrentExecutor:\n def __init__(self, config):\n \"\"\"\n :param config: dict like object, contains thread_min_size (int),\n thread_max_size (int), daemonize_thread (bool),\n process_size (int)\n \"\"\"\n\n self._io_executor = tp.ThreadPool(\n config.get(\"thread_min_size\", 0),\n config.get(\"thread_max_size\", 0),\n config.get(\"task_queue_size\", 1024),\n config.get(\"daemonize_thread\", True),\n )\n self._compute_executor = None\n if config.get(\"process_size\", 0):\n self._compute_executor = pp.ProcessPool(config.get(\"process_size\", 0))\n\n def start(self):\n self._io_executor.start()\n\n def tear_down(self):\n self._io_executor.tear_down()\n if self._compute_executor is not None:\n self._compute_executor.tear_down()\n\n def run_io_func_sync(self, func, args=(), kwargs=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n\n return self._io_executor.apply(func, args, kwargs)\n\n def run_io_func_async(self, func, args=(), kwargs=None, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n 
:calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n\n return self._io_executor.apply_async(func, args, kwargs, callback)\n\n def enqueue_io_funcs(self, funcs, block=True):\n \"\"\"\n run jobs in a fire and forget way, no result will be handled\n over to clients\n :param funcs: tuple/list-like or generator like object, func shall be\n callable\n \"\"\"\n\n return self._io_executor.enqueue_funcs(funcs, block)\n\n def run_compute_func_sync(self, func, args=(), kwargs={}):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :return whatever the func returns\n \"\"\"\n\n assert self._compute_executor is not None\n return self._compute_executor.apply(func, args, kwargs)\n\n def run_compute_func_async(self, func, args=(), kwargs={}, callback=None):\n \"\"\"\n :param func: callable\n :param args: free params\n :param kwargs: named params\n :calllback: when func is done and without exception, call the callback\n :return whatever the func returns\n \"\"\"\n\n assert self._compute_executor is not None\n return self._compute_executor.apply_async(func, args, kwargs, callback)\n",
"step-ids": [
3,
6,
8,
10,
11
]
}
|
[
3,
6,
8,
10,
11
] |
# -*- coding:utf-8 -*-
import sys
from PyQt4 import QtGui,QtCore
import experiment
class Node(QtGui.QGraphicsEllipseItem):
    """Ellipse graphics item that carries an immutable name."""

    def __init__(self, name):
        super(Node, self).__init__()
        self.__name = name

    def getName(self):
        """Return the name this node was constructed with."""
        return self.__name

    def changeBrush(self, color, style):
        """Refill the ellipse from an (r, g, b) triple and a Qt brush style."""
        brush = QtGui.QBrush()
        brush.setStyle(style)
        tint = brush.color()
        tint.setRgb(color[0], color[1], color[2])
        brush.setColor(tint)
        self.setBrush(brush)
class Link(QtGui.QGraphicsLineItem):
    """Line graphics item with an immutable name and a mutable link type.

    The semantics of link_type are not visible in this file -- presumably
    it distinguishes matching states; confirm against the caller.
    """
    def __init__(self,name,link_type):
        super(Link, self).__init__()
        self.__link_type = link_type
        self.__name = name
    def getName(self):
        """Return the name given at construction time."""
        return self.__name
    def getType(self):
        """Return the current link type."""
        return self.__link_type
    def changeType(self,link_type):
        """Replace the stored link type."""
        self.__link_type = link_type
    def changeColor(self,color):
        """Re-pen the line using an (r, g, b) color triple."""
        p = QtGui.QPen()
        c = p.color()
        c.setRgb(color[0],color[1],color[2])
        p.setColor(c)
        self.setPen(p)
class Text(QtGui.QGraphicsTextItem):
    """Text graphics item that remembers the name it is associated with."""
    def __init__(self,name,text):
        super(Text, self).__init__(text)
        self.__name = name
    def getName(self):
        """Return the associated name."""
        return self.__name
class GUI(QtGui.QWidget):
    """Main window for stepping through a stable-matching experiment.

    Two graphics scenes are kept side by side:

    * ``statu_scene``   -- current state: only standing marriages.
    * ``history_scene`` -- everything that happened in the last step/epoch,
      including broken engagements (green links) and failed proposals
      (blue links).
    """

    def __init__(self):
        super(GUI, self).__init__()
        # 20 people per side; the second argument is an experiment parameter
        # (see the experiment module -- TODO confirm its meaning).
        self.exp = experiment.Experiments(20, 3)
        self.matching = self.exp.unidirectional_match()
        self.man_rank, self.woman_rank = self.matching.get_avg_rank()
        # NOTE(review): get_spouse_rank is called with no argument here but
        # with a signed index inside __refreshViewStep -- verify that both
        # call signatures are supported by the matching object.
        self.man_spouse, self.woman_spouse = self.matching.get_spouse_rank()
        self.initUI()
        self.showMaximized()

    def initUI(self):
        """Build the layout: three buttons, a log pane and two graph views."""
        self.setWindowTitle(' Stable Matching ')
        grid = QtGui.QGridLayout()
        step_button = QtGui.QPushButton('STEP', self)
        epoch_button = QtGui.QPushButton('EPOCH', self)
        end_button = QtGui.QPushButton('END', self)
        self.showText = QtGui.QTextEdit(self)
        self.showText.setText('START! ')
        self.statu_scene = QtGui.QGraphicsScene(self)
        self.initScene(self.statu_scene)
        self.statu_view = QtGui.QGraphicsView()
        self.statu_view.setScene(self.statu_scene)
        self.statu_view.setMinimumSize(600, 600)
        self.statu_view.show()
        self.history_scene = QtGui.QGraphicsScene(self)
        self.initScene(self.history_scene)
        self.history_view = QtGui.QGraphicsView()
        self.history_view.setScene(self.history_scene)
        self.history_view.setMinimumSize(600, 600)
        self.history_view.show()
        grid.addWidget(step_button, 1, 1)
        grid.addWidget(epoch_button, 2, 1)
        grid.addWidget(end_button, 3, 1)
        grid.addWidget(self.showText, 1, 2, 4, 1)
        grid.addWidget(self.statu_view, 1, 3, 4, 1)
        grid.addWidget(self.history_view, 1, 4, 4, 1)
        self.setLayout(grid)
        # Old-style PyQt4 signal/slot connections.
        self.connect(step_button, QtCore.SIGNAL('clicked()'), self.nextStep)
        self.connect(epoch_button, QtCore.SIGNAL('clicked()'), self.nextEpoch)
        self.connect(end_button, QtCore.SIGNAL('clicked()'), self.exeToEnd)

    def initScene(self, scene):
        """Populate *scene* with one node per man (left) and woman (right)."""
        man_num = self.exp.get_man_num()
        woman_num = self.exp.get_woman_num()
        length = max(man_num, woman_num) * 30  # 30 px of height per person
        scene.setSceneRect(0, 0, 600, length)
        for i in range(man_num):
            self.__addNode(scene, 'M_' + str(i), 120, i * 30, 20, 20, (0, 0, 255))
        for i in range(woman_num):
            self.__addNode(scene, 'W_' + str(i), 480, i * 30, 20, 20, (255, 0, 0))

    def __addNode(self, scene, name, x, y, w, h, color=(0, 0, 0)):
        """Add one person to *scene*.

        *name* has the form ``'<M|W>_<index>'``.  Besides the coloured
        ellipse the node gets three labels: the zero-padded index, the
        person's average rank, and a spouse-rank label (initially '-1')
        that is updated as the matching evolves.
        """
        sex = name.split('_')[0]
        number = name.split('_')[1]
        rank_bias = spouse_bias = rank = 0
        if sex == 'M':
            rank = self.man_rank[int(number)]
            rank_bias = -2.0   # men's labels sit to the left of the node
            spouse_bias = -4.0
        elif sex == 'W':
            rank = self.woman_rank[int(number)]
            rank_bias = 2.0    # women's labels sit to the right
            spouse_bias = 4.0
        node = Node(name)
        node.setRect(x, y, w, h)
        node.changeBrush(color, 1)  # brush style 1 == Qt.SolidPattern
        if int(number) < 10:
            number = '0' + number   # zero-pad so columns line up
        text = QtGui.QGraphicsTextItem(number, node)  # child of node
        text.setPos(x, y)
        text.setTextWidth(1.5 * w)
        font = QtGui.QFont('Times', 8)
        font.setWeight(99)
        text.setFont(font)
        rank_text = QtGui.QGraphicsTextItem(str(rank), node)
        rank_text.setPos(x + rank_bias * w, y)
        rank_text.setTextWidth(2 * w)
        font = QtGui.QFont('Times', 8)
        font.setWeight(99)
        rank_text.setFont(font)
        # The spouse label must be retrievable by name later (via
        # __changeText), hence the named Text subclass and separate addItem.
        spouse_text = Text(name + '_S', '-1')
        spouse_text.setPos(x + spouse_bias * w, y)
        spouse_text.setTextWidth(1.5 * w)
        font = QtGui.QFont('Times', 8)
        font.setWeight(99)
        spouse_text.setFont(font)
        scene.addItem(node)  # the two child labels are added with the node
        scene.addItem(spouse_text)

    def __addLink(self, scene, name, node1, node2, color=(0, 0, 0), link_type=''):
        """Draw a link between the centers of two nodes.

        The link is named '<man-index>-<woman-index>' (derived from the node
        names, not from *name*) so it can be found and removed later.
        """
        center1 = node1.boundingRect().center()
        center2 = node2.boundingRect().center()
        name1 = node1.getName().split('_')[1]
        name2 = node2.getName().split('_')[1]
        link = Link(name1 + '-' + name2, link_type)
        link.setLine(center1.x(), center1.y(), center2.x(), center2.y())
        link.changeColor(color)
        scene.addItem(link)

    def __deleteLink(self, scene, name):
        """Remove the link called *name* from *scene*, if it exists."""
        link = self.__findItem(name, Link, scene.items())
        # Guard: __findItem returns None when nothing matches; passing that
        # to removeItem would raise.
        if link is not None:
            scene.removeItem(link)

    def __changeText(self, scene, name, text):
        """Set the plain text of the Text item called *name*, if present."""
        txt = self.__findItem(name, Text, scene.items())
        if txt is not None:
            txt.setPlainText(text)

    def __findItem(self, name, _type, items):
        """Return the first item of class *_type* named *name*, else None."""
        for item in items:
            if isinstance(item, _type) and name == item.getName():
                return item
        return None

    def __clearLinks(self, scene):
        """Remove every non-marriage link (break/failed) from *scene*."""
        for item in scene.items():
            if isinstance(item, Link) and item.getType() != 'marry':
                scene.removeItem(item)

    def __clearUpLinks(self, scene):
        """Remove every link, marriages included, from *scene*."""
        for item in scene.items():
            if isinstance(item, Link):
                scene.removeItem(item)

    def __refreshViewStep(self, info):
        """Parse one step report (*info*) and update both scenes.

        Returns 0/1/2 for the early-exit statuses (DONE / not active /
        already married); returns None after a normal proposal line was
        rendered.
        """
        record = info.split('\n')
        length = len(record)
        lineiter = 0
        # epoch/step headers are parsed for documentation of the report
        # format but currently unused.
        epoch = record[lineiter].strip().split(':')[1]
        lineiter += 1
        step = record[lineiter].strip().split(':')[1]
        lineiter += 1
        statu = record[lineiter].strip()
        if 'DONE' in statu:
            return 0
        elif 'is not activity' in statu:
            return 1
        elif 'is married' in statu:
            return 2
        # statu looks like '<man> target <woman>'.
        couple = statu.replace(' ', '').split('target')
        man = self.__findItem('M_' + couple[0], Node, self.statu_scene.items())
        woman = self.__findItem('W_' + couple[1], Node, self.statu_scene.items())
        lineiter += 1
        sui_rank = record[lineiter].replace(' ', '').split(':')[1]
        lineiter += 1
        if 'Husband Rank' in record[lineiter]:
            husband_rank = record[lineiter].replace(' ', '').split(':')[1]
            lineiter += 1
        if 'Succeed' in record[lineiter]:
            # Marriage: draw it in both scenes and refresh the four spouse
            # labels (men are addressed with +index, women with -index).
            self.__addLink(self.statu_scene, couple[0] + '-' + couple[1], man, woman, link_type='marry')
            self.__addLink(self.history_scene, couple[0] + '-' + couple[1], man, woman, link_type='marry')
            self.__changeText(self.statu_scene, 'M_' + couple[0] + '_S', str(self.matching.get_spouse_rank(int(couple[0]) + 1)))
            self.__changeText(self.statu_scene, 'W_' + couple[1] + '_S', str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))
            self.__changeText(self.history_scene, 'M_' + couple[0] + '_S', str(self.matching.get_spouse_rank(int(couple[0]) + 1)))
            self.__changeText(self.history_scene, 'W_' + couple[1] + '_S', str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))
            lineiter += 1
            # BUG FIX: was 'lineiter <= length', which read record[lineiter]
            # out of range (IndexError) when the report ended right after
            # the 'Succeed' line.
            if lineiter < length:
                if 'threw away' in record[lineiter]:
                    # The woman dumped her previous partner for this man.
                    throwCouple = record[lineiter].replace(' ', '').split('threwaway')
                    node1 = self.__findItem('M_' + throwCouple[1], Node, self.history_scene.items())
                    node2 = self.__findItem('W_' + throwCouple[0], Node, self.history_scene.items())
                    self.__addLink(self.history_scene, throwCouple[1] + '-' + throwCouple[0], node1, node2, (0, 255, 0), 'break')
                    self.__deleteLink(self.statu_scene, throwCouple[1] + '-' + throwCouple[0])
                    self.__changeText(self.statu_scene, 'M_' + throwCouple[1] + '_S', '-1')
                    self.__changeText(self.history_scene, 'M_' + throwCouple[1] + '_S', '-1')
                    self.statu_view.update()
                    self.history_view.update()
        elif 'Failed' in record[lineiter]:
            # Rejected proposal: record it only in the history scene.
            self.__addLink(self.history_scene, couple[0] + '-' + couple[1], man, woman, (0, 0, 255), 'failed')
            self.statu_view.update()
            self.history_view.update()

    def nextStep(self):
        """Advance the matching by one proposal and redraw."""
        info = self.matching.step()
        self.showText.setText(info)
        self.__clearLinks(self.statu_scene)
        self.__clearUpLinks(self.history_scene)
        self.__refreshViewStep(info)

    def nextEpoch(self):
        """Advance the matching by one full epoch and redraw every step."""
        info = self.matching.epoch()
        self.__clearLinks(self.statu_scene)
        self.__clearUpLinks(self.history_scene)
        # The first line of the report is the per-step separator.
        sep = info.split('\n')[0]
        records = info.split(sep + '\n')
        del records[0]  # drop the empty chunk before the first separator
        for record in records:
            self.__refreshViewStep(sep + '\n' + record)
        self.showText.setText(info)

    def exeToEnd(self):
        """Run the matching to completion and redraw every recorded step."""
        info = self.matching.exe_to_end()
        self.__clearLinks(self.statu_scene)
        self.__clearUpLinks(self.history_scene)
        records = info.split('EPOCH')
        del records[0]  # drop the prefix before the first 'EPOCH' marker
        for record in records:
            self.__refreshViewStep('EPOCH' + record)
        self.showText.setText(info)

    def closeEvent(self, event):
        """Ask for confirmation before letting the window close."""
        reply = QtGui.QMessageBox.question(self, 'Message',
            'Are you sure to quit?', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
        if reply == QtGui.QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()
if __name__ == '__main__':
    # Launch the Qt event loop with a single maximized GUI window.
    application = QtGui.QApplication(sys.argv)
    window = GUI()
    window.show()
    sys.exit(application.exec_())
|
normal
|
{
"blob_id": "edbb721784dff81e3e1ab5e0458a4080508807fe",
"index": 4335,
"step-1": "<mask token>\n\n\nclass Text(QtGui.QGraphicsTextItem):\n <mask token>\n\n def getName(self):\n return self.__name\n\n\nclass GUI(QtGui.QWidget):\n\n def __init__(self):\n super(GUI, self).__init__()\n self.exp = experiment.Experiments(20, 3)\n self.matching = self.exp.unidirectional_match()\n self.man_rank, self.woman_rank = self.matching.get_avg_rank()\n self.man_spouse, self.woman_spouse = self.matching.get_spouse_rank()\n self.initUI()\n self.showMaximized()\n\n def initUI(self):\n self.setWindowTitle(' Stable Matching ')\n grid = QtGui.QGridLayout()\n step_button = QtGui.QPushButton('STEP', self)\n epoch_button = QtGui.QPushButton('EPOCH', self)\n end_button = QtGui.QPushButton('END', self)\n self.showText = QtGui.QTextEdit(self)\n self.showText.setText('START! ')\n self.statu_scene = QtGui.QGraphicsScene(self)\n self.initScene(self.statu_scene)\n self.statu_view = QtGui.QGraphicsView()\n self.statu_view.setScene(self.statu_scene)\n self.statu_view.setMinimumSize(600, 600)\n self.statu_view.show()\n self.history_scene = QtGui.QGraphicsScene(self)\n self.initScene(self.history_scene)\n self.history_view = QtGui.QGraphicsView()\n self.history_view.setScene(self.history_scene)\n self.history_view.setMinimumSize(600, 600)\n self.history_view.show()\n grid.addWidget(step_button, 1, 1)\n grid.addWidget(epoch_button, 2, 1)\n grid.addWidget(end_button, 3, 1)\n grid.addWidget(self.showText, 1, 2, 4, 1)\n grid.addWidget(self.statu_view, 1, 3, 4, 1)\n grid.addWidget(self.history_view, 1, 4, 4, 1)\n self.setLayout(grid)\n self.connect(step_button, QtCore.SIGNAL('clicked()'), self.nextStep)\n self.connect(epoch_button, QtCore.SIGNAL('clicked()'), self.nextEpoch)\n self.connect(end_button, QtCore.SIGNAL('clicked()'), self.exeToEnd)\n\n def initScene(self, scene):\n man_num = self.exp.get_man_num()\n woman_num = self.exp.get_woman_num()\n length = max(man_num, woman_num) * 30\n scene.setSceneRect(0, 0, 600, length)\n for i in range(man_num):\n node = 
self.__addNode(scene, 'M_' + str(i), 120, i * 30, 20, 20,\n (0, 0, 255))\n for i in range(woman_num):\n node = self.__addNode(scene, 'W_' + str(i), 480, i * 30, 20, 20,\n (255, 0, 0))\n\n def __addNode(self, scene, name, x, y, w, h, color=(0, 0, 0)):\n sex = name.split('_')[0]\n number = name.split('_')[1]\n rank_bias = spouse_bias = rank = 0\n if sex == 'M':\n rank = self.man_rank[int(number)]\n rank_bias = -2.0\n spouse_bias = -4.0\n elif sex == 'W':\n rank = self.woman_rank[int(number)]\n rank_bias = 2.0\n spouse_bias = 4.0\n node = Node(name)\n node.setRect(x, y, w, h)\n node.changeBrush(color, 1)\n if int(number) < 10:\n number = '0' + number\n text = QtGui.QGraphicsTextItem(number, node)\n text.setPos(x, y)\n text.setTextWidth(1.5 * w)\n font = QtGui.QFont('Times', 8)\n font.setWeight(99)\n text.setFont(font)\n rank_text = QtGui.QGraphicsTextItem(str(rank), node)\n rank_text.setPos(x + rank_bias * w, y)\n rank_text.setTextWidth(2 * w)\n font = QtGui.QFont('Times', 8)\n font.setWeight(99)\n rank_text.setFont(font)\n spouse_text = Text(name + '_S', '-1')\n spouse_text.setPos(x + spouse_bias * w, y)\n spouse_text.setTextWidth(1.5 * w)\n font = QtGui.QFont('Times', 8)\n font.setWeight(99)\n spouse_text.setFont(font)\n scene.addItem(node)\n scene.addItem(spouse_text)\n\n def __addLink(self, scene, name, node1, node2, color=(0, 0, 0),\n link_type=''):\n center1 = node1.boundingRect().center()\n center2 = node2.boundingRect().center()\n name1 = node1.getName().split('_')[1]\n name2 = node2.getName().split('_')[1]\n link = Link(name1 + '-' + name2, link_type)\n link.setLine(center1.x(), center1.y(), center2.x(), center2.y())\n link.changeColor(color)\n scene.addItem(link)\n\n def __deleteLink(self, scene, name):\n link = self.__findItem(name, Link, scene.items())\n scene.removeItem(link)\n\n def __changeText(self, scene, name, text):\n txt = self.__findItem(name, Text, scene.items())\n txt.setPlainText(text)\n\n def __findItem(self, name, _type, items):\n for item in 
items:\n if isinstance(item, _type) and name == item.getName():\n return item\n return False\n\n def __clearLinks(self, scene):\n for item in scene.items():\n if isinstance(item, Link) and item.getType() != 'marry':\n scene.removeItem(item)\n\n def __clearUpLinks(self, scene):\n for item in scene.items():\n if isinstance(item, Link):\n scene.removeItem(item)\n\n def __refreshViewStep(self, info):\n record = info.split('\\n')\n length = len(record)\n lineiter = 0\n epoch = record[lineiter].strip().split(':')[1]\n lineiter += 1\n step = record[lineiter].strip().split(':')[1]\n lineiter += 1\n statu = record[lineiter].strip()\n if 'DONE' in statu:\n return 0\n elif 'is not activity' in statu:\n return 1\n elif 'is married' in statu:\n return 2\n couple = statu.replace(' ', '').split('target')\n man = self.__findItem('M_' + couple[0], Node, self.statu_scene.items())\n woman = self.__findItem('W_' + couple[1], Node, self.statu_scene.\n items())\n lineiter += 1\n sui_rank = record[lineiter].replace(' ', '').split(':')[1]\n lineiter += 1\n if 'Husband Rank' in record[lineiter]:\n husband_rank = record[lineiter].replace(' ', '').split(':')[1]\n lineiter += 1\n if 'Succeed' in record[lineiter]:\n self.__addLink(self.statu_scene, couple[0] + '-' + couple[1],\n man, woman, link_type='marry')\n self.__addLink(self.history_scene, couple[0] + '-' + couple[1],\n man, woman, link_type='marry')\n self.__changeText(self.statu_scene, 'M_' + couple[0] + '_S',\n str(self.matching.get_spouse_rank(int(couple[0]) + 1)))\n self.__changeText(self.statu_scene, 'W_' + couple[1] + '_S',\n str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))\n self.__changeText(self.history_scene, 'M_' + couple[0] + '_S',\n str(self.matching.get_spouse_rank(int(couple[0]) + 1)))\n self.__changeText(self.history_scene, 'W_' + couple[1] + '_S',\n str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))\n lineiter += 1\n if lineiter <= length:\n if 'threw away' in record[lineiter]:\n throwCouple = 
record[lineiter].replace(' ', '').split(\n 'threwaway')\n node1 = self.__findItem('M_' + throwCouple[1], Node,\n self.history_scene.items())\n node2 = self.__findItem('W_' + throwCouple[0], Node,\n self.history_scene.items())\n self.__addLink(self.history_scene, throwCouple[1] + '-' +\n throwCouple[0], node1, node2, (0, 255, 0), 'break')\n self.__deleteLink(self.statu_scene, throwCouple[1] +\n '-' + throwCouple[0])\n self.__changeText(self.statu_scene, 'M_' + throwCouple[\n 1] + '_S', '-1')\n self.__changeText(self.history_scene, 'M_' +\n throwCouple[1] + '_S', '-1')\n self.statu_view.update()\n self.history_view.update()\n elif 'Failed' in record[lineiter]:\n self.__addLink(self.history_scene, couple[0] + '-' + couple[1],\n man, woman, (0, 0, 255), 'failed')\n self.statu_view.update()\n self.history_view.update()\n\n def nextStep(self):\n info = self.matching.step()\n self.showText.setText(info)\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n self.__refreshViewStep(info)\n\n def nextEpoch(self):\n info = self.matching.epoch()\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n sep = info.split('\\n')[0]\n records = info.split(sep + '\\n')\n del records[0]\n for record in records:\n self.__refreshViewStep(sep + '\\n' + record)\n self.showText.setText(info)\n\n def exeToEnd(self):\n info = self.matching.exe_to_end()\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n records = info.split('EPOCH')\n del records[0]\n for record in records:\n self.__refreshViewStep('EPOCH' + record)\n self.showText.setText(info)\n\n def closeEvent(self, event):\n reply = QtGui.QMessageBox.question(self, 'Message',\n 'Are you sure to quit?', QtGui.QMessageBox.Yes, QtGui.\n QMessageBox.No)\n if reply == QtGui.QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Link(QtGui.QGraphicsLineItem):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Text(QtGui.QGraphicsTextItem):\n\n def __init__(self, name, text):\n super(Text, self).__init__(text)\n self.__name = name\n\n def getName(self):\n return self.__name\n\n\nclass GUI(QtGui.QWidget):\n\n def __init__(self):\n super(GUI, self).__init__()\n self.exp = experiment.Experiments(20, 3)\n self.matching = self.exp.unidirectional_match()\n self.man_rank, self.woman_rank = self.matching.get_avg_rank()\n self.man_spouse, self.woman_spouse = self.matching.get_spouse_rank()\n self.initUI()\n self.showMaximized()\n\n def initUI(self):\n self.setWindowTitle(' Stable Matching ')\n grid = QtGui.QGridLayout()\n step_button = QtGui.QPushButton('STEP', self)\n epoch_button = QtGui.QPushButton('EPOCH', self)\n end_button = QtGui.QPushButton('END', self)\n self.showText = QtGui.QTextEdit(self)\n self.showText.setText('START! ')\n self.statu_scene = QtGui.QGraphicsScene(self)\n self.initScene(self.statu_scene)\n self.statu_view = QtGui.QGraphicsView()\n self.statu_view.setScene(self.statu_scene)\n self.statu_view.setMinimumSize(600, 600)\n self.statu_view.show()\n self.history_scene = QtGui.QGraphicsScene(self)\n self.initScene(self.history_scene)\n self.history_view = QtGui.QGraphicsView()\n self.history_view.setScene(self.history_scene)\n self.history_view.setMinimumSize(600, 600)\n self.history_view.show()\n grid.addWidget(step_button, 1, 1)\n grid.addWidget(epoch_button, 2, 1)\n grid.addWidget(end_button, 3, 1)\n grid.addWidget(self.showText, 1, 2, 4, 1)\n grid.addWidget(self.statu_view, 1, 3, 4, 1)\n grid.addWidget(self.history_view, 1, 4, 4, 1)\n self.setLayout(grid)\n self.connect(step_button, QtCore.SIGNAL('clicked()'), self.nextStep)\n self.connect(epoch_button, QtCore.SIGNAL('clicked()'), self.nextEpoch)\n self.connect(end_button, QtCore.SIGNAL('clicked()'), self.exeToEnd)\n\n def initScene(self, scene):\n man_num 
= self.exp.get_man_num()\n woman_num = self.exp.get_woman_num()\n length = max(man_num, woman_num) * 30\n scene.setSceneRect(0, 0, 600, length)\n for i in range(man_num):\n node = self.__addNode(scene, 'M_' + str(i), 120, i * 30, 20, 20,\n (0, 0, 255))\n for i in range(woman_num):\n node = self.__addNode(scene, 'W_' + str(i), 480, i * 30, 20, 20,\n (255, 0, 0))\n\n def __addNode(self, scene, name, x, y, w, h, color=(0, 0, 0)):\n sex = name.split('_')[0]\n number = name.split('_')[1]\n rank_bias = spouse_bias = rank = 0\n if sex == 'M':\n rank = self.man_rank[int(number)]\n rank_bias = -2.0\n spouse_bias = -4.0\n elif sex == 'W':\n rank = self.woman_rank[int(number)]\n rank_bias = 2.0\n spouse_bias = 4.0\n node = Node(name)\n node.setRect(x, y, w, h)\n node.changeBrush(color, 1)\n if int(number) < 10:\n number = '0' + number\n text = QtGui.QGraphicsTextItem(number, node)\n text.setPos(x, y)\n text.setTextWidth(1.5 * w)\n font = QtGui.QFont('Times', 8)\n font.setWeight(99)\n text.setFont(font)\n rank_text = QtGui.QGraphicsTextItem(str(rank), node)\n rank_text.setPos(x + rank_bias * w, y)\n rank_text.setTextWidth(2 * w)\n font = QtGui.QFont('Times', 8)\n font.setWeight(99)\n rank_text.setFont(font)\n spouse_text = Text(name + '_S', '-1')\n spouse_text.setPos(x + spouse_bias * w, y)\n spouse_text.setTextWidth(1.5 * w)\n font = QtGui.QFont('Times', 8)\n font.setWeight(99)\n spouse_text.setFont(font)\n scene.addItem(node)\n scene.addItem(spouse_text)\n\n def __addLink(self, scene, name, node1, node2, color=(0, 0, 0),\n link_type=''):\n center1 = node1.boundingRect().center()\n center2 = node2.boundingRect().center()\n name1 = node1.getName().split('_')[1]\n name2 = node2.getName().split('_')[1]\n link = Link(name1 + '-' + name2, link_type)\n link.setLine(center1.x(), center1.y(), center2.x(), center2.y())\n link.changeColor(color)\n scene.addItem(link)\n\n def __deleteLink(self, scene, name):\n link = self.__findItem(name, Link, scene.items())\n 
scene.removeItem(link)\n\n def __changeText(self, scene, name, text):\n txt = self.__findItem(name, Text, scene.items())\n txt.setPlainText(text)\n\n def __findItem(self, name, _type, items):\n for item in items:\n if isinstance(item, _type) and name == item.getName():\n return item\n return False\n\n def __clearLinks(self, scene):\n for item in scene.items():\n if isinstance(item, Link) and item.getType() != 'marry':\n scene.removeItem(item)\n\n def __clearUpLinks(self, scene):\n for item in scene.items():\n if isinstance(item, Link):\n scene.removeItem(item)\n\n def __refreshViewStep(self, info):\n record = info.split('\\n')\n length = len(record)\n lineiter = 0\n epoch = record[lineiter].strip().split(':')[1]\n lineiter += 1\n step = record[lineiter].strip().split(':')[1]\n lineiter += 1\n statu = record[lineiter].strip()\n if 'DONE' in statu:\n return 0\n elif 'is not activity' in statu:\n return 1\n elif 'is married' in statu:\n return 2\n couple = statu.replace(' ', '').split('target')\n man = self.__findItem('M_' + couple[0], Node, self.statu_scene.items())\n woman = self.__findItem('W_' + couple[1], Node, self.statu_scene.\n items())\n lineiter += 1\n sui_rank = record[lineiter].replace(' ', '').split(':')[1]\n lineiter += 1\n if 'Husband Rank' in record[lineiter]:\n husband_rank = record[lineiter].replace(' ', '').split(':')[1]\n lineiter += 1\n if 'Succeed' in record[lineiter]:\n self.__addLink(self.statu_scene, couple[0] + '-' + couple[1],\n man, woman, link_type='marry')\n self.__addLink(self.history_scene, couple[0] + '-' + couple[1],\n man, woman, link_type='marry')\n self.__changeText(self.statu_scene, 'M_' + couple[0] + '_S',\n str(self.matching.get_spouse_rank(int(couple[0]) + 1)))\n self.__changeText(self.statu_scene, 'W_' + couple[1] + '_S',\n str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))\n self.__changeText(self.history_scene, 'M_' + couple[0] + '_S',\n str(self.matching.get_spouse_rank(int(couple[0]) + 1)))\n 
self.__changeText(self.history_scene, 'W_' + couple[1] + '_S',\n str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))\n lineiter += 1\n if lineiter <= length:\n if 'threw away' in record[lineiter]:\n throwCouple = record[lineiter].replace(' ', '').split(\n 'threwaway')\n node1 = self.__findItem('M_' + throwCouple[1], Node,\n self.history_scene.items())\n node2 = self.__findItem('W_' + throwCouple[0], Node,\n self.history_scene.items())\n self.__addLink(self.history_scene, throwCouple[1] + '-' +\n throwCouple[0], node1, node2, (0, 255, 0), 'break')\n self.__deleteLink(self.statu_scene, throwCouple[1] +\n '-' + throwCouple[0])\n self.__changeText(self.statu_scene, 'M_' + throwCouple[\n 1] + '_S', '-1')\n self.__changeText(self.history_scene, 'M_' +\n throwCouple[1] + '_S', '-1')\n self.statu_view.update()\n self.history_view.update()\n elif 'Failed' in record[lineiter]:\n self.__addLink(self.history_scene, couple[0] + '-' + couple[1],\n man, woman, (0, 0, 255), 'failed')\n self.statu_view.update()\n self.history_view.update()\n\n def nextStep(self):\n info = self.matching.step()\n self.showText.setText(info)\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n self.__refreshViewStep(info)\n\n def nextEpoch(self):\n info = self.matching.epoch()\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n sep = info.split('\\n')[0]\n records = info.split(sep + '\\n')\n del records[0]\n for record in records:\n self.__refreshViewStep(sep + '\\n' + record)\n self.showText.setText(info)\n\n def exeToEnd(self):\n info = self.matching.exe_to_end()\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n records = info.split('EPOCH')\n del records[0]\n for record in records:\n self.__refreshViewStep('EPOCH' + record)\n self.showText.setText(info)\n\n def closeEvent(self, event):\n reply = QtGui.QMessageBox.question(self, 'Message',\n 'Are you sure to quit?', QtGui.QMessageBox.Yes, QtGui.\n 
QMessageBox.No)\n if reply == QtGui.QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Link(QtGui.QGraphicsLineItem):\n\n def __init__(self, name, link_type):\n super(Link, self).__init__()\n self.__link_type = link_type\n self.__name = name\n <mask token>\n\n def getType(self):\n return self.__link_type\n\n def changeType(self, link_type):\n self.__link_type = link_type\n <mask token>\n\n\nclass Text(QtGui.QGraphicsTextItem):\n\n def __init__(self, name, text):\n super(Text, self).__init__(text)\n self.__name = name\n\n def getName(self):\n return self.__name\n\n\nclass GUI(QtGui.QWidget):\n\n def __init__(self):\n super(GUI, self).__init__()\n self.exp = experiment.Experiments(20, 3)\n self.matching = self.exp.unidirectional_match()\n self.man_rank, self.woman_rank = self.matching.get_avg_rank()\n self.man_spouse, self.woman_spouse = self.matching.get_spouse_rank()\n self.initUI()\n self.showMaximized()\n\n def initUI(self):\n self.setWindowTitle(' Stable Matching ')\n grid = QtGui.QGridLayout()\n step_button = QtGui.QPushButton('STEP', self)\n epoch_button = QtGui.QPushButton('EPOCH', self)\n end_button = QtGui.QPushButton('END', self)\n self.showText = QtGui.QTextEdit(self)\n self.showText.setText('START! 
')\n self.statu_scene = QtGui.QGraphicsScene(self)\n self.initScene(self.statu_scene)\n self.statu_view = QtGui.QGraphicsView()\n self.statu_view.setScene(self.statu_scene)\n self.statu_view.setMinimumSize(600, 600)\n self.statu_view.show()\n self.history_scene = QtGui.QGraphicsScene(self)\n self.initScene(self.history_scene)\n self.history_view = QtGui.QGraphicsView()\n self.history_view.setScene(self.history_scene)\n self.history_view.setMinimumSize(600, 600)\n self.history_view.show()\n grid.addWidget(step_button, 1, 1)\n grid.addWidget(epoch_button, 2, 1)\n grid.addWidget(end_button, 3, 1)\n grid.addWidget(self.showText, 1, 2, 4, 1)\n grid.addWidget(self.statu_view, 1, 3, 4, 1)\n grid.addWidget(self.history_view, 1, 4, 4, 1)\n self.setLayout(grid)\n self.connect(step_button, QtCore.SIGNAL('clicked()'), self.nextStep)\n self.connect(epoch_button, QtCore.SIGNAL('clicked()'), self.nextEpoch)\n self.connect(end_button, QtCore.SIGNAL('clicked()'), self.exeToEnd)\n\n def initScene(self, scene):\n man_num = self.exp.get_man_num()\n woman_num = self.exp.get_woman_num()\n length = max(man_num, woman_num) * 30\n scene.setSceneRect(0, 0, 600, length)\n for i in range(man_num):\n node = self.__addNode(scene, 'M_' + str(i), 120, i * 30, 20, 20,\n (0, 0, 255))\n for i in range(woman_num):\n node = self.__addNode(scene, 'W_' + str(i), 480, i * 30, 20, 20,\n (255, 0, 0))\n\n def __addNode(self, scene, name, x, y, w, h, color=(0, 0, 0)):\n sex = name.split('_')[0]\n number = name.split('_')[1]\n rank_bias = spouse_bias = rank = 0\n if sex == 'M':\n rank = self.man_rank[int(number)]\n rank_bias = -2.0\n spouse_bias = -4.0\n elif sex == 'W':\n rank = self.woman_rank[int(number)]\n rank_bias = 2.0\n spouse_bias = 4.0\n node = Node(name)\n node.setRect(x, y, w, h)\n node.changeBrush(color, 1)\n if int(number) < 10:\n number = '0' + number\n text = QtGui.QGraphicsTextItem(number, node)\n text.setPos(x, y)\n text.setTextWidth(1.5 * w)\n font = QtGui.QFont('Times', 8)\n 
font.setWeight(99)\n text.setFont(font)\n rank_text = QtGui.QGraphicsTextItem(str(rank), node)\n rank_text.setPos(x + rank_bias * w, y)\n rank_text.setTextWidth(2 * w)\n font = QtGui.QFont('Times', 8)\n font.setWeight(99)\n rank_text.setFont(font)\n spouse_text = Text(name + '_S', '-1')\n spouse_text.setPos(x + spouse_bias * w, y)\n spouse_text.setTextWidth(1.5 * w)\n font = QtGui.QFont('Times', 8)\n font.setWeight(99)\n spouse_text.setFont(font)\n scene.addItem(node)\n scene.addItem(spouse_text)\n\n def __addLink(self, scene, name, node1, node2, color=(0, 0, 0),\n link_type=''):\n center1 = node1.boundingRect().center()\n center2 = node2.boundingRect().center()\n name1 = node1.getName().split('_')[1]\n name2 = node2.getName().split('_')[1]\n link = Link(name1 + '-' + name2, link_type)\n link.setLine(center1.x(), center1.y(), center2.x(), center2.y())\n link.changeColor(color)\n scene.addItem(link)\n\n def __deleteLink(self, scene, name):\n link = self.__findItem(name, Link, scene.items())\n scene.removeItem(link)\n\n def __changeText(self, scene, name, text):\n txt = self.__findItem(name, Text, scene.items())\n txt.setPlainText(text)\n\n def __findItem(self, name, _type, items):\n for item in items:\n if isinstance(item, _type) and name == item.getName():\n return item\n return False\n\n def __clearLinks(self, scene):\n for item in scene.items():\n if isinstance(item, Link) and item.getType() != 'marry':\n scene.removeItem(item)\n\n def __clearUpLinks(self, scene):\n for item in scene.items():\n if isinstance(item, Link):\n scene.removeItem(item)\n\n def __refreshViewStep(self, info):\n record = info.split('\\n')\n length = len(record)\n lineiter = 0\n epoch = record[lineiter].strip().split(':')[1]\n lineiter += 1\n step = record[lineiter].strip().split(':')[1]\n lineiter += 1\n statu = record[lineiter].strip()\n if 'DONE' in statu:\n return 0\n elif 'is not activity' in statu:\n return 1\n elif 'is married' in statu:\n return 2\n couple = statu.replace(' ', 
'').split('target')\n man = self.__findItem('M_' + couple[0], Node, self.statu_scene.items())\n woman = self.__findItem('W_' + couple[1], Node, self.statu_scene.\n items())\n lineiter += 1\n sui_rank = record[lineiter].replace(' ', '').split(':')[1]\n lineiter += 1\n if 'Husband Rank' in record[lineiter]:\n husband_rank = record[lineiter].replace(' ', '').split(':')[1]\n lineiter += 1\n if 'Succeed' in record[lineiter]:\n self.__addLink(self.statu_scene, couple[0] + '-' + couple[1],\n man, woman, link_type='marry')\n self.__addLink(self.history_scene, couple[0] + '-' + couple[1],\n man, woman, link_type='marry')\n self.__changeText(self.statu_scene, 'M_' + couple[0] + '_S',\n str(self.matching.get_spouse_rank(int(couple[0]) + 1)))\n self.__changeText(self.statu_scene, 'W_' + couple[1] + '_S',\n str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))\n self.__changeText(self.history_scene, 'M_' + couple[0] + '_S',\n str(self.matching.get_spouse_rank(int(couple[0]) + 1)))\n self.__changeText(self.history_scene, 'W_' + couple[1] + '_S',\n str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))\n lineiter += 1\n if lineiter <= length:\n if 'threw away' in record[lineiter]:\n throwCouple = record[lineiter].replace(' ', '').split(\n 'threwaway')\n node1 = self.__findItem('M_' + throwCouple[1], Node,\n self.history_scene.items())\n node2 = self.__findItem('W_' + throwCouple[0], Node,\n self.history_scene.items())\n self.__addLink(self.history_scene, throwCouple[1] + '-' +\n throwCouple[0], node1, node2, (0, 255, 0), 'break')\n self.__deleteLink(self.statu_scene, throwCouple[1] +\n '-' + throwCouple[0])\n self.__changeText(self.statu_scene, 'M_' + throwCouple[\n 1] + '_S', '-1')\n self.__changeText(self.history_scene, 'M_' +\n throwCouple[1] + '_S', '-1')\n self.statu_view.update()\n self.history_view.update()\n elif 'Failed' in record[lineiter]:\n self.__addLink(self.history_scene, couple[0] + '-' + couple[1],\n man, woman, (0, 0, 255), 'failed')\n 
self.statu_view.update()\n self.history_view.update()\n\n def nextStep(self):\n info = self.matching.step()\n self.showText.setText(info)\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n self.__refreshViewStep(info)\n\n def nextEpoch(self):\n info = self.matching.epoch()\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n sep = info.split('\\n')[0]\n records = info.split(sep + '\\n')\n del records[0]\n for record in records:\n self.__refreshViewStep(sep + '\\n' + record)\n self.showText.setText(info)\n\n def exeToEnd(self):\n info = self.matching.exe_to_end()\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n records = info.split('EPOCH')\n del records[0]\n for record in records:\n self.__refreshViewStep('EPOCH' + record)\n self.showText.setText(info)\n\n def closeEvent(self, event):\n reply = QtGui.QMessageBox.question(self, 'Message',\n 'Are you sure to quit?', QtGui.QMessageBox.Yes, QtGui.\n QMessageBox.No)\n if reply == QtGui.QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Link(QtGui.QGraphicsLineItem):\n\n def __init__(self, name, link_type):\n super(Link, self).__init__()\n self.__link_type = link_type\n self.__name = name\n\n def getName(self):\n return self.__name\n\n def getType(self):\n return self.__link_type\n\n def changeType(self, link_type):\n self.__link_type = link_type\n\n def changeColor(self, color):\n p = QtGui.QPen()\n c = p.color()\n c.setRgb(color[0], color[1], color[2])\n p.setColor(c)\n self.setPen(p)\n\n\nclass Text(QtGui.QGraphicsTextItem):\n\n def __init__(self, name, text):\n super(Text, self).__init__(text)\n self.__name = name\n\n def getName(self):\n return self.__name\n\n\nclass GUI(QtGui.QWidget):\n\n def __init__(self):\n super(GUI, self).__init__()\n self.exp = experiment.Experiments(20, 3)\n self.matching = self.exp.unidirectional_match()\n self.man_rank, self.woman_rank = self.matching.get_avg_rank()\n self.man_spouse, self.woman_spouse = self.matching.get_spouse_rank()\n self.initUI()\n self.showMaximized()\n\n def initUI(self):\n self.setWindowTitle(' Stable Matching ')\n grid = QtGui.QGridLayout()\n step_button = QtGui.QPushButton('STEP', self)\n epoch_button = QtGui.QPushButton('EPOCH', self)\n end_button = QtGui.QPushButton('END', self)\n self.showText = QtGui.QTextEdit(self)\n self.showText.setText('START! 
')\n self.statu_scene = QtGui.QGraphicsScene(self)\n self.initScene(self.statu_scene)\n self.statu_view = QtGui.QGraphicsView()\n self.statu_view.setScene(self.statu_scene)\n self.statu_view.setMinimumSize(600, 600)\n self.statu_view.show()\n self.history_scene = QtGui.QGraphicsScene(self)\n self.initScene(self.history_scene)\n self.history_view = QtGui.QGraphicsView()\n self.history_view.setScene(self.history_scene)\n self.history_view.setMinimumSize(600, 600)\n self.history_view.show()\n grid.addWidget(step_button, 1, 1)\n grid.addWidget(epoch_button, 2, 1)\n grid.addWidget(end_button, 3, 1)\n grid.addWidget(self.showText, 1, 2, 4, 1)\n grid.addWidget(self.statu_view, 1, 3, 4, 1)\n grid.addWidget(self.history_view, 1, 4, 4, 1)\n self.setLayout(grid)\n self.connect(step_button, QtCore.SIGNAL('clicked()'), self.nextStep)\n self.connect(epoch_button, QtCore.SIGNAL('clicked()'), self.nextEpoch)\n self.connect(end_button, QtCore.SIGNAL('clicked()'), self.exeToEnd)\n\n def initScene(self, scene):\n man_num = self.exp.get_man_num()\n woman_num = self.exp.get_woman_num()\n length = max(man_num, woman_num) * 30\n scene.setSceneRect(0, 0, 600, length)\n for i in range(man_num):\n node = self.__addNode(scene, 'M_' + str(i), 120, i * 30, 20, 20,\n (0, 0, 255))\n for i in range(woman_num):\n node = self.__addNode(scene, 'W_' + str(i), 480, i * 30, 20, 20,\n (255, 0, 0))\n\n def __addNode(self, scene, name, x, y, w, h, color=(0, 0, 0)):\n sex = name.split('_')[0]\n number = name.split('_')[1]\n rank_bias = spouse_bias = rank = 0\n if sex == 'M':\n rank = self.man_rank[int(number)]\n rank_bias = -2.0\n spouse_bias = -4.0\n elif sex == 'W':\n rank = self.woman_rank[int(number)]\n rank_bias = 2.0\n spouse_bias = 4.0\n node = Node(name)\n node.setRect(x, y, w, h)\n node.changeBrush(color, 1)\n if int(number) < 10:\n number = '0' + number\n text = QtGui.QGraphicsTextItem(number, node)\n text.setPos(x, y)\n text.setTextWidth(1.5 * w)\n font = QtGui.QFont('Times', 8)\n 
font.setWeight(99)\n text.setFont(font)\n rank_text = QtGui.QGraphicsTextItem(str(rank), node)\n rank_text.setPos(x + rank_bias * w, y)\n rank_text.setTextWidth(2 * w)\n font = QtGui.QFont('Times', 8)\n font.setWeight(99)\n rank_text.setFont(font)\n spouse_text = Text(name + '_S', '-1')\n spouse_text.setPos(x + spouse_bias * w, y)\n spouse_text.setTextWidth(1.5 * w)\n font = QtGui.QFont('Times', 8)\n font.setWeight(99)\n spouse_text.setFont(font)\n scene.addItem(node)\n scene.addItem(spouse_text)\n\n def __addLink(self, scene, name, node1, node2, color=(0, 0, 0),\n link_type=''):\n center1 = node1.boundingRect().center()\n center2 = node2.boundingRect().center()\n name1 = node1.getName().split('_')[1]\n name2 = node2.getName().split('_')[1]\n link = Link(name1 + '-' + name2, link_type)\n link.setLine(center1.x(), center1.y(), center2.x(), center2.y())\n link.changeColor(color)\n scene.addItem(link)\n\n def __deleteLink(self, scene, name):\n link = self.__findItem(name, Link, scene.items())\n scene.removeItem(link)\n\n def __changeText(self, scene, name, text):\n txt = self.__findItem(name, Text, scene.items())\n txt.setPlainText(text)\n\n def __findItem(self, name, _type, items):\n for item in items:\n if isinstance(item, _type) and name == item.getName():\n return item\n return False\n\n def __clearLinks(self, scene):\n for item in scene.items():\n if isinstance(item, Link) and item.getType() != 'marry':\n scene.removeItem(item)\n\n def __clearUpLinks(self, scene):\n for item in scene.items():\n if isinstance(item, Link):\n scene.removeItem(item)\n\n def __refreshViewStep(self, info):\n record = info.split('\\n')\n length = len(record)\n lineiter = 0\n epoch = record[lineiter].strip().split(':')[1]\n lineiter += 1\n step = record[lineiter].strip().split(':')[1]\n lineiter += 1\n statu = record[lineiter].strip()\n if 'DONE' in statu:\n return 0\n elif 'is not activity' in statu:\n return 1\n elif 'is married' in statu:\n return 2\n couple = statu.replace(' ', 
'').split('target')\n man = self.__findItem('M_' + couple[0], Node, self.statu_scene.items())\n woman = self.__findItem('W_' + couple[1], Node, self.statu_scene.\n items())\n lineiter += 1\n sui_rank = record[lineiter].replace(' ', '').split(':')[1]\n lineiter += 1\n if 'Husband Rank' in record[lineiter]:\n husband_rank = record[lineiter].replace(' ', '').split(':')[1]\n lineiter += 1\n if 'Succeed' in record[lineiter]:\n self.__addLink(self.statu_scene, couple[0] + '-' + couple[1],\n man, woman, link_type='marry')\n self.__addLink(self.history_scene, couple[0] + '-' + couple[1],\n man, woman, link_type='marry')\n self.__changeText(self.statu_scene, 'M_' + couple[0] + '_S',\n str(self.matching.get_spouse_rank(int(couple[0]) + 1)))\n self.__changeText(self.statu_scene, 'W_' + couple[1] + '_S',\n str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))\n self.__changeText(self.history_scene, 'M_' + couple[0] + '_S',\n str(self.matching.get_spouse_rank(int(couple[0]) + 1)))\n self.__changeText(self.history_scene, 'W_' + couple[1] + '_S',\n str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))\n lineiter += 1\n if lineiter <= length:\n if 'threw away' in record[lineiter]:\n throwCouple = record[lineiter].replace(' ', '').split(\n 'threwaway')\n node1 = self.__findItem('M_' + throwCouple[1], Node,\n self.history_scene.items())\n node2 = self.__findItem('W_' + throwCouple[0], Node,\n self.history_scene.items())\n self.__addLink(self.history_scene, throwCouple[1] + '-' +\n throwCouple[0], node1, node2, (0, 255, 0), 'break')\n self.__deleteLink(self.statu_scene, throwCouple[1] +\n '-' + throwCouple[0])\n self.__changeText(self.statu_scene, 'M_' + throwCouple[\n 1] + '_S', '-1')\n self.__changeText(self.history_scene, 'M_' +\n throwCouple[1] + '_S', '-1')\n self.statu_view.update()\n self.history_view.update()\n elif 'Failed' in record[lineiter]:\n self.__addLink(self.history_scene, couple[0] + '-' + couple[1],\n man, woman, (0, 0, 255), 'failed')\n 
self.statu_view.update()\n self.history_view.update()\n\n def nextStep(self):\n info = self.matching.step()\n self.showText.setText(info)\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n self.__refreshViewStep(info)\n\n def nextEpoch(self):\n info = self.matching.epoch()\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n sep = info.split('\\n')[0]\n records = info.split(sep + '\\n')\n del records[0]\n for record in records:\n self.__refreshViewStep(sep + '\\n' + record)\n self.showText.setText(info)\n\n def exeToEnd(self):\n info = self.matching.exe_to_end()\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n records = info.split('EPOCH')\n del records[0]\n for record in records:\n self.__refreshViewStep('EPOCH' + record)\n self.showText.setText(info)\n\n def closeEvent(self, event):\n reply = QtGui.QMessageBox.question(self, 'Message',\n 'Are you sure to quit?', QtGui.QMessageBox.Yes, QtGui.\n QMessageBox.No)\n if reply == QtGui.QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n\n<mask token>\n",
"step-5": "# -*- coding:utf-8 -*- \nimport sys\nfrom PyQt4 import QtGui,QtCore\nimport experiment\n\nclass Node(QtGui.QGraphicsEllipseItem):\n def __init__(self,name):\n super(Node, self).__init__()\n self.__name = name\n \n def getName(self):\n return self.__name\n def changeBrush(self, color, style):\n b = QtGui.QBrush()\n b.setStyle(style)\n c = b.color()\n c.setRgb(color[0],color[1],color[2])\n b.setColor(c)\n self.setBrush(b)\n\nclass Link(QtGui.QGraphicsLineItem):\n def __init__(self,name,link_type):\n super(Link, self).__init__()\n self.__link_type = link_type\n self.__name = name\n def getName(self):\n return self.__name\n def getType(self):\n return self.__link_type\n def changeType(self,link_type):\n self.__link_type = link_type\n def changeColor(self,color):\n p = QtGui.QPen()\n c = p.color()\n c.setRgb(color[0],color[1],color[2])\n p.setColor(c)\n self.setPen(p)\n\nclass Text(QtGui.QGraphicsTextItem):\n def __init__(self,name,text):\n super(Text, self).__init__(text)\n self.__name = name\n def getName(self):\n return self.__name\n \nclass GUI(QtGui.QWidget):\n\n def __init__(self):\n super(GUI, self).__init__()\n self.exp = experiment.Experiments(20,3)\n self.matching = self.exp.unidirectional_match()\n self.man_rank, self.woman_rank = self.matching.get_avg_rank()\n self.man_spouse, self.woman_spouse = self.matching.get_spouse_rank()\n self.initUI()\n self.showMaximized()\n \n def initUI(self):\n self.setWindowTitle(' Stable Matching ')\n grid = QtGui.QGridLayout()\n step_button = QtGui.QPushButton('STEP',self)\n epoch_button = QtGui.QPushButton('EPOCH',self)\n end_button = QtGui.QPushButton('END',self)\n self.showText = QtGui.QTextEdit(self)\n self.showText.setText('START! 
')\n\n self.statu_scene = QtGui.QGraphicsScene(self)\n self.initScene(self.statu_scene)\n self.statu_view = QtGui.QGraphicsView()\n self.statu_view.setScene(self.statu_scene) \n self.statu_view.setMinimumSize(600,600)\n self.statu_view.show()\n \n self.history_scene = QtGui.QGraphicsScene(self)\n self.initScene(self.history_scene)\n self.history_view = QtGui.QGraphicsView()\n self.history_view.setScene(self.history_scene)\n self.history_view.setMinimumSize(600,600)\n self.history_view.show()\n \n grid.addWidget(step_button,1,1)\n grid.addWidget(epoch_button,2,1)\n grid.addWidget(end_button,3,1)\n grid.addWidget(self.showText,1,2,4,1)\n grid.addWidget(self.statu_view,1,3,4,1)\n grid.addWidget(self.history_view,1,4,4,1)\n self.setLayout(grid)\n \n self.connect(step_button,QtCore.SIGNAL('clicked()'),self.nextStep)\n self.connect(epoch_button,QtCore.SIGNAL('clicked()'),self.nextEpoch)\n self.connect(end_button,QtCore.SIGNAL('clicked()'),self.exeToEnd)\n\n def initScene(self,scene):\n man_num = self.exp.get_man_num()\n woman_num = self.exp.get_woman_num()\n length = max(man_num,woman_num) * 30\n scene.setSceneRect(0,0,600,length)\n for i in range(man_num):\n node = self.__addNode(scene, 'M_'+str(i),120,i*30,20,20,(0,0,255))\n \n for i in range(woman_num):\n node = self.__addNode(scene, 'W_'+str(i),480,i*30,20,20,(255,0,0))\n \n def __addNode(self, scene, name, x, y, w, h, color=(0,0,0)):\n sex = name.split('_')[0]\n number = name.split('_')[1]\n rank_bias = spouse_bias = rank = 0\n if sex == 'M':\n rank = self.man_rank[int(number)]\n rank_bias = -2.0\n spouse_bias = -4.0\n elif sex == 'W':\n rank = self.woman_rank[int(number)]\n rank_bias = 2.0\n spouse_bias = 4.0\n node = Node(name)\n node.setRect(x,y,w,h)\n node.changeBrush(color,1)\n if int(number) < 10:\n number = '0' + number\n \n text = QtGui.QGraphicsTextItem (number, node)\n text.setPos(x,y)\n text.setTextWidth(1.5*w)\n font = QtGui.QFont('Times',8)\n font.setWeight(99)\n text.setFont(font)\n\n rank_text = 
QtGui.QGraphicsTextItem (str(rank), node)\n rank_text.setPos(x + rank_bias*w,y)\n rank_text.setTextWidth(2*w)\n font = QtGui.QFont('Times',8)\n font.setWeight(99)\n rank_text.setFont(font)\n\n spouse_text = Text(name+'_S', '-1')\n spouse_text.setPos(x + spouse_bias*w,y)\n spouse_text.setTextWidth(1.5*w)\n font = QtGui.QFont('Times',8)\n font.setWeight(99)\n spouse_text.setFont(font)\n \n scene.addItem(node)\n scene.addItem(spouse_text)\n\n def __addLink(self, scene, name, node1, node2, color = (0,0,0), link_type = ''):\n center1 = node1.boundingRect().center()\n center2 = node2.boundingRect().center()\n name1 = node1.getName().split('_')[1]\n name2 = node2.getName().split('_')[1]\n link = Link(name1 + '-' + name2, link_type)\n link.setLine(center1.x(),center1.y(),center2.x(),center2.y())\n link.changeColor(color)\n scene.addItem(link)\n \n def __deleteLink(self, scene, name):\n link = self.__findItem(name, Link, scene.items())\n scene.removeItem(link)\n\n def __changeText(self, scene, name, text):\n txt = self.__findItem(name, Text, scene.items())\n txt.setPlainText(text)\n \n def __findItem(self, name, _type, items):\n for item in items:\n if isinstance(item, _type) and name == item.getName():\n return item\n return False\n\n def __clearLinks(self, scene):\n for item in scene.items():\n if isinstance(item,Link) and item.getType() != 'marry':\n scene.removeItem(item)\n\n def __clearUpLinks(self, scene):\n for item in scene.items():\n if isinstance(item, Link):\n scene.removeItem(item)\n\n def __refreshViewStep(self, info):\n record = info.split('\\n')\n length = len(record)\n lineiter = 0\n epoch = record[lineiter].strip().split(':')[1]\n lineiter += 1\n step = record[lineiter].strip().split(':')[1]\n lineiter += 1\n statu = record[lineiter].strip()\n if 'DONE' in statu:\n return 0\n elif 'is not activity' in statu:\n return 1\n elif 'is married' in statu:\n return 2 \n couple = statu.replace(' ','').split('target')\n man = self.__findItem('M_'+couple[0], Node, 
self.statu_scene.items())\n woman = self.__findItem('W_'+couple[1], Node, self.statu_scene.items())\n lineiter += 1\n sui_rank = record[lineiter].replace(' ','').split(':')[1]\n lineiter += 1\n if 'Husband Rank' in record[lineiter]:\n husband_rank = record[lineiter].replace(' ','').split(':')[1]\n lineiter += 1\n if 'Succeed' in record[lineiter]:\n self.__addLink(self.statu_scene, couple[0] + '-' + couple[1], man, woman, link_type = 'marry')\n self.__addLink(self.history_scene, couple[0] + '-' + couple[1], man, woman, link_type = 'marry')\n self.__changeText(self.statu_scene, 'M_' + couple[0] + '_S', str(self.matching.get_spouse_rank(int(couple[0]) + 1)))\n self.__changeText(self.statu_scene, 'W_' + couple[1] + '_S', str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))\n self.__changeText(self.history_scene, 'M_' + couple[0] + '_S', str(self.matching.get_spouse_rank(int(couple[0]) + 1)))\n self.__changeText(self.history_scene, 'W_' + couple[1] + '_S', str(self.matching.get_spouse_rank(-(int(couple[1]) + 1))))\n lineiter += 1\n if lineiter <= length:\n if 'threw away' in record[lineiter]:\n throwCouple = record[lineiter].replace(' ','').split('threwaway')\n node1 = self.__findItem('M_' + throwCouple[1], Node, self.history_scene.items())\n node2 = self.__findItem('W_' + throwCouple[0], Node, self.history_scene.items())\n self.__addLink(self.history_scene, throwCouple[1] + '-' + throwCouple[0], node1, node2, (0,255,0) , 'break')\n self.__deleteLink(self.statu_scene, throwCouple[1] + '-' + throwCouple[0])\n self.__changeText(self.statu_scene, 'M_' + throwCouple[1] + '_S', '-1')\n self.__changeText(self.history_scene, 'M_' + throwCouple[1] + '_S', '-1')\n self.statu_view.update()\n self.history_view.update()\n elif 'Failed' in record[lineiter]:\n self.__addLink(self.history_scene, couple[0] + '-' + couple[1], man, woman, (0,0,255) , 'failed')\n self.statu_view.update()\n self.history_view.update()\n \n def nextStep(self):\n info = self.matching.step()\n 
self.showText.setText(info)\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n self.__refreshViewStep(info) \n\n def nextEpoch(self):\n info = self.matching.epoch()\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n sep = info.split('\\n')[0]\n records = info.split(sep+'\\n')\n del records[0]\n for record in records:\n self.__refreshViewStep(sep+'\\n'+record) \n self.showText.setText(info)\n\n def exeToEnd(self):\n info = self.matching.exe_to_end()\n self.__clearLinks(self.statu_scene)\n self.__clearUpLinks(self.history_scene)\n records = info.split('EPOCH')\n del records[0]\n for record in records:\n self.__refreshViewStep('EPOCH'+record)\n \n self.showText.setText(info)\n \n def closeEvent(self, event):\n reply = QtGui.QMessageBox.question(self, 'Message',\n 'Are you sure to quit?', QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)\n if reply == QtGui.QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n gui = GUI()\n gui.show()\n sys.exit(app.exec_())\n",
"step-ids": [
18,
20,
23,
25,
32
]
}
|
[
18,
20,
23,
25,
32
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def use_flask() ->bool:
env_var = BoolVar('USE_FLASK', False)
return EnvReader().safe_read(env_var)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def use_flask() ->bool:
env_var = BoolVar('USE_FLASK', False)
return EnvReader().safe_read(env_var)
if __name__ == '__main__':
if use_flask():
flask.run()
else:
gunicorn.run()
<|reserved_special_token_1|>
import server.wsgi as flask
import server.grunner as gunicorn
from utils.cfgreader import EnvReader, BoolVar
def use_flask() ->bool:
env_var = BoolVar('USE_FLASK', False)
return EnvReader().safe_read(env_var)
if __name__ == '__main__':
if use_flask():
flask.run()
else:
gunicorn.run()
<|reserved_special_token_1|>
import server.wsgi as flask
import server.grunner as gunicorn
from utils.cfgreader import EnvReader, BoolVar
def use_flask() -> bool:
env_var = BoolVar('USE_FLASK', False)
return EnvReader().safe_read(env_var)
if __name__ == '__main__':
if use_flask(): # dev mode, run the WSGI app in Flask dev server
flask.run()
else: # prod mode, run the WSGI app in Gunicorn
gunicorn.run()
|
flexible
|
{
"blob_id": "ffe10ee8b2ebaad565e9aef5047440a067d4e239",
"index": 7528,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef use_flask() ->bool:\n env_var = BoolVar('USE_FLASK', False)\n return EnvReader().safe_read(env_var)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef use_flask() ->bool:\n env_var = BoolVar('USE_FLASK', False)\n return EnvReader().safe_read(env_var)\n\n\nif __name__ == '__main__':\n if use_flask():\n flask.run()\n else:\n gunicorn.run()\n",
"step-4": "import server.wsgi as flask\nimport server.grunner as gunicorn\nfrom utils.cfgreader import EnvReader, BoolVar\n\n\ndef use_flask() ->bool:\n env_var = BoolVar('USE_FLASK', False)\n return EnvReader().safe_read(env_var)\n\n\nif __name__ == '__main__':\n if use_flask():\n flask.run()\n else:\n gunicorn.run()\n",
"step-5": "import server.wsgi as flask\nimport server.grunner as gunicorn\nfrom utils.cfgreader import EnvReader, BoolVar\n\n\ndef use_flask() -> bool:\n env_var = BoolVar('USE_FLASK', False)\n return EnvReader().safe_read(env_var)\n\n\nif __name__ == '__main__':\n if use_flask(): # dev mode, run the WSGI app in Flask dev server\n flask.run()\n else: # prod mode, run the WSGI app in Gunicorn\n gunicorn.run()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
ii = [('LeakWTI2.py', 6)]
|
normal
|
{
"blob_id": "997b68e42547b8f8a1059776c55c3ad16df494da",
"index": 1468,
"step-1": "<mask token>\n",
"step-2": "ii = [('LeakWTI2.py', 6)]\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
class PandaAgent:
def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,
use_platform=False, use_vision=False, real=False,
use_planning_server=False, use_learning_server=False,
alternate_orientations=False):
"""
Build the Panda world in PyBullet and set up the PDDLStream solver.
The Panda world should in include the given blocks as well as a
platform which can be used in experimentation.
:param use_platform: Boolean stating whether to include the platform to
push blocks off of or not.
:param use_vision: Boolean stating whether to use vision to detect blocks.
:param use_planning_server: Boolean stating whether to use the separate
ROS planning service server.
:param use_learning_server: Boolean stating whether to host a ROS service
server to drive planning from active learning script.
:param alternate_orientations: Boolean stating whether blocks can be replaced in
their home positions at alternate orientations.
If you are using the ROS action server, you must start it in a separate terminal:
rosrun stacking_ros planning_server.py
"""
self.real = real
self.use_vision = use_vision
self.use_platform = use_platform
self.use_planning_server = use_planning_server
self.use_learning_server = use_learning_server
self.alternate_orientations = alternate_orientations
self._planning_client_id = pb_robot.utils.connect(use_gui=False)
self.plan()
pb_robot.utils.set_default_camera()
self.robot = pb_robot.panda.Panda()
self.robot.arm.hand.Open()
self.belief_blocks = blocks
(self.pddl_blocks, self.platform_table, self.platform_leg, self.
table, self.frame, self.wall) = (setup_panda_world(self.robot,
blocks, block_init_xy_poses, use_platform=use_platform))
self.fixed = [self.platform_table, self.platform_leg, self.table,
self.frame, self.wall]
self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks
)
self.orig_joint_angles = self.robot.arm.GetJointValues()
self.orig_block_poses = [b.get_base_link_pose() for b in self.
pddl_blocks]
poses = [b.get_base_link_pose() for b in self.pddl_blocks]
poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]
self._execution_client_id = pb_robot.utils.connect(use_gui=True)
self.execute()
pb_robot.utils.set_default_camera()
self.execution_robot = pb_robot.panda.Panda()
self.execution_robot.arm.hand.Open()
setup_panda_world(self.execution_robot, blocks, poses, use_platform
=use_platform)
if (self.use_vision or self.use_planning_server or self.
use_learning_server or real):
import rospy
try:
rospy.init_node('panda_agent')
except:
print('ROS Node already created')
if real:
from franka_interface import ArmInterface
self.real_arm = ArmInterface()
from franka_core_msgs.msg import RobotState
state_topic = (
'/franka_ros_interface/custom_franka_state_controller/robot_state'
)
self.arm_last_error_time = time.time()
self.arm_error_check_time = 3.0
self.arm_state_subscriber = rospy.Subscriber(state_topic,
RobotState, self.robot_state_callback)
if self.use_vision:
from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist
rospy.wait_for_service('get_block_poses_world')
rospy.wait_for_service('get_block_poses_wrist')
self._get_block_poses_world = rospy.ServiceProxy(
'get_block_poses_world', GetBlockPosesWorld)
self._get_block_poses_wrist = rospy.ServiceProxy(
'get_block_poses_wrist', GetBlockPosesWrist)
self.last_obj_held = None
if self.use_planning_server:
from stacking_ros.srv import GetPlan, SetPlanningState
from tamp.ros_utils import goal_to_ros, ros_to_task_plan
print('Waiting for planning server...')
rospy.wait_for_service('get_latest_plan')
self.goal_to_ros = goal_to_ros
self.ros_to_task_plan = ros_to_task_plan
self.init_state_client = rospy.ServiceProxy('/reset_planning',
SetPlanningState)
self.get_plan_client = rospy.ServiceProxy('/get_latest_plan',
GetPlan)
print('Done!')
if self.use_learning_server:
from stacking_ros.srv import PlanTower
self.learning_server = rospy.Service('/plan_tower', PlanTower,
self.learning_server_callback)
print('Learning server started!')
self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
pddl_blocks, add_slanted_grasps=False, approach_frame='global',
use_vision=self.use_vision)
self.noise = noise
self.txt_id = None
self.plan()
def _add_text(self, txt):
self.execute()
pb_robot.viz.remove_all_debug()
self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),
size=2)
self.plan()
def execute(self):
self.state = 'execute'
pb_robot.aabb.set_client(self._execution_client_id)
pb_robot.body.set_client(self._execution_client_id)
pb_robot.collisions.set_client(self._execution_client_id)
pb_robot.geometry.set_client(self._execution_client_id)
pb_robot.grasp.set_client(self._execution_client_id)
pb_robot.joint.set_client(self._execution_client_id)
pb_robot.link.set_client(self._execution_client_id)
pb_robot.panda.set_client(self._execution_client_id)
pb_robot.planning.set_client(self._execution_client_id)
pb_robot.utils.set_client(self._execution_client_id)
pb_robot.viz.set_client(self._execution_client_id)
def plan(self):
if self.use_planning_server:
return
self.state = 'plan'
pb_robot.aabb.set_client(self._planning_client_id)
pb_robot.body.set_client(self._planning_client_id)
pb_robot.collisions.set_client(self._planning_client_id)
pb_robot.geometry.set_client(self._planning_client_id)
pb_robot.grasp.set_client(self._planning_client_id)
pb_robot.joint.set_client(self._planning_client_id)
pb_robot.link.set_client(self._planning_client_id)
pb_robot.panda.set_client(self._planning_client_id)
pb_robot.planning.set_client(self._planning_client_id)
pb_robot.utils.set_client(self._planning_client_id)
pb_robot.viz.set_client(self._planning_client_id)
<|reserved_special_token_0|>
def _get_initial_pddl_state(self):
"""
Get the PDDL representation of the world between experiments. This
method assumes that all blocks are on the table. We will always "clean
up" an experiment by moving blocks away from the platform after an
experiment.
"""
fixed = [self.table, self.platform_table, self.platform_leg, self.frame
]
conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.
GetJointValues())
print('Initial configuration:', conf.configuration)
init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (
'AtConf', conf), ('HandEmpty',)]
self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.
get_base_link_pose())
init += [('Pose', self.table, self.table_pose), ('AtPose', self.
table, self.table_pose)]
for body in self.pddl_blocks:
print(type(body), body)
pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())
init += [('Graspable', body), ('Pose', body, pose), ('AtPose',
body, pose), ('Block', body), ('On', body, self.table), (
'Supported', body, pose, self.table, self.table_pose)]
if not self.platform_table is None:
platform_pose = pb_robot.vobj.BodyPose(self.platform_table,
self.platform_table.get_base_link_pose())
init += [('Pose', self.platform_table, platform_pose), (
'AtPose', self.platform_table, platform_pose)]
init += [('Block', self.platform_table)]
init += [('Table', self.table)]
return init
def _get_observed_pose(self, pddl_block, action):
"""
This pose should be relative to the base of the platform leg to
agree with the simulation. The two block representations will have
different orientation but their positions should be the same.
"""
block_transform = pddl_block.get_base_link_transform()
platform_transform = self.platform_leg.get_base_link_transform()
platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0
rel_transform = numpy.linalg.inv(platform_transform) @ block_transform
end_pose = pb_robot.geometry.pose_from_tform(rel_transform)
end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))
end_pose = add_noise(end_pose, self.noise * numpy.eye(3))
return end_pose
<|reserved_special_token_0|>
def build_planning_problem(self, tower, base_xy):
""" Builds the initial conditions for planning """
self.moved_blocks = set()
tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]
tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]
if self.use_planning_server:
from stacking_ros.msg import BodyInfo
from stacking_ros.srv import SetPlanningStateRequest
from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
ros_req = SetPlanningStateRequest()
if self.real:
ros_req.robot_config.angles = self.real_arm.convertToList(self
.real_arm.joint_angles())
else:
ros_req.robot_config.angles = self.robot.arm.GetJointValues()
ros_req.init_state = block_init_to_ros(self.pddl_blocks)
else:
pddl_problems = []
base_block = self.pddl_block_lookup[tower[0].name]
base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z
base_pose = base_pos, tower[0].rotation
base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)
if self.use_planning_server:
base_block_ros = BodyInfo()
base_block_ros.name = base_block.readableName
base_block_ros.stack = True
pose_to_ros(base_pose, base_block_ros.pose)
ros_req.goal_state.append(base_block_ros)
else:
pddl_problems.append((self.table, base_block, (base_pos, tower[
0].rotation)))
for b_ix in range(1, len(tower)):
bottom_block = tower[b_ix - 1]
bottom_pose = bottom_block.pose.pos, bottom_block.rotation
bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)
top_block = tower[b_ix]
top_pose = top_block.pose.pos, top_block.rotation
top_tform = pb_robot.geometry.tform_from_pose(top_pose)
rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform
top_pddl = self.pddl_block_lookup[top_block.name]
bottom_pddl = self.pddl_block_lookup[bottom_block.name]
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = top_pddl.readableName
block_ros.base_obj = bottom_pddl.readableName
transform_to_ros(rel_tform, block_ros.pose)
block_ros.is_rel_pose = True
block_ros.stack = True
ros_req.goal_state.append(block_ros)
else:
init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]
goal_terms = [('On', top_pddl, bottom_pddl)]
pddl_problems.append((bottom_pddl, top_pddl, rel_tform))
for ix in reversed(tower_block_order):
blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
goal_pose = pb_robot.vobj.BodyPose(blk, pose)
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = blk.readableName
block_ros.stack = False
pose_to_ros(goal_pose, block_ros.pose)
ros_req.goal_state.append(block_ros)
else:
pddl_problems.append((self.table, blk, pose))
if self.use_planning_server:
return ros_req
else:
return pddl_problems
def build_reset_problem(self):
""" Builds the initial conditions for a tower reset given a set of moved blocks """
print('Resetting blocks...')
print('Moved Blocks:', self.moved_blocks)
current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
block_ixs = range(len(self.pddl_blocks))
block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2
], reverse=True)
if self.use_planning_server:
from stacking_ros.msg import BodyInfo
from stacking_ros.srv import SetPlanningStateRequest
from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
ros_req = SetPlanningStateRequest()
ros_req.init_state = block_init_to_ros(self.pddl_blocks)
if self.real:
ros_req.robot_config.angles = self.real_arm.convertToList(self
.real_arm.joint_angles())
else:
ros_req.robot_config.angles = self.robot.arm.GetJointValues()
else:
pddl_problems = []
for ix in block_ixs:
blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
if blk in self.moved_blocks:
if self.use_planning_server:
goal_pose = pb_robot.vobj.BodyPose(blk, pose)
block_ros = BodyInfo()
block_ros.name = blk.readableName
block_ros.stack = False
pose_to_ros(goal_pose, block_ros.pose)
ros_req.goal_state.append(block_ros)
else:
pddl_problems.append((self.table, blk, pose))
if self.use_planning_server:
return ros_req
else:
return pddl_problems
def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0,
0.5), ignore_resets=False):
"""
Simulates a tower stacking and unstacking by requesting plans from a separate planning server
Returns:
success : Flag indicating success of execution (True/False)
stable : Flag indicating (0 or 1)
num_stack_success : Number of blocks successfully stacked
"""
for block in tower:
print('Block:', block.name)
print('Pose:', block.pose)
print('Dims:', block.dimensions)
print('CoM:', block.com)
print('Rotations:', block.rotation)
print('-----')
if self.use_vision:
self._update_block_poses()
self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks
]
planning_prob = self.build_planning_problem(tower, base_xy)
success, stack_stable, reset_stable, num_success, fatal = (self.
plan_and_execute(planning_prob, real, T, stack=True,
ignore_resets=ignore_resets))
print(
f'Completed tower stack with success: {success}, stable: {stack_stable}'
)
if reset_stable:
print(f'Completed tower reset stable: {reset_stable}')
while not success and not fatal:
print(
f'Got recoverable failure. Replanning from step index {num_success}.'
)
if self.use_planning_server:
from tamp.ros_utils import block_init_to_ros
if self.real:
planning_prob.robot_config.angles = (self.real_arm.
convertToList(self.real_arm.joint_angles()))
else:
planning_prob.robot_config.angles = (self.robot.arm.
GetJointValues())
planning_prob.init_state = block_init_to_ros(self.pddl_blocks)
if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):
planning_prob.held_block.name = (self.last_obj_held.
body.readableName)
transform_to_ros(self.last_obj_held.grasp_objF,
planning_prob.held_block.pose)
success, stack_stable, reset_stable, num_success, fatal = (self
.plan_and_execute(planning_prob, real, T, stack=True,
start_idx=num_success, ignore_resets=ignore_resets))
print(
f'Completed tower stack with success: {success}, stable: {stack_stable}'
)
if reset_stable:
print(f'Completed tower reset stable: {reset_stable}')
num_stack_success = min(len(tower), num_success)
if not ignore_resets:
try:
if not (stack_stable and reset_stable):
if self.use_vision and not stack_stable:
self._update_block_poses(find_moved=True)
planning_prob = self.build_reset_problem()
reset_fatal = False
num_reset_success = 0
while len(self.moved_blocks) > 0 and not reset_fatal:
print(f'Resetting {len(self.moved_blocks)} blocks.')
(reset_success, _, reset_stable, num_reset_success,
reset_fatal) = (self.plan_and_execute(
planning_prob, real, T, stack=False, start_idx=
num_reset_success))
except Exception as e:
print('Planning/execution failed during tower reset.')
print(e)
return success, stack_stable, num_stack_success
def plan_and_execute(self, planning_prob, real=False, T=2500, stack=
True, start_idx=0, ignore_resets=False):
"""
Requests a PDDLStream plan from a planning server and executes the resulting plan
Returns:
success : Flag for whether the plan execution succeeded
stack_stable : Flag for whether stacking a stable tower was successful
reset_stable : Flag for whether resetting a tower was successful
num_success : Progress (in number of steps) of successful tasks
fatal : Flag for whether the error was fatal (True) or recoverable (False)
start_idx : Start index of planning (for recovering from partial plans)
ignore_resets : Flag for whether to stop after resets
"""
num_success = start_idx
stack_stable = False
reset_stable = False
planning_active = True
if self.use_planning_server:
ros_req = planning_prob
num_steps = len(ros_req.goal_state)
trimmed_ros_req = deepcopy(ros_req)
trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]
self.init_state_client.call(trimmed_ros_req)
else:
pddl_problems = planning_prob
num_steps = len(pddl_problems)
while num_success < num_steps:
try:
if self.use_planning_server:
query_block = self.pddl_block_lookup[ros_req.goal_state
[num_success].name]
plan = []
saved_world = pb_robot.utils.WorldSaver()
while len(plan) == 0 and planning_active:
time.sleep(5)
print('Getting a plan from server...')
ros_resp = self.get_plan_client.call()
if not ros_resp.planning_active:
print('Planning failed on server side.')
if ros_req.goal_state[num_success].stack:
print(f'Failed during stacking {query_block}')
fatal = True
else:
print(f'Failed during resetting {query_block}')
input(
'Manually reset the blocks and press Enter to continue'
)
if real:
self._update_block_poses()
fatal = False
return (False, stack_stable, reset_stable,
num_success, fatal)
if self.validate_ros_plan(ros_resp, query_block):
plan = self.ros_to_task_plan(ros_resp, self.
execution_robot, self.pddl_block_lookup)
else:
base, blk, pose = pddl_problems[num_success]
query_block = blk
self._add_text('Planning block placement')
self.plan()
saved_world = pb_robot.utils.WorldSaver()
self.robot.arm.hand.Open()
fixed_objs = self.fixed + [b for b in self.pddl_blocks if
b != blk]
init = self._get_initial_pddl_state()
goal_terms = []
if base == self.table:
blk_pose = pb_robot.vobj.BodyPose(blk, pose)
if (not stack or num_success >= num_steps / 2
) and self.alternate_orientations:
init += [('Reset',)]
goal_terms.append(('AtHome', blk))
else:
init += [('Pose', blk, blk_pose), ('Supported',
blk, blk_pose, self.table, self.table_pose)]
goal_terms.append(('AtPose', blk, blk_pose))
goal_terms.append(('On', blk, self.table))
else:
init += [('RelPose', blk, base, pose)]
goal_terms.append(('On', blk, base))
goal = tuple(['and'] + goal_terms)
pddl_info = get_pddlstream_info(self.robot, fixed_objs,
self.pddl_blocks, add_slanted_grasps=True,
approach_frame='global', use_vision=self.use_vision,
home_pose=pose)
plan, cost = pddlstream_plan(pddl_info, init, goal,
search_sample_ratio=1.0, max_time=INF)
if plan is None:
print('\nFailed to plan\n')
fatal = False
return (False, stack_stable, reset_stable,
num_success, fatal)
saved_world.restore()
print('\nGot plan:')
print(plan)
obstacles = [f for f in self.fixed if f is not None]
if not self.use_planning_server:
self.plan()
ExecuteActions(plan, real=False, pause=False, wait=
False, obstacles=obstacles)
self.execute()
ExecuteActions(plan, real=real, pause=True, wait=False,
prompt=False, obstacles=obstacles,
sim_fatal_failure_prob=0.0,
sim_recoverable_failure_prob=0.0)
desired_pose = query_block.get_base_link_pose()
if query_block not in self.moved_blocks:
self.moved_blocks.add(query_block)
else:
self.moved_blocks.remove(query_block)
if not real:
self.step_simulation(T, vis_frames=False)
if stack:
stable = self.check_stability(real, query_block,
desired_pose)
else:
stable = True
if stable == 0.0:
prompt = input(
'Tower NOT stable. Is this true? [y: Unstable / n: Stable]'
)
if prompt == 'n':
stable = 1.0
if stable == 0.0:
print('Unstable after execution!')
return True, stack_stable, reset_stable, num_success, False
else:
num_success += 1
if stack and num_success == num_steps / 2:
print('Completed tower stack!')
stack_stable = True
stack = False
if ignore_resets:
return (True, stack_stable, reset_stable,
num_success, False)
elif num_success == num_steps:
print('Completed tower reset!')
reset_stable = True
return (True, stack_stable, reset_stable,
num_success, False)
except ExecutionFailure as e:
print('Planning/execution failed.')
print(e)
saved_world.restore()
if real:
self._update_block_poses()
self.robot.arm.SetJointValues(self.real_arm.
convertToList(self.real_arm.joint_angles()))
self.last_obj_held = e.obj_held
return False, stack_stable, reset_stable, num_success, e.fatal
def check_stability(self, real, block_pddl, desired_pose, max_tries=2):
if self.use_vision:
try:
poses = self._get_block_poses_wrist().poses
except:
print(
'Service call to get block poses failed during check stability. Exiting.'
)
sys.exit()
visible = False
for named_pose in poses:
if named_pose.block_id in block_pddl.readableName.split('_')[-1
]:
visible = True
pose = named_pose.pose.pose
des_pos = desired_pose[0]
obs_pos = pose.position.x, pose.position.y, pose.position.z
print('[Check Stability] Desired Pos:', des_pos)
print('[Check Stability] Detected Pos:', obs_pos)
dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.
array(des_pos))
print(
f'[Check Stability] Position Distance (>0.04): {dist}')
if dist > 0.04:
return 0.0
orn = desired_pose[1]
obs_orn = pyquaternion.Quaternion(pose.orientation.w,
pose.orientation.x, pose.orientation.y, pose.
orientation.z)
des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1
], orn[2])
angle = (des_orn.inverse * obs_orn).angle
angle = numpy.abs(numpy.rad2deg(angle))
print(
f'[Check Stability] Orientation Distance (> 15): {angle}'
)
if angle > 15:
return 0.0
if not visible:
print('[Check Stability] Object not visible to camera.')
return 0.0
else:
end_pose = block_pddl.get_base_link_point()
dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(
desired_pose[0]))
if dist > 0.01:
print('Unstable!')
return 0.0
return 1.0
def validate_ros_plan(self, ros_resp, tgt_block):
""" Validates a ROS plan to move a block against the expected target block name """
if len(ros_resp.plan) == 0:
return True
else:
plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']
if len(plan_blocks) > 0:
plan_block = plan_blocks[0]
else:
return False
print(
f'Received plan to move {plan_block} and expected to move {tgt_block}'
)
return tgt_block.readableName == plan_block
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def step_simulation(self, T, vis_frames=False, lifeTime=0.1):
        """Advance both PyBullet clients by T physics steps, holding the arm still.

        :param T: Number of simulation steps to run.
        :param vis_frames: If True, draw each block's local axes as debug lines
            in the execution client.
        :param lifeTime: Lifetime (seconds) of the drawn debug lines.
        """
        # Apply gravity in both the execution and planning worlds.
        p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)
        p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)
        # Capture the current arm configuration so it can be re-imposed each
        # step (the robots should not drift while blocks settle).
        q = self.robot.get_joint_positions()
        for _ in range(T):
            p.stepSimulation(physicsClientId=self._execution_client_id)
            p.stepSimulation(physicsClientId=self._planning_client_id)
            self.execute()
            self.execution_robot.set_joint_positions(self.robot.joints, q)
            self.plan()
            self.robot.set_joint_positions(self.robot.joints, q)
            time.sleep(1 / 2400.0)
            if vis_frames:
                # Draw an RGB axis triad (x=red, y=green, z=blue) at each block.
                length = 0.1
                for pddl_block in self.pddl_blocks:
                    pos, quat = pddl_block.get_pose()
                    new_x = transformation([length, 0.0, 0.0], pos, quat)
                    new_y = transformation([0.0, length, 0.0], pos, quat)
                    new_z = transformation([0.0, 0.0, length], pos, quat)
                    p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
                    p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
                    p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
    def simulate_action(self, action, block_ix, T=50, vis_sim=False,
        vis_placement=False):
        """
        Perform the given action with the given block. An observation
        should be returned in the reference frame of the platform.
        :param action: Place action which describes the relative pose of the block to the platform surface.
        :param block_ix: Index of the belief/PDDL block to act on.
        :param T: How many timesteps to simulate the block falling for.
        :param vis_sim: Unused.
        :param vis_placement: If True, draw the target placement frame as debug lines.
        :return: (action, T, end_pose) End pose should be TODO: what frame?
        TODO: Not sure if this method works at the moment...
        """
        assert self.platform_table is not None
        real_block = self.belief_blocks[block_ix]
        pddl_block = self.pddl_blocks[block_ix]
        # Remember where the block started so it can be returned afterwards.
        original_pose = pddl_block.get_base_link_pose()
        self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
            pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',
            use_vision=self.use_vision)
        init = self._get_initial_pddl_state()
        # Pose the belief block per the action's rotation, then compute the
        # placement height from the rotated dimensions.
        real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))
        rotated_block = get_rotated_block(real_block)
        x = action.pos[0]
        y = action.pos[1]
        z = self.platform_table.get_dimensions()[2
            ] / 2.0 + rotated_block.dimensions[2] / 2
        # Homogeneous transform of the block relative to the platform surface.
        tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0,
            0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])
        tform[0:3, 0:3] = action.rot.as_matrix()
        if vis_placement:
            # Visualize the target placement frame in world coordinates.
            surface_tform = pb_robot.geometry.tform_from_pose(self.
                platform_table.get_base_link_pose())
            body_tform = surface_tform @ tform
            length, lifeTime = 0.2, 0.0
            pos, quat = pb_robot.geometry.pose_from_tform(body_tform)
            new_x = transformation([length, 0.0, 0.0], pos, quat)
            new_y = transformation([0.0, length, 0.0], pos, quat)
            new_z = transformation([0.0, 0.0, length], pos, quat)
            p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)
            p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)
            p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)
        init += [('RelPose', pddl_block, self.platform_table, tform)]
        goal = 'On', pddl_block, self.platform_table
        print('Init:', init)
        print('Goal:', goal)
        # NOTE(review): plan_and_execute is called here as (init, goal, ...)
        # but its current signature is (planning_prob, real, T, ...). This
        # looks like a stale call to an older API — confirm before relying on
        # this method (the docstring above also flags it as possibly broken).
        self.plan_and_execute(init, goal, search_sample_ratio=1000)
        # Let the block settle, observe it, then settle the remainder.
        self.step_simulation(T)
        end_pose = self._get_observed_pose(pddl_block, action)
        observation = action, T, end_pose
        self.step_simulation(500 - T)
        # Plan a second problem to return the block to its original pose.
        self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
            pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',
            use_vision=self.use_vision)
        init = self._get_initial_pddl_state()
        goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)
        init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,
            goal_pose, self.table, self.table_pose)]
        goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,
            self.table)
        print('Init:', init)
        print('Goal:', goal)
        success = self.plan_and_execute(init, goal, max_time=100.0,
            search_sample_ratio=1000)
        return observation
class PandaClientAgent:
    """
    Lightweight client to call a PandaAgent as a service for active learning
    """

    def __init__(self):
        import rospy
        rospy.init_node('panda_client')
        self.restart_services()

    def restart_services(self):
        """Block until the Panda Agent planning service is available, then connect."""
        import rospy
        from stacking_ros.srv import PlanTower
        print('Waiting for Panda Agent server...')
        rospy.wait_for_service('/plan_tower')
        print('Done')
        self.client = rospy.ServiceProxy('/plan_tower', PlanTower)

    def simulate_tower(self, tower, vis, real=False):
        """
        Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.
        Returns:
            success : Flag indicating success of execution (True/False)
            stable : Flag indicating (0 or 1)
            num_stack_success : Number of blocks successfully stacked
        """
        from stacking_ros.srv import PlanTowerRequest
        from tamp.ros_utils import tower_to_ros, ros_to_tower

        # Package the tower into a ROS service request.
        ros_request = PlanTowerRequest()
        ros_request.tower_info = tower_to_ros(tower)

        # Optionally bring up a local visualization of the requested tower.
        if vis:
            sim_world = World(tower)
            env = Environment([sim_world], vis_sim=True, vis_frames=True)
            env.step(vis_frames=True)

        for blk in tower:
            print('----- Block info -----')
            print(blk.name)
            print(blk.dimensions)
            print(blk.pose)
            print(blk.rotation)

        response = self.client.call(ros_request)

        if vis:
            env.disconnect()
        return response.success, response.stable, response.num_stack_stable
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PandaAgent:
def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,
use_platform=False, use_vision=False, real=False,
use_planning_server=False, use_learning_server=False,
alternate_orientations=False):
"""
Build the Panda world in PyBullet and set up the PDDLStream solver.
The Panda world should in include the given blocks as well as a
platform which can be used in experimentation.
:param use_platform: Boolean stating whether to include the platform to
push blocks off of or not.
:param use_vision: Boolean stating whether to use vision to detect blocks.
:param use_planning_server: Boolean stating whether to use the separate
ROS planning service server.
:param use_learning_server: Boolean stating whether to host a ROS service
server to drive planning from active learning script.
:param alternate_orientations: Boolean stating whether blocks can be replaced in
their home positions at alternate orientations.
If you are using the ROS action server, you must start it in a separate terminal:
rosrun stacking_ros planning_server.py
"""
self.real = real
self.use_vision = use_vision
self.use_platform = use_platform
self.use_planning_server = use_planning_server
self.use_learning_server = use_learning_server
self.alternate_orientations = alternate_orientations
self._planning_client_id = pb_robot.utils.connect(use_gui=False)
self.plan()
pb_robot.utils.set_default_camera()
self.robot = pb_robot.panda.Panda()
self.robot.arm.hand.Open()
self.belief_blocks = blocks
(self.pddl_blocks, self.platform_table, self.platform_leg, self.
table, self.frame, self.wall) = (setup_panda_world(self.robot,
blocks, block_init_xy_poses, use_platform=use_platform))
self.fixed = [self.platform_table, self.platform_leg, self.table,
self.frame, self.wall]
self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks
)
self.orig_joint_angles = self.robot.arm.GetJointValues()
self.orig_block_poses = [b.get_base_link_pose() for b in self.
pddl_blocks]
poses = [b.get_base_link_pose() for b in self.pddl_blocks]
poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]
self._execution_client_id = pb_robot.utils.connect(use_gui=True)
self.execute()
pb_robot.utils.set_default_camera()
self.execution_robot = pb_robot.panda.Panda()
self.execution_robot.arm.hand.Open()
setup_panda_world(self.execution_robot, blocks, poses, use_platform
=use_platform)
if (self.use_vision or self.use_planning_server or self.
use_learning_server or real):
import rospy
try:
rospy.init_node('panda_agent')
except:
print('ROS Node already created')
if real:
from franka_interface import ArmInterface
self.real_arm = ArmInterface()
from franka_core_msgs.msg import RobotState
state_topic = (
'/franka_ros_interface/custom_franka_state_controller/robot_state'
)
self.arm_last_error_time = time.time()
self.arm_error_check_time = 3.0
self.arm_state_subscriber = rospy.Subscriber(state_topic,
RobotState, self.robot_state_callback)
if self.use_vision:
from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist
rospy.wait_for_service('get_block_poses_world')
rospy.wait_for_service('get_block_poses_wrist')
self._get_block_poses_world = rospy.ServiceProxy(
'get_block_poses_world', GetBlockPosesWorld)
self._get_block_poses_wrist = rospy.ServiceProxy(
'get_block_poses_wrist', GetBlockPosesWrist)
self.last_obj_held = None
if self.use_planning_server:
from stacking_ros.srv import GetPlan, SetPlanningState
from tamp.ros_utils import goal_to_ros, ros_to_task_plan
print('Waiting for planning server...')
rospy.wait_for_service('get_latest_plan')
self.goal_to_ros = goal_to_ros
self.ros_to_task_plan = ros_to_task_plan
self.init_state_client = rospy.ServiceProxy('/reset_planning',
SetPlanningState)
self.get_plan_client = rospy.ServiceProxy('/get_latest_plan',
GetPlan)
print('Done!')
if self.use_learning_server:
from stacking_ros.srv import PlanTower
self.learning_server = rospy.Service('/plan_tower', PlanTower,
self.learning_server_callback)
print('Learning server started!')
self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
pddl_blocks, add_slanted_grasps=False, approach_frame='global',
use_vision=self.use_vision)
self.noise = noise
self.txt_id = None
self.plan()
def _add_text(self, txt):
self.execute()
pb_robot.viz.remove_all_debug()
self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),
size=2)
self.plan()
def execute(self):
self.state = 'execute'
pb_robot.aabb.set_client(self._execution_client_id)
pb_robot.body.set_client(self._execution_client_id)
pb_robot.collisions.set_client(self._execution_client_id)
pb_robot.geometry.set_client(self._execution_client_id)
pb_robot.grasp.set_client(self._execution_client_id)
pb_robot.joint.set_client(self._execution_client_id)
pb_robot.link.set_client(self._execution_client_id)
pb_robot.panda.set_client(self._execution_client_id)
pb_robot.planning.set_client(self._execution_client_id)
pb_robot.utils.set_client(self._execution_client_id)
pb_robot.viz.set_client(self._execution_client_id)
def plan(self):
if self.use_planning_server:
return
self.state = 'plan'
pb_robot.aabb.set_client(self._planning_client_id)
pb_robot.body.set_client(self._planning_client_id)
pb_robot.collisions.set_client(self._planning_client_id)
pb_robot.geometry.set_client(self._planning_client_id)
pb_robot.grasp.set_client(self._planning_client_id)
pb_robot.joint.set_client(self._planning_client_id)
pb_robot.link.set_client(self._planning_client_id)
pb_robot.panda.set_client(self._planning_client_id)
pb_robot.planning.set_client(self._planning_client_id)
pb_robot.utils.set_client(self._planning_client_id)
pb_robot.viz.set_client(self._planning_client_id)
def reset_world(self):
""" Resets the planning world to its original configuration """
print('Resetting world')
if self.real:
angles = self.real_arm.convertToList(self.real_arm.joint_angles())
else:
angles = self.orig_joint_angles
self.plan()
self.robot.arm.SetJointValues(angles)
self.execute()
self.execution_robot.arm.SetJointValues(angles)
for bx, b in enumerate(self.pddl_blocks):
b.set_base_link_pose(self.orig_block_poses[bx])
print('Done')
def _get_initial_pddl_state(self):
"""
Get the PDDL representation of the world between experiments. This
method assumes that all blocks are on the table. We will always "clean
up" an experiment by moving blocks away from the platform after an
experiment.
"""
fixed = [self.table, self.platform_table, self.platform_leg, self.frame
]
conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.
GetJointValues())
print('Initial configuration:', conf.configuration)
init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (
'AtConf', conf), ('HandEmpty',)]
self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.
get_base_link_pose())
init += [('Pose', self.table, self.table_pose), ('AtPose', self.
table, self.table_pose)]
for body in self.pddl_blocks:
print(type(body), body)
pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())
init += [('Graspable', body), ('Pose', body, pose), ('AtPose',
body, pose), ('Block', body), ('On', body, self.table), (
'Supported', body, pose, self.table, self.table_pose)]
if not self.platform_table is None:
platform_pose = pb_robot.vobj.BodyPose(self.platform_table,
self.platform_table.get_base_link_pose())
init += [('Pose', self.platform_table, platform_pose), (
'AtPose', self.platform_table, platform_pose)]
init += [('Block', self.platform_table)]
init += [('Table', self.table)]
return init
    def _get_observed_pose(self, pddl_block, action):
        """
        This pose should be relative to the base of the platform leg to
        agree with the simulation. The two block representations will have
        different orientation but their positions should be the same.
        """
        block_transform = pddl_block.get_base_link_transform()
        platform_transform = self.platform_leg.get_base_link_transform()
        # Shift the reference frame down to the *base* of the platform leg.
        platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0
        # Express the block transform in the platform-leg base frame.
        rel_transform = numpy.linalg.inv(platform_transform) @ block_transform
        end_pose = pb_robot.geometry.pose_from_tform(rel_transform)
        end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))
        # Perturb the observation; self.noise * eye(3) is presumably a
        # covariance — TODO confirm add_noise semantics.
        end_pose = add_noise(end_pose, self.noise * numpy.eye(3))
        return end_pose
<|reserved_special_token_0|>
def build_planning_problem(self, tower, base_xy):
""" Builds the initial conditions for planning """
self.moved_blocks = set()
tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]
tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]
if self.use_planning_server:
from stacking_ros.msg import BodyInfo
from stacking_ros.srv import SetPlanningStateRequest
from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
ros_req = SetPlanningStateRequest()
if self.real:
ros_req.robot_config.angles = self.real_arm.convertToList(self
.real_arm.joint_angles())
else:
ros_req.robot_config.angles = self.robot.arm.GetJointValues()
ros_req.init_state = block_init_to_ros(self.pddl_blocks)
else:
pddl_problems = []
base_block = self.pddl_block_lookup[tower[0].name]
base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z
base_pose = base_pos, tower[0].rotation
base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)
if self.use_planning_server:
base_block_ros = BodyInfo()
base_block_ros.name = base_block.readableName
base_block_ros.stack = True
pose_to_ros(base_pose, base_block_ros.pose)
ros_req.goal_state.append(base_block_ros)
else:
pddl_problems.append((self.table, base_block, (base_pos, tower[
0].rotation)))
for b_ix in range(1, len(tower)):
bottom_block = tower[b_ix - 1]
bottom_pose = bottom_block.pose.pos, bottom_block.rotation
bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)
top_block = tower[b_ix]
top_pose = top_block.pose.pos, top_block.rotation
top_tform = pb_robot.geometry.tform_from_pose(top_pose)
rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform
top_pddl = self.pddl_block_lookup[top_block.name]
bottom_pddl = self.pddl_block_lookup[bottom_block.name]
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = top_pddl.readableName
block_ros.base_obj = bottom_pddl.readableName
transform_to_ros(rel_tform, block_ros.pose)
block_ros.is_rel_pose = True
block_ros.stack = True
ros_req.goal_state.append(block_ros)
else:
init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]
goal_terms = [('On', top_pddl, bottom_pddl)]
pddl_problems.append((bottom_pddl, top_pddl, rel_tform))
for ix in reversed(tower_block_order):
blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
goal_pose = pb_robot.vobj.BodyPose(blk, pose)
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = blk.readableName
block_ros.stack = False
pose_to_ros(goal_pose, block_ros.pose)
ros_req.goal_state.append(block_ros)
else:
pddl_problems.append((self.table, blk, pose))
if self.use_planning_server:
return ros_req
else:
return pddl_problems
def build_reset_problem(self):
""" Builds the initial conditions for a tower reset given a set of moved blocks """
print('Resetting blocks...')
print('Moved Blocks:', self.moved_blocks)
current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
block_ixs = range(len(self.pddl_blocks))
block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2
], reverse=True)
if self.use_planning_server:
from stacking_ros.msg import BodyInfo
from stacking_ros.srv import SetPlanningStateRequest
from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
ros_req = SetPlanningStateRequest()
ros_req.init_state = block_init_to_ros(self.pddl_blocks)
if self.real:
ros_req.robot_config.angles = self.real_arm.convertToList(self
.real_arm.joint_angles())
else:
ros_req.robot_config.angles = self.robot.arm.GetJointValues()
else:
pddl_problems = []
for ix in block_ixs:
blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
if blk in self.moved_blocks:
if self.use_planning_server:
goal_pose = pb_robot.vobj.BodyPose(blk, pose)
block_ros = BodyInfo()
block_ros.name = blk.readableName
block_ros.stack = False
pose_to_ros(goal_pose, block_ros.pose)
ros_req.goal_state.append(block_ros)
else:
pddl_problems.append((self.table, blk, pose))
if self.use_planning_server:
return ros_req
else:
return pddl_problems
def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0,
0.5), ignore_resets=False):
"""
Simulates a tower stacking and unstacking by requesting plans from a separate planning server
Returns:
success : Flag indicating success of execution (True/False)
stable : Flag indicating (0 or 1)
num_stack_success : Number of blocks successfully stacked
"""
for block in tower:
print('Block:', block.name)
print('Pose:', block.pose)
print('Dims:', block.dimensions)
print('CoM:', block.com)
print('Rotations:', block.rotation)
print('-----')
if self.use_vision:
self._update_block_poses()
self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks
]
planning_prob = self.build_planning_problem(tower, base_xy)
success, stack_stable, reset_stable, num_success, fatal = (self.
plan_and_execute(planning_prob, real, T, stack=True,
ignore_resets=ignore_resets))
print(
f'Completed tower stack with success: {success}, stable: {stack_stable}'
)
if reset_stable:
print(f'Completed tower reset stable: {reset_stable}')
while not success and not fatal:
print(
f'Got recoverable failure. Replanning from step index {num_success}.'
)
if self.use_planning_server:
from tamp.ros_utils import block_init_to_ros
if self.real:
planning_prob.robot_config.angles = (self.real_arm.
convertToList(self.real_arm.joint_angles()))
else:
planning_prob.robot_config.angles = (self.robot.arm.
GetJointValues())
planning_prob.init_state = block_init_to_ros(self.pddl_blocks)
if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):
planning_prob.held_block.name = (self.last_obj_held.
body.readableName)
transform_to_ros(self.last_obj_held.grasp_objF,
planning_prob.held_block.pose)
success, stack_stable, reset_stable, num_success, fatal = (self
.plan_and_execute(planning_prob, real, T, stack=True,
start_idx=num_success, ignore_resets=ignore_resets))
print(
f'Completed tower stack with success: {success}, stable: {stack_stable}'
)
if reset_stable:
print(f'Completed tower reset stable: {reset_stable}')
num_stack_success = min(len(tower), num_success)
if not ignore_resets:
try:
if not (stack_stable and reset_stable):
if self.use_vision and not stack_stable:
self._update_block_poses(find_moved=True)
planning_prob = self.build_reset_problem()
reset_fatal = False
num_reset_success = 0
while len(self.moved_blocks) > 0 and not reset_fatal:
print(f'Resetting {len(self.moved_blocks)} blocks.')
(reset_success, _, reset_stable, num_reset_success,
reset_fatal) = (self.plan_and_execute(
planning_prob, real, T, stack=False, start_idx=
num_reset_success))
except Exception as e:
print('Planning/execution failed during tower reset.')
print(e)
return success, stack_stable, num_stack_success
def plan_and_execute(self, planning_prob, real=False, T=2500, stack=
True, start_idx=0, ignore_resets=False):
"""
Requests a PDDLStream plan from a planning server and executes the resulting plan
Returns:
success : Flag for whether the plan execution succeeded
stack_stable : Flag for whether stacking a stable tower was successful
reset_stable : Flag for whether resetting a tower was successful
num_success : Progress (in number of steps) of successful tasks
fatal : Flag for whether the error was fatal (True) or recoverable (False)
start_idx : Start index of planning (for recovering from partial plans)
ignore_resets : Flag for whether to stop after resets
"""
num_success = start_idx
stack_stable = False
reset_stable = False
planning_active = True
if self.use_planning_server:
ros_req = planning_prob
num_steps = len(ros_req.goal_state)
trimmed_ros_req = deepcopy(ros_req)
trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]
self.init_state_client.call(trimmed_ros_req)
else:
pddl_problems = planning_prob
num_steps = len(pddl_problems)
while num_success < num_steps:
try:
if self.use_planning_server:
query_block = self.pddl_block_lookup[ros_req.goal_state
[num_success].name]
plan = []
saved_world = pb_robot.utils.WorldSaver()
while len(plan) == 0 and planning_active:
time.sleep(5)
print('Getting a plan from server...')
ros_resp = self.get_plan_client.call()
if not ros_resp.planning_active:
print('Planning failed on server side.')
if ros_req.goal_state[num_success].stack:
print(f'Failed during stacking {query_block}')
fatal = True
else:
print(f'Failed during resetting {query_block}')
input(
'Manually reset the blocks and press Enter to continue'
)
if real:
self._update_block_poses()
fatal = False
return (False, stack_stable, reset_stable,
num_success, fatal)
if self.validate_ros_plan(ros_resp, query_block):
plan = self.ros_to_task_plan(ros_resp, self.
execution_robot, self.pddl_block_lookup)
else:
base, blk, pose = pddl_problems[num_success]
query_block = blk
self._add_text('Planning block placement')
self.plan()
saved_world = pb_robot.utils.WorldSaver()
self.robot.arm.hand.Open()
fixed_objs = self.fixed + [b for b in self.pddl_blocks if
b != blk]
init = self._get_initial_pddl_state()
goal_terms = []
if base == self.table:
blk_pose = pb_robot.vobj.BodyPose(blk, pose)
if (not stack or num_success >= num_steps / 2
) and self.alternate_orientations:
init += [('Reset',)]
goal_terms.append(('AtHome', blk))
else:
init += [('Pose', blk, blk_pose), ('Supported',
blk, blk_pose, self.table, self.table_pose)]
goal_terms.append(('AtPose', blk, blk_pose))
goal_terms.append(('On', blk, self.table))
else:
init += [('RelPose', blk, base, pose)]
goal_terms.append(('On', blk, base))
goal = tuple(['and'] + goal_terms)
pddl_info = get_pddlstream_info(self.robot, fixed_objs,
self.pddl_blocks, add_slanted_grasps=True,
approach_frame='global', use_vision=self.use_vision,
home_pose=pose)
plan, cost = pddlstream_plan(pddl_info, init, goal,
search_sample_ratio=1.0, max_time=INF)
if plan is None:
print('\nFailed to plan\n')
fatal = False
return (False, stack_stable, reset_stable,
num_success, fatal)
saved_world.restore()
print('\nGot plan:')
print(plan)
obstacles = [f for f in self.fixed if f is not None]
if not self.use_planning_server:
self.plan()
ExecuteActions(plan, real=False, pause=False, wait=
False, obstacles=obstacles)
self.execute()
ExecuteActions(plan, real=real, pause=True, wait=False,
prompt=False, obstacles=obstacles,
sim_fatal_failure_prob=0.0,
sim_recoverable_failure_prob=0.0)
desired_pose = query_block.get_base_link_pose()
if query_block not in self.moved_blocks:
self.moved_blocks.add(query_block)
else:
self.moved_blocks.remove(query_block)
if not real:
self.step_simulation(T, vis_frames=False)
if stack:
stable = self.check_stability(real, query_block,
desired_pose)
else:
stable = True
if stable == 0.0:
prompt = input(
'Tower NOT stable. Is this true? [y: Unstable / n: Stable]'
)
if prompt == 'n':
stable = 1.0
if stable == 0.0:
print('Unstable after execution!')
return True, stack_stable, reset_stable, num_success, False
else:
num_success += 1
if stack and num_success == num_steps / 2:
print('Completed tower stack!')
stack_stable = True
stack = False
if ignore_resets:
return (True, stack_stable, reset_stable,
num_success, False)
elif num_success == num_steps:
print('Completed tower reset!')
reset_stable = True
return (True, stack_stable, reset_stable,
num_success, False)
except ExecutionFailure as e:
print('Planning/execution failed.')
print(e)
saved_world.restore()
if real:
self._update_block_poses()
self.robot.arm.SetJointValues(self.real_arm.
convertToList(self.real_arm.joint_angles()))
self.last_obj_held = e.obj_held
return False, stack_stable, reset_stable, num_success, e.fatal
def check_stability(self, real, block_pddl, desired_pose, max_tries=2):
if self.use_vision:
try:
poses = self._get_block_poses_wrist().poses
except:
print(
'Service call to get block poses failed during check stability. Exiting.'
)
sys.exit()
visible = False
for named_pose in poses:
if named_pose.block_id in block_pddl.readableName.split('_')[-1
]:
visible = True
pose = named_pose.pose.pose
des_pos = desired_pose[0]
obs_pos = pose.position.x, pose.position.y, pose.position.z
print('[Check Stability] Desired Pos:', des_pos)
print('[Check Stability] Detected Pos:', obs_pos)
dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.
array(des_pos))
print(
f'[Check Stability] Position Distance (>0.04): {dist}')
if dist > 0.04:
return 0.0
orn = desired_pose[1]
obs_orn = pyquaternion.Quaternion(pose.orientation.w,
pose.orientation.x, pose.orientation.y, pose.
orientation.z)
des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1
], orn[2])
angle = (des_orn.inverse * obs_orn).angle
angle = numpy.abs(numpy.rad2deg(angle))
print(
f'[Check Stability] Orientation Distance (> 15): {angle}'
)
if angle > 15:
return 0.0
if not visible:
print('[Check Stability] Object not visible to camera.')
return 0.0
else:
end_pose = block_pddl.get_base_link_point()
dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(
desired_pose[0]))
if dist > 0.01:
print('Unstable!')
return 0.0
return 1.0
def validate_ros_plan(self, ros_resp, tgt_block):
""" Validates a ROS plan to move a block against the expected target block name """
if len(ros_resp.plan) == 0:
return True
else:
plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']
if len(plan_blocks) > 0:
plan_block = plan_blocks[0]
else:
return False
print(
f'Received plan to move {plan_block} and expected to move {tgt_block}'
)
return tgt_block.readableName == plan_block
<|reserved_special_token_0|>
def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):
""" Service callback function to plan and execute a tower from active learning script """
from stacking_ros.srv import PlanTowerResponse
from tamp.ros_utils import ros_to_tower
tower = ros_to_tower(ros_req.tower_info)
success, stable, num_stack_stable = self.simulate_tower(tower, True,
real=self.real, base_xy=base_xy)
resp = PlanTowerResponse()
resp.success = success
resp.stable = stable
resp.num_stack_stable = num_stack_stable
return resp
    def step_simulation(self, T, vis_frames=False, lifeTime=0.1):
        """
        Step both PyBullet worlds (execution and planning) forward in lockstep
        while holding the arm at its current configuration.

        :param T: Number of simulation steps to run.
        :param vis_frames: If True, draw an RGB axis frame on every block each step.
        :param lifeTime: Lifetime (seconds) of the drawn debug lines.
        """
        p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)
        p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)
        q = self.robot.get_joint_positions()
        for _ in range(T):
            p.stepSimulation(physicsClientId=self._execution_client_id)
            p.stepSimulation(physicsClientId=self._planning_client_id)
            # Re-assert the arm configuration q in both clients after each
            # physics step so the arm stays fixed while the blocks settle.
            self.execute()
            self.execution_robot.set_joint_positions(self.robot.joints, q)
            self.plan()
            self.robot.set_joint_positions(self.robot.joints, q)
            time.sleep(1 / 2400.0)
            if vis_frames:
                # Draw a short x/y/z (red/green/blue) frame at each block's
                # pose in the execution (visualized) client only.
                length = 0.1
                for pddl_block in self.pddl_blocks:
                    pos, quat = pddl_block.get_pose()
                    new_x = transformation([length, 0.0, 0.0], pos, quat)
                    new_y = transformation([0.0, length, 0.0], pos, quat)
                    new_z = transformation([0.0, 0.0, length], pos, quat)
                    p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
                    p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
                    p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
    def simulate_action(self, action, block_ix, T=50, vis_sim=False,
        vis_placement=False):
        """
        Perform the given action to with the given block. An observation
        should be returned in the reference frame of the platform.
        :param action: Place action which describes the relative pose of the block to the platform surface.
        :param real_block: Belief representation of the block to perform the action on.
        :param T: How many timesteps to simulate the block falling for.
        :param vis_sim: Unused.
        :return: (action, T, end_pose) End pose should be TODO: what frame?
        TODO: Not sure if this method works at the moment...
        """
        assert self.platform_table is not None
        # Resolve the belief-space block and its PDDL body for this index.
        real_block = self.belief_blocks[block_ix]
        pddl_block = self.pddl_blocks[block_ix]
        original_pose = pddl_block.get_base_link_pose()
        # Use gripper-frame approaches without slanted grasps for the placement.
        self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
            pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',
            use_vision=self.use_vision)
        init = self._get_initial_pddl_state()
        # Rotate the belief block per the action and compute the target height
        # so the block rests on top of the platform surface.
        real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))
        rotated_block = get_rotated_block(real_block)
        x = action.pos[0]
        y = action.pos[1]
        z = self.platform_table.get_dimensions()[2
            ] / 2.0 + rotated_block.dimensions[2] / 2
        # Homogeneous transform of the block relative to the platform surface.
        tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0,
            0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])
        tform[0:3, 0:3] = action.rot.as_matrix()
        if vis_placement:
            # Draw an RGB axis frame at the intended placement pose.
            surface_tform = pb_robot.geometry.tform_from_pose(self.
                platform_table.get_base_link_pose())
            body_tform = surface_tform @ tform
            length, lifeTime = 0.2, 0.0
            pos, quat = pb_robot.geometry.pose_from_tform(body_tform)
            new_x = transformation([length, 0.0, 0.0], pos, quat)
            new_y = transformation([0.0, length, 0.0], pos, quat)
            new_z = transformation([0.0, 0.0, length], pos, quat)
            p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)
            p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)
            p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)
        # Plan and execute the placement onto the platform.
        # NOTE(review): plan_and_execute's current signature does not accept
        # search_sample_ratio/max_time keywords; this legacy call likely
        # predates the current API (see TODO above) — confirm before use.
        init += [('RelPose', pddl_block, self.platform_table, tform)]
        goal = 'On', pddl_block, self.platform_table
        print('Init:', init)
        print('Goal:', goal)
        self.plan_and_execute(init, goal, search_sample_ratio=1000)
        # Let the block settle for T steps, then observe its pose relative to
        # the platform (with observation noise added).
        self.step_simulation(T)
        end_pose = self._get_observed_pose(pddl_block, action)
        observation = action, T, end_pose
        self.step_simulation(500 - T)
        # Plan a return of the block to its original pose on the table.
        self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
            pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',
            use_vision=self.use_vision)
        init = self._get_initial_pddl_state()
        goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)
        init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,
            goal_pose, self.table, self.table_pose)]
        goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,
            self.table)
        print('Init:', init)
        print('Goal:', goal)
        success = self.plan_and_execute(init, goal, max_time=100.0,
            search_sample_ratio=1000)
        return observation
class PandaClientAgent:
    """
    Lightweight client to call a PandaAgent as a service for active learning
    """
    def __init__(self):
        # Register this process as a ROS node and connect to the tower
        # planning service.
        import rospy
        rospy.init_node('panda_client')
        self.restart_services()
    def restart_services(self):
        """Block until the /plan_tower service is available and (re)create the proxy."""
        import rospy
        from stacking_ros.srv import PlanTower
        print('Waiting for Panda Agent server...')
        rospy.wait_for_service('/plan_tower')
        print('Done')
        self.client = rospy.ServiceProxy('/plan_tower', PlanTower)
    def simulate_tower(self, tower, vis, real=False):
        """
        Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.
        Returns:
            success : Flag indicating success of execution (True/False)
            stable : Flag indicating (0 or 1)
            num_stack_success : Number of blocks successfully stacked
        """
        from stacking_ros.srv import PlanTowerRequest
        from tamp.ros_utils import tower_to_ros, ros_to_tower
        request = PlanTowerRequest()
        request.tower_info = tower_to_ros(tower)
        if vis:
            # Open a local visualization of the requested tower while the
            # remote server plans/executes it.
            w = World(tower)
            env = Environment([w], vis_sim=True, vis_frames=True)
            env.step(vis_frames=True)
        for b in tower:
            print('----- Block info -----')
            print(b.name)
            print(b.dimensions)
            print(b.pose)
            print(b.rotation)
        # Blocking service call; returns once the server finishes the tower.
        response = self.client.call(request)
        if vis:
            env.disconnect()
        return response.success, response.stable, response.num_stack_stable
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class PandaAgent:
    def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,
        use_platform=False, use_vision=False, real=False,
        use_planning_server=False, use_learning_server=False,
        alternate_orientations=False):
        """
        Build the Panda world in PyBullet and set up the PDDLStream solver.
        The Panda world should in include the given blocks as well as a
        platform which can be used in experimentation.
        :param blocks: Belief-space block representations to add to the world.
        :param noise: Covariance scale used when adding observation noise.
        :param block_init_xy_poses: Optional initial xy poses for the blocks.
        :param use_platform: Boolean stating whether to include the platform to
                             push blocks off of or not.
        :param use_vision: Boolean stating whether to use vision to detect blocks.
        :param use_planning_server: Boolean stating whether to use the separate
                                    ROS planning service server.
        :param use_learning_server: Boolean stating whether to host a ROS service
                                    server to drive planning from active learning script.
        :param alternate_orientations: Boolean stating whether blocks can be replaced in
                                       their home positions at alternate orientations.
        If you are using the ROS action server, you must start it in a separate terminal:
            rosrun stacking_ros planning_server.py
        """
        self.real = real
        self.use_vision = use_vision
        self.use_platform = use_platform
        self.use_planning_server = use_planning_server
        self.use_learning_server = use_learning_server
        self.alternate_orientations = alternate_orientations
        # Headless client used for motion planning.
        self._planning_client_id = pb_robot.utils.connect(use_gui=False)
        self.plan()
        pb_robot.utils.set_default_camera()
        self.robot = pb_robot.panda.Panda()
        self.robot.arm.hand.Open()
        self.belief_blocks = blocks
        (self.pddl_blocks, self.platform_table, self.platform_leg, self.
            table, self.frame, self.wall) = (setup_panda_world(self.robot,
            blocks, block_init_xy_poses, use_platform=use_platform))
        self.fixed = [self.platform_table, self.platform_leg, self.table,
            self.frame, self.wall]
        self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks
            )
        # Remember the startup configuration so the world can be reset later.
        self.orig_joint_angles = self.robot.arm.GetJointValues()
        self.orig_block_poses = [b.get_base_link_pose() for b in self.
            pddl_blocks]
        poses = [b.get_base_link_pose() for b in self.pddl_blocks]
        poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]
        # GUI client mirroring the planning world for execution/visualization.
        self._execution_client_id = pb_robot.utils.connect(use_gui=True)
        self.execute()
        pb_robot.utils.set_default_camera()
        self.execution_robot = pb_robot.panda.Panda()
        self.execution_robot.arm.hand.Open()
        setup_panda_world(self.execution_robot, blocks, poses, use_platform
            =use_platform)
        # ROS is only needed when any server/vision/real-robot feature is on.
        if (self.use_vision or self.use_planning_server or self.
            use_learning_server or real):
            import rospy
            try:
                rospy.init_node('panda_agent')
            except:
                print('ROS Node already created')
        if real:
            # Real-robot interface plus a subscriber that surfaces robot
            # error states via robot_state_callback.
            from franka_interface import ArmInterface
            self.real_arm = ArmInterface()
            from franka_core_msgs.msg import RobotState
            state_topic = (
                '/franka_ros_interface/custom_franka_state_controller/robot_state'
                )
            self.arm_last_error_time = time.time()
            self.arm_error_check_time = 3.0
            self.arm_state_subscriber = rospy.Subscriber(state_topic,
                RobotState, self.robot_state_callback)
        if self.use_vision:
            # Service proxies for world- and wrist-camera block detection.
            from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist
            rospy.wait_for_service('get_block_poses_world')
            rospy.wait_for_service('get_block_poses_wrist')
            self._get_block_poses_world = rospy.ServiceProxy(
                'get_block_poses_world', GetBlockPosesWorld)
            self._get_block_poses_wrist = rospy.ServiceProxy(
                'get_block_poses_wrist', GetBlockPosesWrist)
        self.last_obj_held = None
        if self.use_planning_server:
            # Proxies to the external PDDLStream planning server.
            from stacking_ros.srv import GetPlan, SetPlanningState
            from tamp.ros_utils import goal_to_ros, ros_to_task_plan
            print('Waiting for planning server...')
            rospy.wait_for_service('get_latest_plan')
            self.goal_to_ros = goal_to_ros
            self.ros_to_task_plan = ros_to_task_plan
            self.init_state_client = rospy.ServiceProxy('/reset_planning',
                SetPlanningState)
            self.get_plan_client = rospy.ServiceProxy('/get_latest_plan',
                GetPlan)
            print('Done!')
        if self.use_learning_server:
            # Host a service so an active-learning script can drive towers.
            from stacking_ros.srv import PlanTower
            self.learning_server = rospy.Service('/plan_tower', PlanTower,
                self.learning_server_callback)
            print('Learning server started!')
        self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
            pddl_blocks, add_slanted_grasps=False, approach_frame='global',
            use_vision=self.use_vision)
        self.noise = noise
        self.txt_id = None
        self.plan()
def _add_text(self, txt):
self.execute()
pb_robot.viz.remove_all_debug()
self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),
size=2)
self.plan()
def execute(self):
self.state = 'execute'
pb_robot.aabb.set_client(self._execution_client_id)
pb_robot.body.set_client(self._execution_client_id)
pb_robot.collisions.set_client(self._execution_client_id)
pb_robot.geometry.set_client(self._execution_client_id)
pb_robot.grasp.set_client(self._execution_client_id)
pb_robot.joint.set_client(self._execution_client_id)
pb_robot.link.set_client(self._execution_client_id)
pb_robot.panda.set_client(self._execution_client_id)
pb_robot.planning.set_client(self._execution_client_id)
pb_robot.utils.set_client(self._execution_client_id)
pb_robot.viz.set_client(self._execution_client_id)
def plan(self):
if self.use_planning_server:
return
self.state = 'plan'
pb_robot.aabb.set_client(self._planning_client_id)
pb_robot.body.set_client(self._planning_client_id)
pb_robot.collisions.set_client(self._planning_client_id)
pb_robot.geometry.set_client(self._planning_client_id)
pb_robot.grasp.set_client(self._planning_client_id)
pb_robot.joint.set_client(self._planning_client_id)
pb_robot.link.set_client(self._planning_client_id)
pb_robot.panda.set_client(self._planning_client_id)
pb_robot.planning.set_client(self._planning_client_id)
pb_robot.utils.set_client(self._planning_client_id)
pb_robot.viz.set_client(self._planning_client_id)
def reset_world(self):
""" Resets the planning world to its original configuration """
print('Resetting world')
if self.real:
angles = self.real_arm.convertToList(self.real_arm.joint_angles())
else:
angles = self.orig_joint_angles
self.plan()
self.robot.arm.SetJointValues(angles)
self.execute()
self.execution_robot.arm.SetJointValues(angles)
for bx, b in enumerate(self.pddl_blocks):
b.set_base_link_pose(self.orig_block_poses[bx])
print('Done')
def _get_initial_pddl_state(self):
"""
Get the PDDL representation of the world between experiments. This
method assumes that all blocks are on the table. We will always "clean
up" an experiment by moving blocks away from the platform after an
experiment.
"""
fixed = [self.table, self.platform_table, self.platform_leg, self.frame
]
conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.
GetJointValues())
print('Initial configuration:', conf.configuration)
init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (
'AtConf', conf), ('HandEmpty',)]
self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.
get_base_link_pose())
init += [('Pose', self.table, self.table_pose), ('AtPose', self.
table, self.table_pose)]
for body in self.pddl_blocks:
print(type(body), body)
pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())
init += [('Graspable', body), ('Pose', body, pose), ('AtPose',
body, pose), ('Block', body), ('On', body, self.table), (
'Supported', body, pose, self.table, self.table_pose)]
if not self.platform_table is None:
platform_pose = pb_robot.vobj.BodyPose(self.platform_table,
self.platform_table.get_base_link_pose())
init += [('Pose', self.platform_table, platform_pose), (
'AtPose', self.platform_table, platform_pose)]
init += [('Block', self.platform_table)]
init += [('Table', self.table)]
return init
    def _get_observed_pose(self, pddl_block, action):
        """
        This pose should be relative to the base of the platform leg to
        agree with the simulation. The two block representations will have
        different orientation but their positions should be the same.
        """
        block_transform = pddl_block.get_base_link_transform()
        platform_transform = self.platform_leg.get_base_link_transform()
        # Shift the reference frame down to the bottom of the platform leg.
        platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0
        # Express the block's transform in the leg-base frame.
        rel_transform = numpy.linalg.inv(platform_transform) @ block_transform
        end_pose = pb_robot.geometry.pose_from_tform(rel_transform)
        end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))
        # Perturb the observation via add_noise with covariance self.noise * I.
        end_pose = add_noise(end_pose, self.noise * numpy.eye(3))
        return end_pose
def _update_block_poses(self, find_moved=False):
""" Use the global world cameras to update the positions of the blocks """
try:
resp = self._get_block_poses_world()
named_poses = resp.poses
except:
import sys
print('Service call to get block poses failed. Exiting.')
sys.exit()
n_found = 0
for pddl_block_name, pddl_block in self.pddl_block_lookup.items():
for named_pose in named_poses:
if named_pose.block_id == pddl_block_name.split('_')[-1]:
pose = named_pose.pose.pose
if pose.position.x < 0.05:
continue
n_found += 1
position = (pose.position.x, pose.position.y, pose.
position.z)
orientation = (pose.orientation.x, pose.orientation.y,
pose.orientation.z, pose.orientation.w)
self.execute()
pddl_block.set_base_link_pose((position, orientation))
if not self.use_planning_server:
self.plan()
pddl_block.set_base_link_pose((position, orientation))
if find_moved and n_found != len(self.moved_blocks):
input(
'Could not find all the moved blocks. Please reposition blocks outside of the camera view and hit enter to continue.'
)
self._update_block_poses(find_moved=True)
return
for _, pddl_block in self.pddl_block_lookup.items():
if pb_robot.collisions.body_collision(pddl_block, self.table):
print('Collision with table and block:', pddl_block.
readableName)
position, orientation = pddl_block.get_base_link_pose()
stable_z = pb_robot.placements.stable_z(pddl_block, self.table)
position = position[0], position[1], stable_z
self.execute()
pddl_block.set_base_link_pose((position, orientation))
self.plan()
pddl_block.set_base_link_pose((position, orientation))
current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
block_ixs = range(len(self.pddl_blocks))
block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2
], reverse=False)
for ix in range(len(block_ixs)):
bottom_block = self.pddl_blocks[block_ixs[ix]]
for jx in range(ix + 1, len(block_ixs)):
top_block = self.pddl_blocks[block_ixs[jx]]
dist_moved = 0
while pb_robot.collisions.body_collision(bottom_block,
top_block):
print('Collision with bottom %s and top %s:' % (
bottom_block.readableName, top_block.readableName))
position, orientation = top_block.get_base_link_pose()
stable_z = position[2] + 0.001
dist_moved += 0.001
if self.real and dist_moved > 0.04:
print(
f'Found blocks {bottom_block} and {top_block} in collision'
)
input(
'Manually move the blocks and press Enter to continue'
)
self._update_block_poses(find_moved=False)
return
position = position[0], position[1], stable_z
self.execute()
top_block.set_base_link_pose((position, orientation))
self.plan()
top_block.set_base_link_pose((position, orientation))
def build_planning_problem(self, tower, base_xy):
""" Builds the initial conditions for planning """
self.moved_blocks = set()
tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]
tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]
if self.use_planning_server:
from stacking_ros.msg import BodyInfo
from stacking_ros.srv import SetPlanningStateRequest
from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
ros_req = SetPlanningStateRequest()
if self.real:
ros_req.robot_config.angles = self.real_arm.convertToList(self
.real_arm.joint_angles())
else:
ros_req.robot_config.angles = self.robot.arm.GetJointValues()
ros_req.init_state = block_init_to_ros(self.pddl_blocks)
else:
pddl_problems = []
base_block = self.pddl_block_lookup[tower[0].name]
base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z
base_pose = base_pos, tower[0].rotation
base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)
if self.use_planning_server:
base_block_ros = BodyInfo()
base_block_ros.name = base_block.readableName
base_block_ros.stack = True
pose_to_ros(base_pose, base_block_ros.pose)
ros_req.goal_state.append(base_block_ros)
else:
pddl_problems.append((self.table, base_block, (base_pos, tower[
0].rotation)))
for b_ix in range(1, len(tower)):
bottom_block = tower[b_ix - 1]
bottom_pose = bottom_block.pose.pos, bottom_block.rotation
bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)
top_block = tower[b_ix]
top_pose = top_block.pose.pos, top_block.rotation
top_tform = pb_robot.geometry.tform_from_pose(top_pose)
rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform
top_pddl = self.pddl_block_lookup[top_block.name]
bottom_pddl = self.pddl_block_lookup[bottom_block.name]
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = top_pddl.readableName
block_ros.base_obj = bottom_pddl.readableName
transform_to_ros(rel_tform, block_ros.pose)
block_ros.is_rel_pose = True
block_ros.stack = True
ros_req.goal_state.append(block_ros)
else:
init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]
goal_terms = [('On', top_pddl, bottom_pddl)]
pddl_problems.append((bottom_pddl, top_pddl, rel_tform))
for ix in reversed(tower_block_order):
blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
goal_pose = pb_robot.vobj.BodyPose(blk, pose)
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = blk.readableName
block_ros.stack = False
pose_to_ros(goal_pose, block_ros.pose)
ros_req.goal_state.append(block_ros)
else:
pddl_problems.append((self.table, blk, pose))
if self.use_planning_server:
return ros_req
else:
return pddl_problems
    def build_reset_problem(self):
        """ Builds the initial conditions for a tower reset given a set of moved blocks """
        print('Resetting blocks...')
        print('Moved Blocks:', self.moved_blocks)
        # Sort block indices top-down (descending z) so upper blocks are
        # removed before the ones beneath them.
        current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
        block_ixs = range(len(self.pddl_blocks))
        block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2
            ], reverse=True)
        if self.use_planning_server:
            # Package the reset goals as a ROS planning request.
            from stacking_ros.msg import BodyInfo
            from stacking_ros.srv import SetPlanningStateRequest
            from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
            ros_req = SetPlanningStateRequest()
            ros_req.init_state = block_init_to_ros(self.pddl_blocks)
            if self.real:
                ros_req.robot_config.angles = self.real_arm.convertToList(self
                    .real_arm.joint_angles())
            else:
                ros_req.robot_config.angles = self.robot.arm.GetJointValues()
        else:
            pddl_problems = []
        # Only blocks recorded as moved during the experiment need resetting.
        for ix in block_ixs:
            blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
            if blk in self.moved_blocks:
                if self.use_planning_server:
                    goal_pose = pb_robot.vobj.BodyPose(blk, pose)
                    block_ros = BodyInfo()
                    block_ros.name = blk.readableName
                    block_ros.stack = False
                    pose_to_ros(goal_pose, block_ros.pose)
                    ros_req.goal_state.append(block_ros)
                else:
                    pddl_problems.append((self.table, blk, pose))
        if self.use_planning_server:
            return ros_req
        else:
            return pddl_problems
def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0,
0.5), ignore_resets=False):
"""
Simulates a tower stacking and unstacking by requesting plans from a separate planning server
Returns:
success : Flag indicating success of execution (True/False)
stable : Flag indicating (0 or 1)
num_stack_success : Number of blocks successfully stacked
"""
for block in tower:
print('Block:', block.name)
print('Pose:', block.pose)
print('Dims:', block.dimensions)
print('CoM:', block.com)
print('Rotations:', block.rotation)
print('-----')
if self.use_vision:
self._update_block_poses()
self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks
]
planning_prob = self.build_planning_problem(tower, base_xy)
success, stack_stable, reset_stable, num_success, fatal = (self.
plan_and_execute(planning_prob, real, T, stack=True,
ignore_resets=ignore_resets))
print(
f'Completed tower stack with success: {success}, stable: {stack_stable}'
)
if reset_stable:
print(f'Completed tower reset stable: {reset_stable}')
while not success and not fatal:
print(
f'Got recoverable failure. Replanning from step index {num_success}.'
)
if self.use_planning_server:
from tamp.ros_utils import block_init_to_ros
if self.real:
planning_prob.robot_config.angles = (self.real_arm.
convertToList(self.real_arm.joint_angles()))
else:
planning_prob.robot_config.angles = (self.robot.arm.
GetJointValues())
planning_prob.init_state = block_init_to_ros(self.pddl_blocks)
if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):
planning_prob.held_block.name = (self.last_obj_held.
body.readableName)
transform_to_ros(self.last_obj_held.grasp_objF,
planning_prob.held_block.pose)
success, stack_stable, reset_stable, num_success, fatal = (self
.plan_and_execute(planning_prob, real, T, stack=True,
start_idx=num_success, ignore_resets=ignore_resets))
print(
f'Completed tower stack with success: {success}, stable: {stack_stable}'
)
if reset_stable:
print(f'Completed tower reset stable: {reset_stable}')
num_stack_success = min(len(tower), num_success)
if not ignore_resets:
try:
if not (stack_stable and reset_stable):
if self.use_vision and not stack_stable:
self._update_block_poses(find_moved=True)
planning_prob = self.build_reset_problem()
reset_fatal = False
num_reset_success = 0
while len(self.moved_blocks) > 0 and not reset_fatal:
print(f'Resetting {len(self.moved_blocks)} blocks.')
(reset_success, _, reset_stable, num_reset_success,
reset_fatal) = (self.plan_and_execute(
planning_prob, real, T, stack=False, start_idx=
num_reset_success))
except Exception as e:
print('Planning/execution failed during tower reset.')
print(e)
return success, stack_stable, num_stack_success
    def plan_and_execute(self, planning_prob, real=False, T=2500, stack=
        True, start_idx=0, ignore_resets=False):
        """
        Requests a PDDLStream plan from a planning server and executes the resulting plan
        Returns:
            success : Flag for whether the plan execution succeeded
            stack_stable : Flag for whether stacking a stable tower was successful
            reset_stable : Flag for whether resetting a tower was successful
            num_success : Progress (in number of steps) of successful tasks
            fatal : Flag for whether the error was fatal (True) or recoverable (False)
            start_idx : Start index of planning (for recovering from partial plans)
            ignore_resets : Flag for whether to stop after resets
        """
        num_success = start_idx
        stack_stable = False
        reset_stable = False
        planning_active = True
        if self.use_planning_server:
            # Send the server only the goals that remain from start_idx on.
            ros_req = planning_prob
            num_steps = len(ros_req.goal_state)
            trimmed_ros_req = deepcopy(ros_req)
            trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]
            self.init_state_client.call(trimmed_ros_req)
        else:
            pddl_problems = planning_prob
            num_steps = len(pddl_problems)
        # One iteration per block placement (stack steps then reset steps).
        while num_success < num_steps:
            try:
                if self.use_planning_server:
                    # Poll the planning server until it produces a plan for
                    # the current goal block, or reports failure.
                    query_block = self.pddl_block_lookup[ros_req.goal_state
                        [num_success].name]
                    plan = []
                    saved_world = pb_robot.utils.WorldSaver()
                    while len(plan) == 0 and planning_active:
                        time.sleep(5)
                        print('Getting a plan from server...')
                        ros_resp = self.get_plan_client.call()
                        if not ros_resp.planning_active:
                            print('Planning failed on server side.')
                            # Stacking failures are fatal; reset failures are
                            # recoverable after manual intervention.
                            if ros_req.goal_state[num_success].stack:
                                print(f'Failed during stacking {query_block}')
                                fatal = True
                            else:
                                print(f'Failed during resetting {query_block}')
                                input(
                                    'Manually reset the blocks and press Enter to continue'
                                    )
                                if real:
                                    self._update_block_poses()
                                fatal = False
                            return (False, stack_stable, reset_stable,
                                num_success, fatal)
                        if self.validate_ros_plan(ros_resp, query_block):
                            plan = self.ros_to_task_plan(ros_resp, self.
                                execution_robot, self.pddl_block_lookup)
                else:
                    # Plan locally with PDDLStream for the current sub-problem.
                    base, blk, pose = pddl_problems[num_success]
                    query_block = blk
                    self._add_text('Planning block placement')
                    self.plan()
                    saved_world = pb_robot.utils.WorldSaver()
                    self.robot.arm.hand.Open()
                    # All other blocks are treated as immovable obstacles.
                    fixed_objs = self.fixed + [b for b in self.pddl_blocks if
                        b != blk]
                    init = self._get_initial_pddl_state()
                    goal_terms = []
                    if base == self.table:
                        blk_pose = pb_robot.vobj.BodyPose(blk, pose)
                        # During resets, alternate home orientations may be
                        # allowed instead of the exact original pose.
                        if (not stack or num_success >= num_steps / 2
                            ) and self.alternate_orientations:
                            init += [('Reset',)]
                            goal_terms.append(('AtHome', blk))
                        else:
                            init += [('Pose', blk, blk_pose), ('Supported',
                                blk, blk_pose, self.table, self.table_pose)]
                            goal_terms.append(('AtPose', blk, blk_pose))
                        goal_terms.append(('On', blk, self.table))
                    else:
                        init += [('RelPose', blk, base, pose)]
                        goal_terms.append(('On', blk, base))
                    goal = tuple(['and'] + goal_terms)
                    pddl_info = get_pddlstream_info(self.robot, fixed_objs,
                        self.pddl_blocks, add_slanted_grasps=True,
                        approach_frame='global', use_vision=self.use_vision,
                        home_pose=pose)
                    plan, cost = pddlstream_plan(pddl_info, init, goal,
                        search_sample_ratio=1.0, max_time=INF)
                if plan is None:
                    print('\nFailed to plan\n')
                    fatal = False
                    return (False, stack_stable, reset_stable,
                        num_success, fatal)
                saved_world.restore()
                print('\nGot plan:')
                print(plan)
                # Rehearse the plan in the planning client, then run it in the
                # execution client (optionally on the real robot).
                obstacles = [f for f in self.fixed if f is not None]
                if not self.use_planning_server:
                    self.plan()
                    ExecuteActions(plan, real=False, pause=False, wait=
                        False, obstacles=obstacles)
                self.execute()
                ExecuteActions(plan, real=real, pause=True, wait=False,
                    prompt=False, obstacles=obstacles,
                    sim_fatal_failure_prob=0.0,
                    sim_recoverable_failure_prob=0.0)
                desired_pose = query_block.get_base_link_pose()
                # Toggle membership: moving a block during stacking marks it
                # as moved; moving it back during reset clears the mark.
                if query_block not in self.moved_blocks:
                    self.moved_blocks.add(query_block)
                else:
                    self.moved_blocks.remove(query_block)
                if not real:
                    self.step_simulation(T, vis_frames=False)
                if stack:
                    stable = self.check_stability(real, query_block,
                        desired_pose)
                else:
                    stable = True
                # Let an operator override a false instability detection.
                if stable == 0.0:
                    prompt = input(
                        'Tower NOT stable. Is this true? [y: Unstable / n: Stable]'
                        )
                    if prompt == 'n':
                        stable = 1.0
                if stable == 0.0:
                    print('Unstable after execution!')
                    return True, stack_stable, reset_stable, num_success, False
                else:
                    num_success += 1
                    # The first half of the steps stack the tower; the second
                    # half resets it.
                    if stack and num_success == num_steps / 2:
                        print('Completed tower stack!')
                        stack_stable = True
                        stack = False
                        if ignore_resets:
                            return (True, stack_stable, reset_stable,
                                num_success, False)
                    elif num_success == num_steps:
                        print('Completed tower reset!')
                        reset_stable = True
                        return (True, stack_stable, reset_stable,
                            num_success, False)
            except ExecutionFailure as e:
                # Recoverable/fatal execution errors: restore the saved world,
                # resync with the real robot, and report progress to caller.
                print('Planning/execution failed.')
                print(e)
                saved_world.restore()
                if real:
                    self._update_block_poses()
                    self.robot.arm.SetJointValues(self.real_arm.
                        convertToList(self.real_arm.joint_angles()))
                self.last_obj_held = e.obj_held
                return False, stack_stable, reset_stable, num_success, e.fatal
def check_stability(self, real, block_pddl, desired_pose, max_tries=2):
if self.use_vision:
try:
poses = self._get_block_poses_wrist().poses
except:
print(
'Service call to get block poses failed during check stability. Exiting.'
)
sys.exit()
visible = False
for named_pose in poses:
if named_pose.block_id in block_pddl.readableName.split('_')[-1
]:
visible = True
pose = named_pose.pose.pose
des_pos = desired_pose[0]
obs_pos = pose.position.x, pose.position.y, pose.position.z
print('[Check Stability] Desired Pos:', des_pos)
print('[Check Stability] Detected Pos:', obs_pos)
dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.
array(des_pos))
print(
f'[Check Stability] Position Distance (>0.04): {dist}')
if dist > 0.04:
return 0.0
orn = desired_pose[1]
obs_orn = pyquaternion.Quaternion(pose.orientation.w,
pose.orientation.x, pose.orientation.y, pose.
orientation.z)
des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1
], orn[2])
angle = (des_orn.inverse * obs_orn).angle
angle = numpy.abs(numpy.rad2deg(angle))
print(
f'[Check Stability] Orientation Distance (> 15): {angle}'
)
if angle > 15:
return 0.0
if not visible:
print('[Check Stability] Object not visible to camera.')
return 0.0
else:
end_pose = block_pddl.get_base_link_point()
dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(
desired_pose[0]))
if dist > 0.01:
print('Unstable!')
return 0.0
return 1.0
def validate_ros_plan(self, ros_resp, tgt_block):
""" Validates a ROS plan to move a block against the expected target block name """
if len(ros_resp.plan) == 0:
return True
else:
plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']
if len(plan_blocks) > 0:
plan_block = plan_blocks[0]
else:
return False
print(
f'Received plan to move {plan_block} and expected to move {tgt_block}'
)
return tgt_block.readableName == plan_block
def robot_state_callback(self, msg):
""" Processes robot state errors and raises execution failures for planning """
cur_time = time.time()
if cur_time - self.arm_last_error_time < self.arm_error_check_time:
return
self.arm_last_error_time = cur_time
cur_errors = msg.current_errors
if cur_errors.communication_constraints_violation:
reason = 'Communication constraints violation detected!'
raise ExecutionFailure(reason=reason, fatal=True)
if cur_errors.joint_position_limits_violation:
reason = 'Joint position limits violation detected!'
raise ExecutionFailure(reason=reason, fatal=True)
if cur_errors.joint_motion_generator_position_limits_violation:
reason = (
'Joint motion generator position limits violation detected!')
raise ExecutionFailure(reason=reason, fatal=True)
def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):
""" Service callback function to plan and execute a tower from active learning script """
from stacking_ros.srv import PlanTowerResponse
from tamp.ros_utils import ros_to_tower
tower = ros_to_tower(ros_req.tower_info)
success, stable, num_stack_stable = self.simulate_tower(tower, True,
real=self.real, base_xy=base_xy)
resp = PlanTowerResponse()
resp.success = success
resp.stable = stable
resp.num_stack_stable = num_stack_stable
return resp
    def step_simulation(self, T, vis_frames=False, lifeTime=0.1):
        """
        Advance both PyBullet clients (execution and planning) by T steps
        while holding the robot arm rigid at its current configuration.

        :param T: Number of simulation steps to run.
        :param vis_frames: If True, draw each block's body frame axes in the
            execution GUI on every step.
        :param lifeTime: Lifetime (seconds) of the drawn debug lines.
        """
        p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)
        p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)
        # Capture the arm configuration once and re-apply it every step so
        # gravity only acts on the blocks, not the robot.
        q = self.robot.get_joint_positions()
        for _ in range(T):
            p.stepSimulation(physicsClientId=self._execution_client_id)
            p.stepSimulation(physicsClientId=self._planning_client_id)
            self.execute()
            self.execution_robot.set_joint_positions(self.robot.joints, q)
            self.plan()
            self.robot.set_joint_positions(self.robot.joints, q)
            time.sleep(1 / 2400.0)
            if vis_frames:
                length = 0.1
                # Draw x (red), y (green), z (blue) axes for each block.
                for pddl_block in self.pddl_blocks:
                    pos, quat = pddl_block.get_pose()
                    new_x = transformation([length, 0.0, 0.0], pos, quat)
                    new_y = transformation([0.0, length, 0.0], pos, quat)
                    new_z = transformation([0.0, 0.0, length], pos, quat)
                    p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
                    p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
                    p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
    def simulate_action(self, action, block_ix, T=50, vis_sim=False,
        vis_placement=False):
        """
        Perform the given place action with the given block. An observation
        is returned in the reference frame of the platform.

        :param action: Place action describing the relative pose of the block
            to the platform surface.
        :param block_ix: Index of the block (into belief_blocks/pddl_blocks).
        :param T: How many timesteps to simulate the block falling for.
        :param vis_sim: Unused.
        :param vis_placement: If True, draw the target placement frame.
        :return: (action, T, end_pose) where end_pose is relative to the
            platform leg base (see _get_observed_pose).
        TODO: Not sure if this method works at the moment...
        NOTE(review): the self.plan_and_execute(init, goal, ...) calls below do
        not match plan_and_execute's current signature (planning_prob, real,
        T, ...) — this method likely predates that refactor; confirm.
        """
        assert self.platform_table is not None
        real_block = self.belief_blocks[block_ix]
        pddl_block = self.pddl_blocks[block_ix]
        # Remember where the block started so it can be returned afterwards.
        original_pose = pddl_block.get_base_link_pose()
        self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
            pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',
            use_vision=self.use_vision)
        init = self._get_initial_pddl_state()
        # Pose the belief block with the action's rotation, then compute the
        # placement height from the platform and rotated block dimensions.
        real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))
        rotated_block = get_rotated_block(real_block)
        x = action.pos[0]
        y = action.pos[1]
        z = self.platform_table.get_dimensions()[2
            ] / 2.0 + rotated_block.dimensions[2] / 2
        # Homogeneous transform of the block relative to the platform surface.
        tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0,
            0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])
        tform[0:3, 0:3] = action.rot.as_matrix()
        if vis_placement:
            # Visualize the desired placement frame in the GUI.
            surface_tform = pb_robot.geometry.tform_from_pose(self.
                platform_table.get_base_link_pose())
            body_tform = surface_tform @ tform
            length, lifeTime = 0.2, 0.0
            pos, quat = pb_robot.geometry.pose_from_tform(body_tform)
            new_x = transformation([length, 0.0, 0.0], pos, quat)
            new_y = transformation([0.0, length, 0.0], pos, quat)
            new_z = transformation([0.0, 0.0, length], pos, quat)
            p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)
            p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)
            p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)
        # Plan and execute the placement onto the platform, then let physics
        # settle for T steps before observing the outcome.
        init += [('RelPose', pddl_block, self.platform_table, tform)]
        goal = 'On', pddl_block, self.platform_table
        print('Init:', init)
        print('Goal:', goal)
        self.plan_and_execute(init, goal, search_sample_ratio=1000)
        self.step_simulation(T)
        end_pose = self._get_observed_pose(pddl_block, action)
        observation = action, T, end_pose
        self.step_simulation(500 - T)
        # Clean up: plan the block back to its original pose on the table.
        self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
            pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',
            use_vision=self.use_vision)
        init = self._get_initial_pddl_state()
        goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)
        init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,
            goal_pose, self.table, self.table_pose)]
        goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,
            self.table)
        print('Init:', init)
        print('Goal:', goal)
        success = self.plan_and_execute(init, goal, max_time=100.0,
            search_sample_ratio=1000)
        return observation
class PandaClientAgent:
    """
    Lightweight client to call a PandaAgent as a service for active learning
    """

    def __init__(self):
        import rospy
        rospy.init_node('panda_client')
        self.restart_services()

    def restart_services(self):
        """ (Re)connect to the PandaAgent '/plan_tower' planning service. """
        import rospy
        from stacking_ros.srv import PlanTower
        print('Waiting for Panda Agent server...')
        rospy.wait_for_service('/plan_tower')
        print('Done')
        self.client = rospy.ServiceProxy('/plan_tower', PlanTower)

    def simulate_tower(self, tower, vis, real=False):
        """
        Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.

        :param tower: Sequence of blocks (bottom first) to stack.
        :param vis: If True, also visualize the tower locally while waiting.
        :param real: Unused here; the server decides sim vs. real execution.
        Returns:
            success : Flag indicating success of execution (True/False)
            stable : Flag indicating (0 or 1)
            num_stack_success : Number of blocks successfully stacked
        """
        from stacking_ros.srv import PlanTowerRequest
        from tamp.ros_utils import tower_to_ros
        request = PlanTowerRequest()
        request.tower_info = tower_to_ros(tower)
        if vis:
            w = World(tower)
            env = Environment([w], vis_sim=True, vis_frames=True)
            env.step(vis_frames=True)
        for b in tower:
            print('----- Block info -----')
            print(b.name)
            print(b.dimensions)
            print(b.pose)
            print(b.rotation)
        # Blocks until the server finishes planning and executing the tower.
        response = self.client.call(request)
        if vis:
            env.disconnect()
        return response.success, response.stable, response.num_stack_stable
<|reserved_special_token_1|>
import sys
import time
import numpy
import pb_robot
import pyquaternion
import pybullet as p
from copy import deepcopy
from actions import PlaceAction, make_platform_world
from block_utils import get_adversarial_blocks, rotation_group, ZERO_POS, Quaternion, get_rotated_block, Pose, add_noise, Environment, Position, World
from pddlstream.utils import INF
from pybullet_utils import transformation
import tamp.primitives
from tamp.misc import setup_panda_world, get_pddl_block_lookup, print_planning_problem, ExecuteActions, ExecutionFailure
from tamp.pddlstream_utils import get_pddlstream_info, pddlstream_plan
class PandaAgent:
    def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,
        use_platform=False, use_vision=False, real=False,
        use_planning_server=False, use_learning_server=False,
        alternate_orientations=False):
        """
        Build the Panda world in PyBullet and set up the PDDLStream solver.
        The Panda world should include the given blocks as well as a
        platform which can be used in experimentation.
        :param blocks: Belief representations of the blocks in the world.
        :param noise: Observation noise scale used by _get_observed_pose.
        :param block_init_xy_poses: Optional initial xy poses for the blocks.
        :param use_platform: Boolean stating whether to include the platform to
            push blocks off of or not.
        :param use_vision: Boolean stating whether to use vision to detect blocks.
        :param real: Boolean stating whether to execute on the real robot.
        :param use_planning_server: Boolean stating whether to use the separate
            ROS planning service server.
        :param use_learning_server: Boolean stating whether to host a ROS service
            server to drive planning from active learning script.
        :param alternate_orientations: Boolean stating whether blocks can be replaced in
            their home positions at alternate orientations.
        If you are using the ROS action server, you must start it in a separate terminal:
            rosrun stacking_ros planning_server.py
        """
        self.real = real
        self.use_vision = use_vision
        self.use_platform = use_platform
        self.use_planning_server = use_planning_server
        self.use_learning_server = use_learning_server
        self.alternate_orientations = alternate_orientations
        # Headless PyBullet client used for motion planning.
        self._planning_client_id = pb_robot.utils.connect(use_gui=False)
        self.plan()
        pb_robot.utils.set_default_camera()
        self.robot = pb_robot.panda.Panda()
        self.robot.arm.hand.Open()
        self.belief_blocks = blocks
        (self.pddl_blocks, self.platform_table, self.platform_leg, self.
            table, self.frame, self.wall) = (setup_panda_world(self.robot,
            blocks, block_init_xy_poses, use_platform=use_platform))
        self.fixed = [self.platform_table, self.platform_leg, self.table,
            self.frame, self.wall]
        self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks
            )
        # Remember the home configuration and block poses for resets.
        self.orig_joint_angles = self.robot.arm.GetJointValues()
        self.orig_block_poses = [b.get_base_link_pose() for b in self.
            pddl_blocks]
        poses = [b.get_base_link_pose() for b in self.pddl_blocks]
        poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]
        # GUI PyBullet client mirroring the planning world for execution.
        self._execution_client_id = pb_robot.utils.connect(use_gui=True)
        self.execute()
        pb_robot.utils.set_default_camera()
        self.execution_robot = pb_robot.panda.Panda()
        self.execution_robot.arm.hand.Open()
        setup_panda_world(self.execution_robot, blocks, poses, use_platform
            =use_platform)
        # ROS is only needed when any server/vision/real-robot mode is on.
        if (self.use_vision or self.use_planning_server or self.
            use_learning_server or real):
            import rospy
            try:
                rospy.init_node('panda_agent')
            except:
                print('ROS Node already created')
        if real:
            from franka_interface import ArmInterface
            self.real_arm = ArmInterface()
            from franka_core_msgs.msg import RobotState
            state_topic = (
                '/franka_ros_interface/custom_franka_state_controller/robot_state'
                )
            self.arm_last_error_time = time.time()
            self.arm_error_check_time = 3.0
            self.arm_state_subscriber = rospy.Subscriber(state_topic,
                RobotState, self.robot_state_callback)
        if self.use_vision:
            from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist
            rospy.wait_for_service('get_block_poses_world')
            rospy.wait_for_service('get_block_poses_wrist')
            self._get_block_poses_world = rospy.ServiceProxy(
                'get_block_poses_world', GetBlockPosesWorld)
            self._get_block_poses_wrist = rospy.ServiceProxy(
                'get_block_poses_wrist', GetBlockPosesWrist)
        # Tracks the grasp in hand when execution fails mid-plan.
        self.last_obj_held = None
        if self.use_planning_server:
            from stacking_ros.srv import GetPlan, SetPlanningState
            from tamp.ros_utils import goal_to_ros, ros_to_task_plan
            print('Waiting for planning server...')
            rospy.wait_for_service('get_latest_plan')
            self.goal_to_ros = goal_to_ros
            self.ros_to_task_plan = ros_to_task_plan
            self.init_state_client = rospy.ServiceProxy('/reset_planning',
                SetPlanningState)
            self.get_plan_client = rospy.ServiceProxy('/get_latest_plan',
                GetPlan)
            print('Done!')
        if self.use_learning_server:
            from stacking_ros.srv import PlanTower
            self.learning_server = rospy.Service('/plan_tower', PlanTower,
                self.learning_server_callback)
            print('Learning server started!')
        self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
            pddl_blocks, add_slanted_grasps=False, approach_frame='global',
            use_vision=self.use_vision)
        self.noise = noise
        self.txt_id = None
        self.plan()
def _add_text(self, txt):
self.execute()
pb_robot.viz.remove_all_debug()
self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),
size=2)
self.plan()
def execute(self):
self.state = 'execute'
pb_robot.aabb.set_client(self._execution_client_id)
pb_robot.body.set_client(self._execution_client_id)
pb_robot.collisions.set_client(self._execution_client_id)
pb_robot.geometry.set_client(self._execution_client_id)
pb_robot.grasp.set_client(self._execution_client_id)
pb_robot.joint.set_client(self._execution_client_id)
pb_robot.link.set_client(self._execution_client_id)
pb_robot.panda.set_client(self._execution_client_id)
pb_robot.planning.set_client(self._execution_client_id)
pb_robot.utils.set_client(self._execution_client_id)
pb_robot.viz.set_client(self._execution_client_id)
def plan(self):
if self.use_planning_server:
return
self.state = 'plan'
pb_robot.aabb.set_client(self._planning_client_id)
pb_robot.body.set_client(self._planning_client_id)
pb_robot.collisions.set_client(self._planning_client_id)
pb_robot.geometry.set_client(self._planning_client_id)
pb_robot.grasp.set_client(self._planning_client_id)
pb_robot.joint.set_client(self._planning_client_id)
pb_robot.link.set_client(self._planning_client_id)
pb_robot.panda.set_client(self._planning_client_id)
pb_robot.planning.set_client(self._planning_client_id)
pb_robot.utils.set_client(self._planning_client_id)
pb_robot.viz.set_client(self._planning_client_id)
    def reset_world(self):
        """ Resets the planning world to its original configuration """
        print('Resetting world')
        # On the real robot, mirror the actual arm configuration; in pure
        # simulation, return to the configuration saved at startup.
        if self.real:
            angles = self.real_arm.convertToList(self.real_arm.joint_angles())
        else:
            angles = self.orig_joint_angles
        # Apply the configuration to both the planning and execution robots.
        self.plan()
        self.robot.arm.SetJointValues(angles)
        self.execute()
        self.execution_robot.arm.SetJointValues(angles)
        # Restore every block to the pose recorded at startup.
        for bx, b in enumerate(self.pddl_blocks):
            b.set_base_link_pose(self.orig_block_poses[bx])
        print('Done')
def _get_initial_pddl_state(self):
"""
Get the PDDL representation of the world between experiments. This
method assumes that all blocks are on the table. We will always "clean
up" an experiment by moving blocks away from the platform after an
experiment.
"""
fixed = [self.table, self.platform_table, self.platform_leg, self.frame
]
conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.
GetJointValues())
print('Initial configuration:', conf.configuration)
init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (
'AtConf', conf), ('HandEmpty',)]
self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.
get_base_link_pose())
init += [('Pose', self.table, self.table_pose), ('AtPose', self.
table, self.table_pose)]
for body in self.pddl_blocks:
print(type(body), body)
pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())
init += [('Graspable', body), ('Pose', body, pose), ('AtPose',
body, pose), ('Block', body), ('On', body, self.table), (
'Supported', body, pose, self.table, self.table_pose)]
if not self.platform_table is None:
platform_pose = pb_robot.vobj.BodyPose(self.platform_table,
self.platform_table.get_base_link_pose())
init += [('Pose', self.platform_table, platform_pose), (
'AtPose', self.platform_table, platform_pose)]
init += [('Block', self.platform_table)]
init += [('Table', self.table)]
return init
    def _get_observed_pose(self, pddl_block, action):
        """
        This pose should be relative to the base of the platform leg to
        agree with the simulation. The two block representations will have
        different orientation but their positions should be the same.

        :param pddl_block: pb_robot body whose pose is observed.
        :param action: Unused here (kept for the observation interface).
        :return: Noisy Pose of the block relative to the platform leg base.
        """
        block_transform = pddl_block.get_base_link_transform()
        platform_transform = self.platform_leg.get_base_link_transform()
        # Shift the reference frame down to the bottom of the platform leg.
        platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0
        rel_transform = numpy.linalg.inv(platform_transform) @ block_transform
        end_pose = pb_robot.geometry.pose_from_tform(rel_transform)
        end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))
        # Simulate observation noise on the position estimate.
        end_pose = add_noise(end_pose, self.noise * numpy.eye(3))
        return end_pose
    def _update_block_poses(self, find_moved=False):
        """ Use the global world cameras to update the positions of the blocks

        :param find_moved: If True, require that every block in
            self.moved_blocks is detected; otherwise prompt the operator and
            retry recursively.
        """
        try:
            resp = self._get_block_poses_world()
            named_poses = resp.poses
        # NOTE(review): bare except also swallows KeyboardInterrupt.
        except:
            import sys
            print('Service call to get block poses failed. Exiting.')
            sys.exit()
        n_found = 0
        # Match detections to blocks by the numeric suffix of the block name.
        for pddl_block_name, pddl_block in self.pddl_block_lookup.items():
            for named_pose in named_poses:
                if named_pose.block_id == pddl_block_name.split('_')[-1]:
                    pose = named_pose.pose.pose
                    # Detections too close to the robot base (x < 5 cm) are
                    # treated as spurious and skipped.
                    if pose.position.x < 0.05:
                        continue
                    n_found += 1
                    position = (pose.position.x, pose.position.y, pose.
                        position.z)
                    orientation = (pose.orientation.x, pose.orientation.y,
                        pose.orientation.z, pose.orientation.w)
                    # Apply the detected pose to both simulation clients.
                    self.execute()
                    pddl_block.set_base_link_pose((position, orientation))
                    if not self.use_planning_server:
                        self.plan()
                        pddl_block.set_base_link_pose((position, orientation))
        # If a moved block was not detected, ask the operator to fix the
        # scene and retry from scratch.
        if find_moved and n_found != len(self.moved_blocks):
            input(
                'Could not find all the moved blocks. Please reposition blocks outside of the camera view and hit enter to continue.'
                )
            self._update_block_poses(find_moved=True)
            return
        # Resolve block/table interpenetration by lifting blocks to a stable
        # height on the table.
        for _, pddl_block in self.pddl_block_lookup.items():
            if pb_robot.collisions.body_collision(pddl_block, self.table):
                print('Collision with table and block:', pddl_block.
                    readableName)
                position, orientation = pddl_block.get_base_link_pose()
                stable_z = pb_robot.placements.stable_z(pddl_block, self.table)
                position = position[0], position[1], stable_z
                self.execute()
                pddl_block.set_base_link_pose((position, orientation))
                self.plan()
                pddl_block.set_base_link_pose((position, orientation))
        # Resolve block/block interpenetration bottom-up: nudge the upper
        # block up 1 mm at a time until the pair separates.
        current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
        block_ixs = range(len(self.pddl_blocks))
        block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2
            ], reverse=False)
        for ix in range(len(block_ixs)):
            bottom_block = self.pddl_blocks[block_ixs[ix]]
            for jx in range(ix + 1, len(block_ixs)):
                top_block = self.pddl_blocks[block_ixs[jx]]
                dist_moved = 0
                while pb_robot.collisions.body_collision(bottom_block,
                    top_block):
                    print('Collision with bottom %s and top %s:' % (
                        bottom_block.readableName, top_block.readableName))
                    position, orientation = top_block.get_base_link_pose()
                    stable_z = position[2] + 0.001
                    dist_moved += 0.001
                    # On the real robot, give up after 4 cm and ask the
                    # operator to separate the blocks manually.
                    if self.real and dist_moved > 0.04:
                        print(
                            f'Found blocks {bottom_block} and {top_block} in collision'
                            )
                        input(
                            'Manually move the blocks and press Enter to continue'
                            )
                        self._update_block_poses(find_moved=False)
                        return
                    position = position[0], position[1], stable_z
                    self.execute()
                    top_block.set_base_link_pose((position, orientation))
                    self.plan()
                    top_block.set_base_link_pose((position, orientation))
def build_planning_problem(self, tower, base_xy):
""" Builds the initial conditions for planning """
self.moved_blocks = set()
tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]
tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]
if self.use_planning_server:
from stacking_ros.msg import BodyInfo
from stacking_ros.srv import SetPlanningStateRequest
from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
ros_req = SetPlanningStateRequest()
if self.real:
ros_req.robot_config.angles = self.real_arm.convertToList(self
.real_arm.joint_angles())
else:
ros_req.robot_config.angles = self.robot.arm.GetJointValues()
ros_req.init_state = block_init_to_ros(self.pddl_blocks)
else:
pddl_problems = []
base_block = self.pddl_block_lookup[tower[0].name]
base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z
base_pose = base_pos, tower[0].rotation
base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)
if self.use_planning_server:
base_block_ros = BodyInfo()
base_block_ros.name = base_block.readableName
base_block_ros.stack = True
pose_to_ros(base_pose, base_block_ros.pose)
ros_req.goal_state.append(base_block_ros)
else:
pddl_problems.append((self.table, base_block, (base_pos, tower[
0].rotation)))
for b_ix in range(1, len(tower)):
bottom_block = tower[b_ix - 1]
bottom_pose = bottom_block.pose.pos, bottom_block.rotation
bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)
top_block = tower[b_ix]
top_pose = top_block.pose.pos, top_block.rotation
top_tform = pb_robot.geometry.tform_from_pose(top_pose)
rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform
top_pddl = self.pddl_block_lookup[top_block.name]
bottom_pddl = self.pddl_block_lookup[bottom_block.name]
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = top_pddl.readableName
block_ros.base_obj = bottom_pddl.readableName
transform_to_ros(rel_tform, block_ros.pose)
block_ros.is_rel_pose = True
block_ros.stack = True
ros_req.goal_state.append(block_ros)
else:
init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]
goal_terms = [('On', top_pddl, bottom_pddl)]
pddl_problems.append((bottom_pddl, top_pddl, rel_tform))
for ix in reversed(tower_block_order):
blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
goal_pose = pb_robot.vobj.BodyPose(blk, pose)
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = blk.readableName
block_ros.stack = False
pose_to_ros(goal_pose, block_ros.pose)
ros_req.goal_state.append(block_ros)
else:
pddl_problems.append((self.table, blk, pose))
if self.use_planning_server:
return ros_req
else:
return pddl_problems
    def build_reset_problem(self):
        """ Builds the initial conditions for a tower reset given a set of moved blocks

        :return: A SetPlanningStateRequest when using the planning server,
            otherwise a list of (table, block, home_pose) sub-problems for
            every block in self.moved_blocks.
        """
        print('Resetting blocks...')
        print('Moved Blocks:', self.moved_blocks)
        # Process blocks top-down (highest z first) so upper blocks are
        # cleared before the ones beneath them.
        current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
        block_ixs = range(len(self.pddl_blocks))
        block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2
            ], reverse=True)
        if self.use_planning_server:
            from stacking_ros.msg import BodyInfo
            from stacking_ros.srv import SetPlanningStateRequest
            from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
            ros_req = SetPlanningStateRequest()
            ros_req.init_state = block_init_to_ros(self.pddl_blocks)
            if self.real:
                ros_req.robot_config.angles = self.real_arm.convertToList(self
                    .real_arm.joint_angles())
            else:
                ros_req.robot_config.angles = self.robot.arm.GetJointValues()
        else:
            pddl_problems = []
        # Only blocks that actually moved need to be returned home.
        for ix in block_ixs:
            blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
            if blk in self.moved_blocks:
                if self.use_planning_server:
                    goal_pose = pb_robot.vobj.BodyPose(blk, pose)
                    block_ros = BodyInfo()
                    block_ros.name = blk.readableName
                    block_ros.stack = False
                    pose_to_ros(goal_pose, block_ros.pose)
                    ros_req.goal_state.append(block_ros)
                else:
                    pddl_problems.append((self.table, blk, pose))
        if self.use_planning_server:
            return ros_req
        else:
            return pddl_problems
def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0,
0.5), ignore_resets=False):
"""
Simulates a tower stacking and unstacking by requesting plans from a separate planning server
Returns:
success : Flag indicating success of execution (True/False)
stable : Flag indicating (0 or 1)
num_stack_success : Number of blocks successfully stacked
"""
for block in tower:
print('Block:', block.name)
print('Pose:', block.pose)
print('Dims:', block.dimensions)
print('CoM:', block.com)
print('Rotations:', block.rotation)
print('-----')
if self.use_vision:
self._update_block_poses()
self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks
]
planning_prob = self.build_planning_problem(tower, base_xy)
success, stack_stable, reset_stable, num_success, fatal = (self.
plan_and_execute(planning_prob, real, T, stack=True,
ignore_resets=ignore_resets))
print(
f'Completed tower stack with success: {success}, stable: {stack_stable}'
)
if reset_stable:
print(f'Completed tower reset stable: {reset_stable}')
while not success and not fatal:
print(
f'Got recoverable failure. Replanning from step index {num_success}.'
)
if self.use_planning_server:
from tamp.ros_utils import block_init_to_ros
if self.real:
planning_prob.robot_config.angles = (self.real_arm.
convertToList(self.real_arm.joint_angles()))
else:
planning_prob.robot_config.angles = (self.robot.arm.
GetJointValues())
planning_prob.init_state = block_init_to_ros(self.pddl_blocks)
if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):
planning_prob.held_block.name = (self.last_obj_held.
body.readableName)
transform_to_ros(self.last_obj_held.grasp_objF,
planning_prob.held_block.pose)
success, stack_stable, reset_stable, num_success, fatal = (self
.plan_and_execute(planning_prob, real, T, stack=True,
start_idx=num_success, ignore_resets=ignore_resets))
print(
f'Completed tower stack with success: {success}, stable: {stack_stable}'
)
if reset_stable:
print(f'Completed tower reset stable: {reset_stable}')
num_stack_success = min(len(tower), num_success)
if not ignore_resets:
try:
if not (stack_stable and reset_stable):
if self.use_vision and not stack_stable:
self._update_block_poses(find_moved=True)
planning_prob = self.build_reset_problem()
reset_fatal = False
num_reset_success = 0
while len(self.moved_blocks) > 0 and not reset_fatal:
print(f'Resetting {len(self.moved_blocks)} blocks.')
(reset_success, _, reset_stable, num_reset_success,
reset_fatal) = (self.plan_and_execute(
planning_prob, real, T, stack=False, start_idx=
num_reset_success))
except Exception as e:
print('Planning/execution failed during tower reset.')
print(e)
return success, stack_stable, num_stack_success
    def plan_and_execute(self, planning_prob, real=False, T=2500, stack=
        True, start_idx=0, ignore_resets=False):
        """
        Requests a PDDLStream plan from a planning server and executes the resulting plan

        :param planning_prob: Output of build_planning_problem /
            build_reset_problem (ROS request or list of sub-problems).
        :param real: If True, execute on the real robot.
        :param T: Simulation steps to settle after each placement (sim only).
        :param stack: True while executing the stacking half of the plan.
        :param start_idx: Start index of planning (for recovering from partial plans)
        :param ignore_resets: Flag for whether to stop after resets
        Returns:
            success : Flag for whether the plan execution succeeded
            stack_stable : Flag for whether stacking a stable tower was successful
            reset_stable : Flag for whether resetting a tower was successful
            num_success : Progress (in number of steps) of successful tasks
            fatal : Flag for whether the error was fatal (True) or recoverable (False)
        """
        num_success = start_idx
        stack_stable = False
        reset_stable = False
        planning_active = True
        if self.use_planning_server:
            # Reset the server with only the remaining goals.
            ros_req = planning_prob
            num_steps = len(ros_req.goal_state)
            trimmed_ros_req = deepcopy(ros_req)
            trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]
            self.init_state_client.call(trimmed_ros_req)
        else:
            pddl_problems = planning_prob
            num_steps = len(pddl_problems)
        while num_success < num_steps:
            try:
                if self.use_planning_server:
                    # Poll the server until a plan for the current step that
                    # moves the expected block arrives.
                    query_block = self.pddl_block_lookup[ros_req.goal_state
                        [num_success].name]
                    plan = []
                    saved_world = pb_robot.utils.WorldSaver()
                    while len(plan) == 0 and planning_active:
                        time.sleep(5)
                        print('Getting a plan from server...')
                        ros_resp = self.get_plan_client.call()
                        if not ros_resp.planning_active:
                            print('Planning failed on server side.')
                            if ros_req.goal_state[num_success].stack:
                                print(f'Failed during stacking {query_block}')
                                fatal = True
                            else:
                                print(f'Failed during resetting {query_block}')
                                input(
                                    'Manually reset the blocks and press Enter to continue'
                                    )
                                if real:
                                    self._update_block_poses()
                                fatal = False
                            return (False, stack_stable, reset_stable,
                                num_success, fatal)
                        if self.validate_ros_plan(ros_resp, query_block):
                            plan = self.ros_to_task_plan(ros_resp, self.
                                execution_robot, self.pddl_block_lookup)
                else:
                    # Plan locally for the current (base, block, pose) step.
                    base, blk, pose = pddl_problems[num_success]
                    query_block = blk
                    self._add_text('Planning block placement')
                    self.plan()
                    saved_world = pb_robot.utils.WorldSaver()
                    self.robot.arm.hand.Open()
                    fixed_objs = self.fixed + [b for b in self.pddl_blocks if
                        b != blk]
                    init = self._get_initial_pddl_state()
                    goal_terms = []
                    if base == self.table:
                        blk_pose = pb_robot.vobj.BodyPose(blk, pose)
                        # During the reset half (or when not stacking), allow
                        # alternate home orientations if enabled.
                        if (not stack or num_success >= num_steps / 2
                            ) and self.alternate_orientations:
                            init += [('Reset',)]
                            goal_terms.append(('AtHome', blk))
                        else:
                            init += [('Pose', blk, blk_pose), ('Supported',
                                blk, blk_pose, self.table, self.table_pose)]
                            goal_terms.append(('AtPose', blk, blk_pose))
                            goal_terms.append(('On', blk, self.table))
                    else:
                        init += [('RelPose', blk, base, pose)]
                        goal_terms.append(('On', blk, base))
                    goal = tuple(['and'] + goal_terms)
                    pddl_info = get_pddlstream_info(self.robot, fixed_objs,
                        self.pddl_blocks, add_slanted_grasps=True,
                        approach_frame='global', use_vision=self.use_vision,
                        home_pose=pose)
                    plan, cost = pddlstream_plan(pddl_info, init, goal,
                        search_sample_ratio=1.0, max_time=INF)
                if plan is None:
                    print('\nFailed to plan\n')
                    fatal = False
                    return (False, stack_stable, reset_stable,
                        num_success, fatal)
                # Undo any state changes made while planning, then execute.
                saved_world.restore()
                print('\nGot plan:')
                print(plan)
                obstacles = [f for f in self.fixed if f is not None]
                if not self.use_planning_server:
                    self.plan()
                    ExecuteActions(plan, real=False, pause=False, wait=
                        False, obstacles=obstacles)
                self.execute()
                ExecuteActions(plan, real=real, pause=True, wait=False,
                    prompt=False, obstacles=obstacles,
                    sim_fatal_failure_prob=0.0,
                    sim_recoverable_failure_prob=0.0)
                desired_pose = query_block.get_base_link_pose()
                # Toggle membership: stacking adds the block, resetting it
                # back home removes it again.
                if query_block not in self.moved_blocks:
                    self.moved_blocks.add(query_block)
                else:
                    self.moved_blocks.remove(query_block)
                if not real:
                    self.step_simulation(T, vis_frames=False)
                if stack:
                    stable = self.check_stability(real, query_block,
                        desired_pose)
                else:
                    stable = True
                # Let the operator override a false "unstable" verdict.
                if stable == 0.0:
                    prompt = input(
                        'Tower NOT stable. Is this true? [y: Unstable / n: Stable]'
                        )
                    if prompt == 'n':
                        stable = 1.0
                if stable == 0.0:
                    print('Unstable after execution!')
                    # NOTE(review): success is reported True here even though
                    # the tower is unstable — execution itself succeeded;
                    # stability is carried in stack_stable. Confirm intended.
                    return True, stack_stable, reset_stable, num_success, False
                else:
                    num_success += 1
                    # The first half of the steps stack; the second half reset.
                    if stack and num_success == num_steps / 2:
                        print('Completed tower stack!')
                        stack_stable = True
                        stack = False
                        if ignore_resets:
                            return (True, stack_stable, reset_stable,
                                num_success, False)
                    elif num_success == num_steps:
                        print('Completed tower reset!')
                        reset_stable = True
                        return (True, stack_stable, reset_stable,
                            num_success, False)
            except ExecutionFailure as e:
                # Recoverable/fatal execution errors: restore planning state,
                # re-sync with the real robot, and report progress.
                print('Planning/execution failed.')
                print(e)
                saved_world.restore()
                if real:
                    self._update_block_poses()
                    self.robot.arm.SetJointValues(self.real_arm.
                        convertToList(self.real_arm.joint_angles()))
                self.last_obj_held = e.obj_held
                return False, stack_stable, reset_stable, num_success, e.fatal
    def check_stability(self, real, block_pddl, desired_pose, max_tries=2):
        """
        Check whether a just-placed block ended up at its intended pose.

        :param real: Unused in this method (stability is judged from the wrist
            camera when vision is on, else from the simulated block pose).
        :param block_pddl: pb_robot body of the block that was just placed.
        :param desired_pose: (position, orientation) the block should be at.
        :param max_tries: Unused. NOTE(review): dead parameter — confirm before removing.
        :return: 1.0 if the block is near the desired pose, else 0.0.
        """
        if self.use_vision:
            try:
                poses = self._get_block_poses_wrist().poses
            # NOTE(review): bare except also swallows KeyboardInterrupt.
            except:
                print(
                    'Service call to get block poses failed during check stability. Exiting.'
                    )
                sys.exit()
            visible = False
            for named_pose in poses:
                # Substring match against the numeric suffix of the block name.
                # NOTE(review): _update_block_poses uses `==` here instead of
                # `in`; a block_id like '1' would also match suffix '12' —
                # confirm intended.
                if named_pose.block_id in block_pddl.readableName.split('_')[-1
                    ]:
                    visible = True
                    pose = named_pose.pose.pose
                    des_pos = desired_pose[0]
                    obs_pos = pose.position.x, pose.position.y, pose.position.z
                    print('[Check Stability] Desired Pos:', des_pos)
                    print('[Check Stability] Detected Pos:', obs_pos)
                    # Position check: reject if more than 4 cm from target.
                    dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.
                        array(des_pos))
                    print(
                        f'[Check Stability] Position Distance (>0.04): {dist}')
                    if dist > 0.04:
                        return 0.0
                    # Orientation check: pyquaternion takes (w, x, y, z) while
                    # desired_pose stores (x, y, z, w), hence the reordering.
                    orn = desired_pose[1]
                    obs_orn = pyquaternion.Quaternion(pose.orientation.w,
                        pose.orientation.x, pose.orientation.y, pose.
                        orientation.z)
                    des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1
                        ], orn[2])
                    angle = (des_orn.inverse * obs_orn).angle
                    angle = numpy.abs(numpy.rad2deg(angle))
                    print(
                        f'[Check Stability] Orientation Distance (> 15): {angle}'
                        )
                    # Reject if rotated more than 15 degrees from target.
                    if angle > 15:
                        return 0.0
            # A block the camera cannot see is assumed to have fallen.
            if not visible:
                print('[Check Stability] Object not visible to camera.')
                return 0.0
        else:
            # Simulation-only check: compare simulated position to the target
            # with a tighter 1 cm tolerance (no sensor noise here).
            end_pose = block_pddl.get_base_link_point()
            dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(
                desired_pose[0]))
            if dist > 0.01:
                print('Unstable!')
                return 0.0
        return 1.0
def validate_ros_plan(self, ros_resp, tgt_block):
""" Validates a ROS plan to move a block against the expected target block name """
if len(ros_resp.plan) == 0:
return True
else:
plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']
if len(plan_blocks) > 0:
plan_block = plan_blocks[0]
else:
return False
print(
f'Received plan to move {plan_block} and expected to move {tgt_block}'
)
return tgt_block.readableName == plan_block
def robot_state_callback(self, msg):
""" Processes robot state errors and raises execution failures for planning """
cur_time = time.time()
if cur_time - self.arm_last_error_time < self.arm_error_check_time:
return
self.arm_last_error_time = cur_time
cur_errors = msg.current_errors
if cur_errors.communication_constraints_violation:
reason = 'Communication constraints violation detected!'
raise ExecutionFailure(reason=reason, fatal=True)
if cur_errors.joint_position_limits_violation:
reason = 'Joint position limits violation detected!'
raise ExecutionFailure(reason=reason, fatal=True)
if cur_errors.joint_motion_generator_position_limits_violation:
reason = (
'Joint motion generator position limits violation detected!')
raise ExecutionFailure(reason=reason, fatal=True)
def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):
""" Service callback function to plan and execute a tower from active learning script """
from stacking_ros.srv import PlanTowerResponse
from tamp.ros_utils import ros_to_tower
tower = ros_to_tower(ros_req.tower_info)
success, stable, num_stack_stable = self.simulate_tower(tower, True,
real=self.real, base_xy=base_xy)
resp = PlanTowerResponse()
resp.success = success
resp.stable = stable
resp.num_stack_stable = num_stack_stable
return resp
    def step_simulation(self, T, vis_frames=False, lifeTime=0.1):
        """Advance both PyBullet clients by T physics steps with the arm held fixed.

        :param T: Number of physics steps to run.
        :param vis_frames: If True, draw each block's local axes in the GUI client.
        :param lifeTime: Lifetime (seconds) of the drawn debug axes.
        """
        p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)
        p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)
        q = self.robot.get_joint_positions()
        for _ in range(T):
            p.stepSimulation(physicsClientId=self._execution_client_id)
            p.stepSimulation(physicsClientId=self._planning_client_id)
            # Restore the captured joint configuration in both clients each
            # step so the physics stepping does not move the arm.
            self.execute()
            self.execution_robot.set_joint_positions(self.robot.joints, q)
            self.plan()
            self.robot.set_joint_positions(self.robot.joints, q)
            time.sleep(1 / 2400.0)
            if vis_frames:
                length = 0.1
                for pddl_block in self.pddl_blocks:
                    # Draw the block's x (red), y (green), z (blue) axes.
                    pos, quat = pddl_block.get_pose()
                    new_x = transformation([length, 0.0, 0.0], pos, quat)
                    new_y = transformation([0.0, length, 0.0], pos, quat)
                    new_z = transformation([0.0, 0.0, length], pos, quat)
                    p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
                    p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
                    p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,
                        lifeTime=lifeTime, physicsClientId=self.
                        _execution_client_id)
    def simulate_action(self, action, block_ix, T=50, vis_sim=False,
        vis_placement=False):
        """
        Perform the given action with the given block. An observation
        should be returned in the reference frame of the platform.
        :param action: Place action which describes the relative pose of the block to the platform surface.
        :param block_ix: Index of the block to act on in ``self.belief_blocks``/``self.pddl_blocks``.
        :param T: How many timesteps to simulate the block falling for.
        :param vis_sim: Unused.
        :param vis_placement: If True, draw debug axes at the desired placement pose.
        :return: (action, T, end_pose) End pose should be TODO: what frame?
        TODO: Not sure if this method works at the moment...
        """
        assert self.platform_table is not None
        real_block = self.belief_blocks[block_ix]
        pddl_block = self.pddl_blocks[block_ix]
        # Remember the starting pose so the block can be returned afterwards.
        original_pose = pddl_block.get_base_link_pose()
        self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
            pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',
            use_vision=self.use_vision)
        init = self._get_initial_pddl_state()
        # Rotate the belief block per the action, then compute the placement
        # height from the platform and (rotated) block dimensions.
        real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))
        rotated_block = get_rotated_block(real_block)
        x = action.pos[0]
        y = action.pos[1]
        z = self.platform_table.get_dimensions()[2
            ] / 2.0 + rotated_block.dimensions[2] / 2
        # Homogeneous transform of the desired block pose relative to the platform.
        tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0,
            0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])
        tform[0:3, 0:3] = action.rot.as_matrix()
        if vis_placement:
            surface_tform = pb_robot.geometry.tform_from_pose(self.
                platform_table.get_base_link_pose())
            body_tform = surface_tform @ tform
            length, lifeTime = 0.2, 0.0
            # Draw the target placement frame (x red, y green, z blue).
            pos, quat = pb_robot.geometry.pose_from_tform(body_tform)
            new_x = transformation([length, 0.0, 0.0], pos, quat)
            new_y = transformation([0.0, length, 0.0], pos, quat)
            new_z = transformation([0.0, 0.0, length], pos, quat)
            p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)
            p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)
            p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)
        init += [('RelPose', pddl_block, self.platform_table, tform)]
        goal = 'On', pddl_block, self.platform_table
        print('Init:', init)
        print('Goal:', goal)
        # Place the block, then let physics settle for T steps before observing.
        self.plan_and_execute(init, goal, search_sample_ratio=1000)
        self.step_simulation(T)
        end_pose = self._get_observed_pose(pddl_block, action)
        observation = action, T, end_pose
        self.step_simulation(500 - T)
        # Second problem: return the block to its original pose on the table.
        self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.
            pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',
            use_vision=self.use_vision)
        init = self._get_initial_pddl_state()
        goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)
        init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,
            goal_pose, self.table, self.table_pose)]
        goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,
            self.table)
        print('Init:', init)
        print('Goal:', goal)
        success = self.plan_and_execute(init, goal, max_time=100.0,
            search_sample_ratio=1000)
        return observation
class PandaClientAgent:
    """
    Lightweight client to call a PandaAgent as a service for active learning
    """
    def __init__(self):
        # Assumes no ROS node exists yet in this process.
        import rospy
        rospy.init_node('panda_client')
        self.restart_services()
    def restart_services(self):
        """(Re)connect to the Panda Agent '/plan_tower' planning service."""
        import rospy
        from stacking_ros.srv import PlanTower
        print('Waiting for Panda Agent server...')
        rospy.wait_for_service('/plan_tower')
        print('Done')
        self.client = rospy.ServiceProxy('/plan_tower', PlanTower)
    def simulate_tower(self, tower, vis, real=False):
        """
        Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.
        :param tower: Tower (list of blocks) to serialize and send to the server.
        :param vis: If True, also visualize the tower locally while the server works.
        :param real: Unused here; execution mode is decided server-side.
        Returns:
            success : Flag indicating success of execution (True/False)
            stable : Flag indicating (0 or 1)
            num_stack_success : Number of blocks successfully stacked
        """
        from stacking_ros.srv import PlanTowerRequest
        from tamp.ros_utils import tower_to_ros, ros_to_tower
        request = PlanTowerRequest()
        request.tower_info = tower_to_ros(tower)
        if vis:
            w = World(tower)
            env = Environment([w], vis_sim=True, vis_frames=True)
            env.step(vis_frames=True)
        for b in tower:
            print('----- Block info -----')
            print(b.name)
            print(b.dimensions)
            print(b.pose)
            print(b.rotation)
        # Blocking service call; returns once the server finishes the tower.
        response = self.client.call(request)
        if vis:
            env.disconnect()
        return response.success, response.stable, response.num_stack_stable
# ---- End of stray duplicated excerpt; the canonical module begins below. ----
import sys
import time
import numpy
import pb_robot
import pyquaternion
import pybullet as p
from copy import deepcopy
from actions import PlaceAction, make_platform_world
from block_utils import get_adversarial_blocks, rotation_group, ZERO_POS, \
Quaternion, get_rotated_block, Pose, add_noise, \
Environment, Position, World
from pddlstream.utils import INF
from pybullet_utils import transformation
import tamp.primitives
from tamp.misc import setup_panda_world, get_pddl_block_lookup, \
print_planning_problem, ExecuteActions, ExecutionFailure
from tamp.pddlstream_utils import get_pddlstream_info, pddlstream_plan
class PandaAgent:
    def __init__(self, blocks, noise=0.00005, block_init_xy_poses=None,
                 use_platform=False, use_vision=False, real=False,
                 use_planning_server=False, use_learning_server=False,
                 alternate_orientations=False):
        """
        Build the Panda world in PyBullet and set up the PDDLStream solver.
        The Panda world should include the given blocks as well as a
        platform which can be used in experimentation.
        :param blocks: List of belief blocks to spawn in the world.
        :param noise: Scale of the observation-noise covariance used by
            ``_get_observed_pose``.
        :param block_init_xy_poses: Optional initial (x, y) poses for the blocks.
        :param use_platform: Boolean stating whether to include the platform to
        push blocks off of or not.
        :param use_vision: Boolean stating whether to use vision to detect blocks.
        :param real: Boolean stating whether to drive a real Franka arm.
        :param use_planning_server: Boolean stating whether to use the separate
        ROS planning service server.
        :param use_learning_server: Boolean stating whether to host a ROS service
        server to drive planning from active learning script.
        :param alternate_orientations: Boolean stating whether blocks can be replaced in
        their home positions at alternate orientations.

        If you are using the ROS action server, you must start it in a separate terminal:
            rosrun stacking_ros planning_server.py
        """
        self.real = real
        self.use_vision = use_vision
        self.use_platform = use_platform
        self.use_planning_server = use_planning_server
        self.use_learning_server = use_learning_server
        self.alternate_orientations = alternate_orientations
        # Setup PyBullet instance to run in the background and handle planning/collision checking.
        self._planning_client_id = pb_robot.utils.connect(use_gui=False)
        self.plan()
        pb_robot.utils.set_default_camera()
        self.robot = pb_robot.panda.Panda()
        self.robot.arm.hand.Open()
        self.belief_blocks = blocks
        self.pddl_blocks, self.platform_table, self.platform_leg, self.table, self.frame, self.wall = setup_panda_world(self.robot,
                                                                      blocks,
                                                                      block_init_xy_poses,
                                                                      use_platform=use_platform)
        self.fixed = [self.platform_table, self.platform_leg, self.table, self.frame, self.wall]
        self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks)
        # Remember the home configuration and poses so experiments can be reset.
        self.orig_joint_angles = self.robot.arm.GetJointValues()
        self.orig_block_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
        # Setup PyBullet instance that only visualizes plan execution. State needs to match the planning instance.
        poses = [b.get_base_link_pose() for b in self.pddl_blocks]
        poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]
        self._execution_client_id = pb_robot.utils.connect(use_gui=True)
        self.execute()
        pb_robot.utils.set_default_camera()
        self.execution_robot = pb_robot.panda.Panda()
        self.execution_robot.arm.hand.Open()
        setup_panda_world(self.execution_robot, blocks, poses, use_platform=use_platform)
        # Set up ROS plumbing if using features that require it
        if self.use_vision or self.use_planning_server or self.use_learning_server or real:
            import rospy
            try:
                rospy.init_node("panda_agent")
            except:
                print("ROS Node already created")
        # Create an arm interface
        if real:
            from franka_interface import ArmInterface
            self.real_arm = ArmInterface()
            from franka_core_msgs.msg import RobotState
            state_topic = "/franka_ros_interface/custom_franka_state_controller/robot_state"
            # Throttle state used by robot_state_callback.
            self.arm_last_error_time = time.time()
            self.arm_error_check_time = 3.0
            self.arm_state_subscriber = rospy.Subscriber(
                state_topic, RobotState, self.robot_state_callback)
        # Set initial poses of all blocks and setup vision ROS services.
        if self.use_vision:
            from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist
            rospy.wait_for_service('get_block_poses_world')
            rospy.wait_for_service('get_block_poses_wrist')
            self._get_block_poses_world = rospy.ServiceProxy('get_block_poses_world', GetBlockPosesWorld)
            self._get_block_poses_wrist = rospy.ServiceProxy('get_block_poses_wrist', GetBlockPosesWrist)
        # Start ROS clients and servers as needed
        self.last_obj_held = None
        if self.use_planning_server:
            from stacking_ros.srv import GetPlan, SetPlanningState
            from tamp.ros_utils import goal_to_ros, ros_to_task_plan
            print("Waiting for planning server...")
            rospy.wait_for_service("get_latest_plan")
            self.goal_to_ros = goal_to_ros
            self.ros_to_task_plan = ros_to_task_plan
            self.init_state_client = rospy.ServiceProxy(
                "/reset_planning", SetPlanningState)
            self.get_plan_client = rospy.ServiceProxy(
                "/get_latest_plan", GetPlan)
            print("Done!")
        if self.use_learning_server:
            from stacking_ros.srv import PlanTower
            self.learning_server = rospy.Service(
                "/plan_tower", PlanTower, self.learning_server_callback)
            print("Learning server started!")
        self.pddl_info = get_pddlstream_info(self.robot,
                                             self.fixed,
                                             self.pddl_blocks,
                                             add_slanted_grasps=False,
                                             approach_frame='global',
                                             use_vision=self.use_vision)
        self.noise = noise
        self.txt_id = None
        self.plan()
def _add_text(self, txt):
self.execute()
pb_robot.viz.remove_all_debug()
self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75), size=2)
self.plan()
def execute(self):
self.state = 'execute'
pb_robot.aabb.set_client(self._execution_client_id)
pb_robot.body.set_client(self._execution_client_id)
pb_robot.collisions.set_client(self._execution_client_id)
pb_robot.geometry.set_client(self._execution_client_id)
pb_robot.grasp.set_client(self._execution_client_id)
pb_robot.joint.set_client(self._execution_client_id)
pb_robot.link.set_client(self._execution_client_id)
pb_robot.panda.set_client(self._execution_client_id)
pb_robot.planning.set_client(self._execution_client_id)
pb_robot.utils.set_client(self._execution_client_id)
pb_robot.viz.set_client(self._execution_client_id)
def plan(self):
if self.use_planning_server:
return
self.state = 'plan'
pb_robot.aabb.set_client(self._planning_client_id)
pb_robot.body.set_client(self._planning_client_id)
pb_robot.collisions.set_client(self._planning_client_id)
pb_robot.geometry.set_client(self._planning_client_id)
pb_robot.grasp.set_client(self._planning_client_id)
pb_robot.joint.set_client(self._planning_client_id)
pb_robot.link.set_client(self._planning_client_id)
pb_robot.panda.set_client(self._planning_client_id)
pb_robot.planning.set_client(self._planning_client_id)
pb_robot.utils.set_client(self._planning_client_id)
pb_robot.viz.set_client(self._planning_client_id)
def reset_world(self):
""" Resets the planning world to its original configuration """
print("Resetting world")
if self.real:
angles = self.real_arm.convertToList(self.real_arm.joint_angles())
else:
angles = self.orig_joint_angles
self.plan()
self.robot.arm.SetJointValues(angles)
self.execute()
self.execution_robot.arm.SetJointValues(angles)
for bx, b in enumerate(self.pddl_blocks):
b.set_base_link_pose(self.orig_block_poses[bx])
print("Done")
def _get_initial_pddl_state(self):
"""
Get the PDDL representation of the world between experiments. This
method assumes that all blocks are on the table. We will always "clean
up" an experiment by moving blocks away from the platform after an
experiment.
"""
fixed = [self.table, self.platform_table, self.platform_leg, self.frame]
conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.GetJointValues())
print('Initial configuration:', conf.configuration)
init = [('CanMove',),
('Conf', conf),
('StartConf', conf),
('AtConf', conf),
('HandEmpty',)]
self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.get_base_link_pose())
init += [('Pose', self.table, self.table_pose),
('AtPose', self.table, self.table_pose)]
for body in self.pddl_blocks:
print(type(body), body)
pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())
init += [('Graspable', body),
('Pose', body, pose),
('AtPose', body, pose),
('Block', body),
('On', body, self.table),
('Supported', body, pose, self.table, self.table_pose)]
if not self.platform_table is None:
platform_pose = pb_robot.vobj.BodyPose(self.platform_table, self.platform_table.get_base_link_pose())
init += [('Pose', self.platform_table, platform_pose),
('AtPose', self.platform_table, platform_pose)]
init += [('Block', self.platform_table)]
init += [('Table', self.table)]
return init
def _get_observed_pose(self, pddl_block, action):
"""
This pose should be relative to the base of the platform leg to
agree with the simulation. The two block representations will have
different orientation but their positions should be the same.
"""
block_transform = pddl_block.get_base_link_transform()
platform_transform = self.platform_leg.get_base_link_transform()
platform_transform[2,3] -= self.platform_leg.get_dimensions()[2]/2.
rel_transform = numpy.linalg.inv(platform_transform)@block_transform
end_pose = pb_robot.geometry.pose_from_tform(rel_transform)
# TODO: Add noise to the observation.
end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))
end_pose = add_noise(end_pose, self.noise*numpy.eye(3))
return end_pose
    def _update_block_poses(self, find_moved=False):
        """ Use the global world cameras to update the positions of the blocks.

        :param find_moved: If True, insist that every block in
            ``self.moved_blocks`` is re-detected, prompting the user (and
            recursing) until they all are.
        """
        try:
            resp = self._get_block_poses_world()
            named_poses = resp.poses
        except:
            import sys
            print('Service call to get block poses failed. Exiting.')
            sys.exit()
        n_found = 0
        for pddl_block_name, pddl_block in self.pddl_block_lookup.items():
            for named_pose in named_poses:
                if named_pose.block_id == pddl_block_name.split('_')[-1]:
                    pose = named_pose.pose.pose
                    # Skip changes the pose of objects in storage.
                    if pose.position.x < 0.05:
                        continue
                    n_found += 1
                    position = (pose.position.x, pose.position.y, pose.position.z)
                    orientation = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)
                    # Apply the detected pose in both PyBullet clients.
                    self.execute()
                    pddl_block.set_base_link_pose((position, orientation))
                    if not self.use_planning_server:
                        self.plan()
                        pddl_block.set_base_link_pose((position, orientation))
        if find_moved and n_found != len(self.moved_blocks):
            input('Could not find all the moved blocks. Please reposition blocks outside of the camera view and hit enter to continue.')
            self._update_block_poses(find_moved=True)
            return
        # After loading from vision, objects may be in collision. Resolve this.
        for _, pddl_block in self.pddl_block_lookup.items():
            if pb_robot.collisions.body_collision(pddl_block, self.table):
                print('Collision with table and block:', pddl_block.readableName)
                position, orientation = pddl_block.get_base_link_pose()
                stable_z = pb_robot.placements.stable_z(pddl_block, self.table)
                position = (position[0], position[1], stable_z)
                self.execute()
                pddl_block.set_base_link_pose((position, orientation))
                self.plan()
                pddl_block.set_base_link_pose((position, orientation))
        # Resolve from low to high blocks.
        current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
        block_ixs = range(len(self.pddl_blocks))
        block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2], reverse=False)
        for ix in range(len(block_ixs)):
            bottom_block = self.pddl_blocks[block_ixs[ix]]
            for jx in range(ix+1, len(block_ixs)):
                top_block = self.pddl_blocks[block_ixs[jx]]
                dist_moved = 0
                # Nudge the top block up 1 mm at a time until it clears.
                while pb_robot.collisions.body_collision(bottom_block, top_block):
                    print('Collision with bottom %s and top %s:' % (bottom_block.readableName, top_block.readableName))
                    position, orientation = top_block.get_base_link_pose()
                    stable_z = position[2] + 0.001
                    dist_moved += 0.001
                    # On the real robot, give up after 4 cm and ask for help.
                    if self.real and dist_moved > 0.04:
                        print(f"Found blocks {bottom_block} and {top_block} in collision")
                        input("Manually move the blocks and press Enter to continue")
                        self._update_block_poses(find_moved=False)
                        return
                    position = (position[0], position[1], stable_z)
                    self.execute()
                    top_block.set_base_link_pose((position, orientation))
                    self.plan()
                    top_block.set_base_link_pose((position, orientation))
def build_planning_problem(self, tower, base_xy):
""" Builds the initial conditions for planning """
# Set up the list of original poses and order of blocks in the tower
self.moved_blocks = set()
tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]
tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]
# Build the initial data structures
if self.use_planning_server:
from stacking_ros.msg import BodyInfo
from stacking_ros.srv import SetPlanningStateRequest
from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
ros_req = SetPlanningStateRequest()
# Initial poses and robot configuration
if self.real:
ros_req.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())
else:
ros_req.robot_config.angles = self.robot.arm.GetJointValues()
ros_req.init_state = block_init_to_ros(self.pddl_blocks)
else:
pddl_problems = []
# Base block goal pose
# TODO: Set base block to be rotated in its current position.
base_block = self.pddl_block_lookup[tower[0].name]
base_pos = (base_xy[0], base_xy[1], tower[0].pose.pos.z)
base_pose = (base_pos, tower[0].rotation)
base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)
if self.use_planning_server:
base_block_ros = BodyInfo()
base_block_ros.name = base_block.readableName
base_block_ros.stack = True
pose_to_ros(base_pose, base_block_ros.pose)
ros_req.goal_state.append(base_block_ros)
else:
pddl_problems.append((self.table, base_block, (base_pos, tower[0].rotation)))
# Other block goal poses
for b_ix in range(1, len(tower)):
bottom_block = tower[b_ix-1]
bottom_pose = (bottom_block.pose.pos, bottom_block.rotation)
bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)
top_block = tower[b_ix]
top_pose = (top_block.pose.pos, top_block.rotation)
top_tform = pb_robot.geometry.tform_from_pose(top_pose)
rel_tform = numpy.linalg.inv(bottom_tform)@top_tform
top_pddl = self.pddl_block_lookup[top_block.name]
bottom_pddl = self.pddl_block_lookup[bottom_block.name]
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = top_pddl.readableName
block_ros.base_obj = bottom_pddl.readableName
transform_to_ros(rel_tform, block_ros.pose)
block_ros.is_rel_pose = True
block_ros.stack = True
ros_req.goal_state.append(block_ros)
else:
init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]
goal_terms = [('On', top_pddl, bottom_pddl)]
pddl_problems.append((bottom_pddl, top_pddl, rel_tform))
# Finally, tack on the tower resetting steps
for ix in reversed(tower_block_order):
blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
goal_pose = pb_robot.vobj.BodyPose(blk, pose)
if self.use_planning_server:
block_ros = BodyInfo()
block_ros.name = blk.readableName
block_ros.stack = False
pose_to_ros(goal_pose, block_ros.pose)
ros_req.goal_state.append(block_ros)
else:
pddl_problems.append((self.table, blk, pose))
# Return the planning data structure
if self.use_planning_server:
return ros_req
else:
return pddl_problems
    def build_reset_problem(self):
        """ Builds the initial conditions for a tower reset given a set of moved blocks.

        :return: A SetPlanningStateRequest (when using the planning server) or
            a list of (table, block, pose) problems (when planning locally),
            covering only the blocks in ``self.moved_blocks``.
        """
        print("Resetting blocks...")
        print("Moved Blocks:", self.moved_blocks)
        # Define block order by sorting by height (highest first, so blocks
        # are unstacked from the top down)
        current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
        block_ixs = range(len(self.pddl_blocks))
        block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2], reverse=True)
        # Build the initial data structures
        if self.use_planning_server:
            from stacking_ros.msg import BodyInfo
            from stacking_ros.srv import SetPlanningStateRequest
            from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros
            ros_req = SetPlanningStateRequest()
            ros_req.init_state = block_init_to_ros(self.pddl_blocks)
            if self.real:
                ros_req.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())
            else:
                ros_req.robot_config.angles = self.robot.arm.GetJointValues()
        else:
            pddl_problems = []
        # Add all blocks to be moved to the data structure
        for ix in block_ixs:
            blk, pose = self.pddl_blocks[ix], self.original_poses[ix]
            if blk in self.moved_blocks:
                if self.use_planning_server:
                    goal_pose = pb_robot.vobj.BodyPose(blk, pose)
                    block_ros = BodyInfo()
                    block_ros.name = blk.readableName
                    block_ros.stack = False
                    pose_to_ros(goal_pose, block_ros.pose)
                    ros_req.goal_state.append(block_ros)
                else:
                    pddl_problems.append((self.table, blk, pose))
        # Return the planning data structure
        if self.use_planning_server:
            return ros_req
        else:
            return pddl_problems
def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0., 0.5), ignore_resets=False):
"""
Simulates a tower stacking and unstacking by requesting plans from a separate planning server
Returns:
success : Flag indicating success of execution (True/False)
stable : Flag indicating (0 or 1)
num_stack_success : Number of blocks successfully stacked
"""
for block in tower:
print('Block:', block.name)
print('Pose:', block.pose)
print('Dims:', block.dimensions)
print('CoM:', block.com)
print('Rotations:', block.rotation)
print('-----')
if self.use_vision:
self._update_block_poses()
self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks]
planning_prob = self.build_planning_problem(tower, base_xy)
# Execute the stacking plan
success, stack_stable, reset_stable, num_success, fatal = \
self.plan_and_execute(planning_prob, real, T, stack=True, ignore_resets=ignore_resets)
print(f"Completed tower stack with success: {success}, stable: {stack_stable}")
if reset_stable:
print(f"Completed tower reset stable: {reset_stable}")
# If we have a nonfatal failure, replan from new state, removing successful goals
while (not success and not fatal):
print(f"Got recoverable failure. Replanning from step index {num_success}.")
if self.use_planning_server:
from tamp.ros_utils import block_init_to_ros
if self.real:
planning_prob.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())
else:
planning_prob.robot_config.angles = self.robot.arm.GetJointValues()
planning_prob.init_state = block_init_to_ros(self.pddl_blocks)
if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):
planning_prob.held_block.name = self.last_obj_held.body.readableName
transform_to_ros(self.last_obj_held.grasp_objF, planning_prob.held_block.pose)
success, stack_stable, reset_stable, num_success, fatal = \
self.plan_and_execute(planning_prob, real, T, stack=True, start_idx=num_success, ignore_resets=ignore_resets)
print(f"Completed tower stack with success: {success}, stable: {stack_stable}")
if reset_stable:
print(f"Completed tower reset stable: {reset_stable}")
# Write the number of successfully stacked blocks
num_stack_success = min(len(tower), num_success)
# If the full tower did not succeed, reset the moved blocks
if not ignore_resets:
try:
if not (stack_stable and reset_stable):
if self.use_vision and not stack_stable:
self._update_block_poses(find_moved=True)
# TODO: Return arm to home position to help with vision.
planning_prob = self.build_reset_problem()
reset_fatal = False
num_reset_success = 0
while len(self.moved_blocks) > 0 and not reset_fatal:
print(f"Resetting {len(self.moved_blocks)} blocks.")
reset_success, _, reset_stable, num_reset_success, reset_fatal = \
self.plan_and_execute(planning_prob, real, T, stack=False, start_idx=num_reset_success)
except Exception as e:
print("Planning/execution failed during tower reset.")
print(e)
# Return the final planning state
return success, stack_stable, num_stack_success
    def plan_and_execute(self, planning_prob, real=False, T=2500, stack=True, start_idx=0, ignore_resets=False):
        """
        Requests a PDDLStream plan (from the planning server or locally) and
        executes the resulting plan one block-goal at a time.

        :param planning_prob: ROS request (server mode) or list of per-block
            (base, block, pose) problems (local mode).
        :param real: If True, execute on the real arm.
        :param T: Number of simulation steps used for the stability settle.
        :param stack: True while stacking; False while resetting blocks.
        :param start_idx: Start index of planning (for recovering from partial plans)
        :param ignore_resets: Flag for whether to stop after resets
        Returns:
            success : Flag for whether the plan execution succeeded
            stack_stable : Flag for whether stacking a stable tower was successful
            reset_stable : Flag for whether resetting a tower was successful
            num_success : Progress (in number of steps) of successful tasks
            fatal : Flag for whether the error was fatal (True) or recoverable (False)
        """
        # Initialize variables
        num_success = start_idx
        stack_stable = False
        reset_stable = False
        planning_active = True
        if self.use_planning_server:
            # Send a reset request to the planning server
            ros_req = planning_prob
            num_steps = len(ros_req.goal_state)
            # Only send the goals that have not been achieved yet.
            trimmed_ros_req = deepcopy(ros_req)
            trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]
            self.init_state_client.call(trimmed_ros_req)
        else:
            pddl_problems = planning_prob
            num_steps = len(pddl_problems)
        while num_success < num_steps:
            try:
                # PLANNING
                # If using planning server, request a plan from the server using ROS
                if self.use_planning_server:
                    query_block = self.pddl_block_lookup[ros_req.goal_state[num_success].name]
                    # Wait for a valid plan
                    plan = []
                    saved_world = pb_robot.utils.WorldSaver()
                    while len(plan) == 0 and planning_active:
                        time.sleep(5)
                        print("Getting a plan from server...")
                        ros_resp = self.get_plan_client.call()
                        if not ros_resp.planning_active:
                            print("Planning failed on server side.")
                            # If failure happened during stacking, it is a fatal failure
                            if (ros_req.goal_state[num_success].stack):
                                print(f"Failed during stacking {query_block}")
                                fatal = True
                            # If failure happened during resetting, prompt user to manually reset blocks
                            else:
                                print(f"Failed during resetting {query_block}")
                                input("Manually reset the blocks and press Enter to continue")
                                if real:
                                    self._update_block_poses()
                                fatal = False
                            return False, stack_stable, reset_stable, num_success, fatal
                        if self.validate_ros_plan(ros_resp, query_block):
                            plan = self.ros_to_task_plan(ros_resp, self.execution_robot, self.pddl_block_lookup)
                # Otherwise, plan locally
                else:
                    base, blk, pose = pddl_problems[num_success]
                    query_block = blk
                    self._add_text('Planning block placement')
                    self.plan()
                    saved_world = pb_robot.utils.WorldSaver()
                    self.robot.arm.hand.Open()
                    # Unpack initial conditions
                    fixed_objs = self.fixed + [b for b in self.pddl_blocks if b != blk]
                    init = self._get_initial_pddl_state()
                    goal_terms = []
                    if base == self.table:
                        blk_pose = pb_robot.vobj.BodyPose(blk, pose)
                        # During resets (or the reset half of a stack), allow
                        # alternate home orientations if configured.
                        if (not stack or num_success >= num_steps/2) and self.alternate_orientations:
                            init += [("Reset",)]
                            goal_terms.append(("AtHome", blk))
                        else:
                            init += [('Pose', blk, blk_pose),
                                     ('Supported', blk, blk_pose, self.table, self.table_pose)]
                            goal_terms.append(('AtPose', blk, blk_pose))
                            goal_terms.append(('On', blk, self.table))
                    else:
                        init += [('RelPose', blk, base, pose)]
                        goal_terms.append(('On', blk, base))
                    goal = tuple(['and'] + goal_terms)
                    # Plan with PDDLStream
                    pddl_info = get_pddlstream_info(self.robot,
                                                    fixed_objs,
                                                    self.pddl_blocks,
                                                    add_slanted_grasps=True,
                                                    approach_frame='global',
                                                    use_vision=self.use_vision,
                                                    home_pose=pose)
                    plan, cost = pddlstream_plan(pddl_info, init, goal,
                                                 search_sample_ratio=1.0,
                                                 max_time=INF)
                    if plan is None:
                        print("\nFailed to plan\n")
                        fatal = False
                        return False, stack_stable, reset_stable, num_success, fatal
                    saved_world.restore()
                print("\nGot plan:")
                print(plan)
                # Once we have a plan, execute it
                obstacles = [f for f in self.fixed if f is not None]
                if not self.use_planning_server:
                    self.plan()
                    ExecuteActions(plan, real=False, pause=False, wait=False, obstacles=obstacles)
                self.execute()
                ExecuteActions(plan, real=real, pause=True, wait=False, prompt=False, obstacles=obstacles,
                               sim_fatal_failure_prob=0.0, sim_recoverable_failure_prob=0.0)
                # Manage the moved blocks (add to the set when stacking, remove when unstacking)
                desired_pose = query_block.get_base_link_pose()
                if query_block not in self.moved_blocks:
                    self.moved_blocks.add(query_block)
                else:
                    self.moved_blocks.remove(query_block)
                # Check stability
                if not real:
                    self.step_simulation(T, vis_frames=False)
                #input('Press enter to check stability.')
                if stack:
                    stable = self.check_stability(real, query_block, desired_pose)
                else:
                    stable = True # Don't care about stability on reset
                if stable == 0.:
                    prompt = input('Tower NOT stable. Is this true? [y: Unstable / n: Stable]')
                    if prompt == 'n':
                        stable = 1.
                #input('Continue?')
                # Manage the success status of the plan
                if stable == 0.:
                    print("Unstable after execution!")
                    return True, stack_stable, reset_stable, num_success, False
                else:
                    num_success += 1
                    # The first half of the goals stack the tower; the second
                    # half resets the blocks to their home poses.
                    if stack and num_success == num_steps/2:
                        print("Completed tower stack!")
                        stack_stable = True
                        stack = False
                        if ignore_resets:
                            return True, stack_stable, reset_stable, num_success, False
                    elif num_success == num_steps:
                        print("Completed tower reset!")
                        reset_stable = True
                        return True, stack_stable, reset_stable, num_success, False
            except ExecutionFailure as e:
                print("Planning/execution failed.")
                print(e)
                saved_world.restore()
                if real:
                    self._update_block_poses()
                    self.robot.arm.SetJointValues(self.real_arm.convertToList(self.real_arm.joint_angles()))
                self.last_obj_held = e.obj_held
                return False, stack_stable, reset_stable, num_success, e.fatal
    def check_stability(self, real, block_pddl, desired_pose, max_tries=2):
        """Return 1. if the block appears at its desired pose, else 0.

        With vision enabled, queries the wrist camera; otherwise the simulated
        pose is compared directly. ``max_tries`` is currently unused.

        :param real: Unused here; vision use is controlled by ``self.use_vision``.
        :param block_pddl: The pb_robot body whose pose is being checked.
        :param desired_pose: (position, orientation-quaternion) target pose.
        """
        if self.use_vision:
            # Get pose of blocks using wrist camera.
            try:
                poses = self._get_block_poses_wrist().poses
            except:
                print('Service call to get block poses failed during check stability. Exiting.')
                sys.exit()
            # Check if pose is close to desired_pose.
            visible = False
            for named_pose in poses:
                if named_pose.block_id in block_pddl.readableName.split('_')[-1]:
                    visible = True
                    pose = named_pose.pose.pose
                    des_pos = desired_pose[0]
                    obs_pos = (pose.position.x, pose.position.y, pose.position.z)
                    print('[Check Stability] Desired Pos:', des_pos)
                    print('[Check Stability] Detected Pos:', obs_pos)
                    # First check if the pose is too far away.
                    dist = numpy.linalg.norm(numpy.array(obs_pos)-numpy.array(des_pos))
                    print(f'[Check Stability] Position Distance (>0.04): {dist}')
                    if dist > 0.04:
                        return 0.
                    # Also check that the block is flat on the table
                    # (angular deviation in degrees between the quaternions).
                    orn = desired_pose[1]
                    obs_orn = pyquaternion.Quaternion(pose.orientation.w, pose.orientation.x, pose.orientation.y, pose.orientation.z)
                    des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1], orn[2])
                    angle = (des_orn.inverse*obs_orn).angle
                    angle = numpy.abs(numpy.rad2deg(angle))
                    print(f'[Check Stability] Orientation Distance (> 15): {angle}')
                    if angle > 15:
                        return 0.
            # If block isn't visible, return 0.
            if not visible:
                print('[Check Stability] Object not visible to camera.')
                return 0.
        else:
            # Simulation-only check: position distance beyond 1 cm = unstable.
            end_pose = block_pddl.get_base_link_point()
            dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(desired_pose[0]))
            # print(f"Distance is {dist}")
            # print(f"Block dimensions are {block_pddl.get_dimensions()}")
            if dist > 0.01:
                print('Unstable!')
                return 0.
        return 1.
def validate_ros_plan(self, ros_resp, tgt_block):
""" Validates a ROS plan to move a block against the expected target block name """
if len(ros_resp.plan) == 0:
return True
else:
plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == "pick"]
if len(plan_blocks) > 0:
plan_block = plan_blocks[0]
else:
return False
print(f"Received plan to move {plan_block} and expected to move {tgt_block}")
return (tgt_block.readableName == plan_block)
def robot_state_callback(self, msg):
""" Processes robot state errors and raises execution failures for planning """
cur_time = time.time()
if (cur_time - self.arm_last_error_time) < self.arm_error_check_time:
return
self.arm_last_error_time = cur_time
cur_errors = msg.current_errors
# if cur_errors.cartesian_reflex:
# reason = "Cartesian reflex error detected!"
# raise ExecutionFailure(reason=reason, fatal=False)
if cur_errors.communication_constraints_violation:
reason = "Communication constraints violation detected!"
raise ExecutionFailure(reason=reason, fatal=True)
if cur_errors.joint_position_limits_violation:
reason = "Joint position limits violation detected!"
raise ExecutionFailure(reason=reason, fatal=True)
if cur_errors.joint_motion_generator_position_limits_violation:
reason = "Joint motion generator position limits violation detected!"
raise ExecutionFailure(reason=reason, fatal=True)
def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):
""" Service callback function to plan and execute a tower from active learning script """
from stacking_ros.srv import PlanTowerResponse
from tamp.ros_utils import ros_to_tower
tower = ros_to_tower(ros_req.tower_info)
success, stable, num_stack_stable = self.simulate_tower(
tower, True, real=self.real, base_xy=base_xy)
resp = PlanTowerResponse()
resp.success = success
resp.stable = stable
resp.num_stack_stable = num_stack_stable
return resp
def step_simulation(self, T, vis_frames=False, lifeTime=0.1):
p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)
p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)
q = self.robot.get_joint_positions()
for _ in range(T):
p.stepSimulation(physicsClientId=self._execution_client_id)
p.stepSimulation(physicsClientId=self._planning_client_id)
self.execute()
self.execution_robot.set_joint_positions(self.robot.joints, q)
self.plan()
self.robot.set_joint_positions(self.robot.joints, q)
time.sleep(1/2400.)
if vis_frames:
length = 0.1
for pddl_block in self.pddl_blocks:
pos, quat = pddl_block.get_pose()
new_x = transformation([length, 0.0, 0.0], pos, quat)
new_y = transformation([0.0, length, 0.0], pos, quat)
new_z = transformation([0.0, 0.0, length], pos, quat)
p.addUserDebugLine(pos, new_x, [1,0,0], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)
p.addUserDebugLine(pos, new_y, [0,1,0], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)
p.addUserDebugLine(pos, new_z, [0,0,1], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)
    def simulate_action(self, action, block_ix, T=50, vis_sim=False, vis_placement=False):
        """
        Perform the given action to with the given block. An observation
        should be returned in the reference frame of the platform.
        :param action: Place action which describes the relative pose of the block to the platform surface.
        :param real_block: Belief representation of the block to perform the action on.
        :param T: How many timesteps to simulate the block falling for.
        :param vis_sim: Ununsed.
        :param vis_placement: If True, draw debug axis lines at the intended placement pose.
        :return: (action, T, end_pose) End pose should be TODO: what frame?
        TODO: Not sure if this method works at the moment...
        """
        assert(self.platform_table is not None)
        real_block = self.belief_blocks[block_ix]
        pddl_block = self.pddl_blocks[block_ix]
        # Remember where the block started so it can be returned there afterwards.
        original_pose = pddl_block.get_base_link_pose()
        # Set up the PDDLStream problem for the placing the given block on the
        # platform with the specified action.
        self.pddl_info = get_pddlstream_info(self.robot,
                                             self.fixed,
                                             self.pddl_blocks,
                                             add_slanted_grasps=False,
                                             approach_frame='gripper',
                                             use_vision=self.use_vision)
        init = self._get_initial_pddl_state()
        # Figure out the correct transformation matrix based on the action.
        real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))
        rotated_block = get_rotated_block(real_block)
        x = action.pos[0]
        y = action.pos[1]
        # z rests the rotated block on the platform top: half platform height
        # plus half (rotated) block height.
        z = self.platform_table.get_dimensions()[2]/2. + rotated_block.dimensions[2]/2 #+ 1e-5
        tform = numpy.array([[1., 0., 0., x],
                             [0., 1., 0., y],
                             [0., 0., 1., z],
                             [0., 0., 0., 1.]])
        tform[0:3, 0:3] = action.rot.as_matrix()
        # Code to visualize where the block will be placed.
        if vis_placement:
            surface_tform = pb_robot.geometry.tform_from_pose(self.platform_table.get_base_link_pose())
            body_tform = surface_tform@tform
            length, lifeTime = 0.2, 0.0
            pos, quat = pb_robot.geometry.pose_from_tform(body_tform)
            new_x = transformation([length, 0.0, 0.0], pos, quat)
            new_y = transformation([0.0, length, 0.0], pos, quat)
            new_z = transformation([0.0, 0.0, length], pos, quat)
            p.addUserDebugLine(pos, new_x, [1,0,0], lifeTime=lifeTime)
            p.addUserDebugLine(pos, new_y, [0,1,0], lifeTime=lifeTime)
            p.addUserDebugLine(pos, new_z, [0,0,1], lifeTime=lifeTime)
        init += [('RelPose', pddl_block, self.platform_table, tform)]
        goal = ('On', pddl_block, self.platform_table)
        # Solve the PDDLStream problem.
        print('Init:', init)
        print('Goal:', goal)
        # NOTE(review): plan_and_execute elsewhere in this class takes
        # (planning_prob, real, T, ...); this (init, goal, ...) call signature
        # looks stale — confirm before relying on this method.
        self.plan_and_execute(init, goal, search_sample_ratio=1000)
        # Execute the action.
        # TODO: Check gravity compensation in the arm.
        self.step_simulation(T)
        end_pose = self._get_observed_pose(pddl_block, action)
        observation = (action, T, end_pose)
        self.step_simulation(500-T)
        # Put block back in original position.
        # TODO: Check if block is on the table or platform to start.
        self.pddl_info = get_pddlstream_info(self.robot,
                                             self.fixed,
                                             self.pddl_blocks,
                                             add_slanted_grasps=True,
                                             approach_frame='gripper',
                                             use_vision=self.use_vision)
        init = self._get_initial_pddl_state()
        goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)
        init += [('Pose', pddl_block, goal_pose),
                 ('Supported', pddl_block, goal_pose, self.table, self.table_pose)]
        goal = ('and', ('AtPose', pddl_block, goal_pose),
                ('On', pddl_block, self.table))
        # Solve the PDDLStream problem.
        print('Init:', init)
        print('Goal:', goal)
        success = self.plan_and_execute(init, goal, max_time=100., search_sample_ratio=1000)
        return observation
class PandaClientAgent:
    """
    Lightweight client to call a PandaAgent as a service for active learning
    """

    def __init__(self):
        import rospy
        rospy.init_node("panda_client")
        self.restart_services()

    def restart_services(self):
        """Block until the planning service is up, then (re)create its proxy."""
        import rospy
        from stacking_ros.srv import PlanTower
        print("Waiting for Panda Agent server...")
        rospy.wait_for_service("/plan_tower")
        print("Done")
        self.client = rospy.ServiceProxy("/plan_tower", PlanTower)

    def simulate_tower(self, tower, vis, real=False):
        """ 
        Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.

        Returns:
            success : Flag indicating success of execution (True/False)
            stable : Flag indicating (0 or 1)
            num_stack_success : Number of blocks successfully stacked
        """
        from stacking_ros.srv import PlanTowerRequest
        from tamp.ros_utils import tower_to_ros, ros_to_tower
        request = PlanTowerRequest()
        request.tower_info = tower_to_ros(tower)
        # Optionally bring up a local visualization of the requested tower.
        env = None
        if vis:
            env = Environment([World(tower)], vis_sim=True, vis_frames=True)
            env.step(vis_frames=True)
        for b in tower:
            print('----- Block info -----')
            print(b.name)
            print(b.dimensions)
            print(b.pose)
            print(b.rotation)
        response = self.client.call(request)
        if env is not None:
            env.disconnect()
        return response.success, response.stable, response.num_stack_stable
|
flexible
|
{
"blob_id": "5c1465bc70010ecabc156a04ec9877bbf66a229d",
"index": 5150,
"step-1": "<mask token>\n\n\nclass PandaAgent:\n\n def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,\n use_platform=False, use_vision=False, real=False,\n use_planning_server=False, use_learning_server=False,\n alternate_orientations=False):\n \"\"\"\n Build the Panda world in PyBullet and set up the PDDLStream solver.\n The Panda world should in include the given blocks as well as a\n platform which can be used in experimentation.\n :param use_platform: Boolean stating whether to include the platform to\n push blocks off of or not.\n :param use_vision: Boolean stating whether to use vision to detect blocks.\n :param use_planning_server: Boolean stating whether to use the separate\n ROS planning service server.\n :param use_learning_server: Boolean stating whether to host a ROS service\n server to drive planning from active learning script.\n :param alternate_orientations: Boolean stating whether blocks can be replaced in \n their home positions at alternate orientations.\n\n If you are using the ROS action server, you must start it in a separate terminal:\n rosrun stacking_ros planning_server.py\n \"\"\"\n self.real = real\n self.use_vision = use_vision\n self.use_platform = use_platform\n self.use_planning_server = use_planning_server\n self.use_learning_server = use_learning_server\n self.alternate_orientations = alternate_orientations\n self._planning_client_id = pb_robot.utils.connect(use_gui=False)\n self.plan()\n pb_robot.utils.set_default_camera()\n self.robot = pb_robot.panda.Panda()\n self.robot.arm.hand.Open()\n self.belief_blocks = blocks\n (self.pddl_blocks, self.platform_table, self.platform_leg, self.\n table, self.frame, self.wall) = (setup_panda_world(self.robot,\n blocks, block_init_xy_poses, use_platform=use_platform))\n self.fixed = [self.platform_table, self.platform_leg, self.table,\n self.frame, self.wall]\n self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks\n )\n self.orig_joint_angles = 
self.robot.arm.GetJointValues()\n self.orig_block_poses = [b.get_base_link_pose() for b in self.\n pddl_blocks]\n poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]\n self._execution_client_id = pb_robot.utils.connect(use_gui=True)\n self.execute()\n pb_robot.utils.set_default_camera()\n self.execution_robot = pb_robot.panda.Panda()\n self.execution_robot.arm.hand.Open()\n setup_panda_world(self.execution_robot, blocks, poses, use_platform\n =use_platform)\n if (self.use_vision or self.use_planning_server or self.\n use_learning_server or real):\n import rospy\n try:\n rospy.init_node('panda_agent')\n except:\n print('ROS Node already created')\n if real:\n from franka_interface import ArmInterface\n self.real_arm = ArmInterface()\n from franka_core_msgs.msg import RobotState\n state_topic = (\n '/franka_ros_interface/custom_franka_state_controller/robot_state'\n )\n self.arm_last_error_time = time.time()\n self.arm_error_check_time = 3.0\n self.arm_state_subscriber = rospy.Subscriber(state_topic,\n RobotState, self.robot_state_callback)\n if self.use_vision:\n from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist\n rospy.wait_for_service('get_block_poses_world')\n rospy.wait_for_service('get_block_poses_wrist')\n self._get_block_poses_world = rospy.ServiceProxy(\n 'get_block_poses_world', GetBlockPosesWorld)\n self._get_block_poses_wrist = rospy.ServiceProxy(\n 'get_block_poses_wrist', GetBlockPosesWrist)\n self.last_obj_held = None\n if self.use_planning_server:\n from stacking_ros.srv import GetPlan, SetPlanningState\n from tamp.ros_utils import goal_to_ros, ros_to_task_plan\n print('Waiting for planning server...')\n rospy.wait_for_service('get_latest_plan')\n self.goal_to_ros = goal_to_ros\n self.ros_to_task_plan = ros_to_task_plan\n self.init_state_client = rospy.ServiceProxy('/reset_planning',\n SetPlanningState)\n self.get_plan_client = 
rospy.ServiceProxy('/get_latest_plan',\n GetPlan)\n print('Done!')\n if self.use_learning_server:\n from stacking_ros.srv import PlanTower\n self.learning_server = rospy.Service('/plan_tower', PlanTower,\n self.learning_server_callback)\n print('Learning server started!')\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='global',\n use_vision=self.use_vision)\n self.noise = noise\n self.txt_id = None\n self.plan()\n\n def _add_text(self, txt):\n self.execute()\n pb_robot.viz.remove_all_debug()\n self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),\n size=2)\n self.plan()\n\n def execute(self):\n self.state = 'execute'\n pb_robot.aabb.set_client(self._execution_client_id)\n pb_robot.body.set_client(self._execution_client_id)\n pb_robot.collisions.set_client(self._execution_client_id)\n pb_robot.geometry.set_client(self._execution_client_id)\n pb_robot.grasp.set_client(self._execution_client_id)\n pb_robot.joint.set_client(self._execution_client_id)\n pb_robot.link.set_client(self._execution_client_id)\n pb_robot.panda.set_client(self._execution_client_id)\n pb_robot.planning.set_client(self._execution_client_id)\n pb_robot.utils.set_client(self._execution_client_id)\n pb_robot.viz.set_client(self._execution_client_id)\n\n def plan(self):\n if self.use_planning_server:\n return\n self.state = 'plan'\n pb_robot.aabb.set_client(self._planning_client_id)\n pb_robot.body.set_client(self._planning_client_id)\n pb_robot.collisions.set_client(self._planning_client_id)\n pb_robot.geometry.set_client(self._planning_client_id)\n pb_robot.grasp.set_client(self._planning_client_id)\n pb_robot.joint.set_client(self._planning_client_id)\n pb_robot.link.set_client(self._planning_client_id)\n pb_robot.panda.set_client(self._planning_client_id)\n pb_robot.planning.set_client(self._planning_client_id)\n pb_robot.utils.set_client(self._planning_client_id)\n 
pb_robot.viz.set_client(self._planning_client_id)\n <mask token>\n\n def _get_initial_pddl_state(self):\n \"\"\"\n Get the PDDL representation of the world between experiments. This\n method assumes that all blocks are on the table. We will always \"clean\n up\" an experiment by moving blocks away from the platform after an\n experiment.\n \"\"\"\n fixed = [self.table, self.platform_table, self.platform_leg, self.frame\n ]\n conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.\n GetJointValues())\n print('Initial configuration:', conf.configuration)\n init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (\n 'AtConf', conf), ('HandEmpty',)]\n self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.\n get_base_link_pose())\n init += [('Pose', self.table, self.table_pose), ('AtPose', self.\n table, self.table_pose)]\n for body in self.pddl_blocks:\n print(type(body), body)\n pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())\n init += [('Graspable', body), ('Pose', body, pose), ('AtPose',\n body, pose), ('Block', body), ('On', body, self.table), (\n 'Supported', body, pose, self.table, self.table_pose)]\n if not self.platform_table is None:\n platform_pose = pb_robot.vobj.BodyPose(self.platform_table,\n self.platform_table.get_base_link_pose())\n init += [('Pose', self.platform_table, platform_pose), (\n 'AtPose', self.platform_table, platform_pose)]\n init += [('Block', self.platform_table)]\n init += [('Table', self.table)]\n return init\n\n def _get_observed_pose(self, pddl_block, action):\n \"\"\"\n This pose should be relative to the base of the platform leg to\n agree with the simulation. 
The two block representations will have\n different orientation but their positions should be the same.\n \"\"\"\n block_transform = pddl_block.get_base_link_transform()\n platform_transform = self.platform_leg.get_base_link_transform()\n platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0\n rel_transform = numpy.linalg.inv(platform_transform) @ block_transform\n end_pose = pb_robot.geometry.pose_from_tform(rel_transform)\n end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))\n end_pose = add_noise(end_pose, self.noise * numpy.eye(3))\n return end_pose\n <mask token>\n\n def build_planning_problem(self, tower, base_xy):\n \"\"\" Builds the initial conditions for planning \"\"\"\n self.moved_blocks = set()\n tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]\n tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n else:\n pddl_problems = []\n base_block = self.pddl_block_lookup[tower[0].name]\n base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z\n base_pose = base_pos, tower[0].rotation\n base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)\n if self.use_planning_server:\n base_block_ros = BodyInfo()\n base_block_ros.name = base_block.readableName\n base_block_ros.stack = True\n pose_to_ros(base_pose, base_block_ros.pose)\n ros_req.goal_state.append(base_block_ros)\n else:\n pddl_problems.append((self.table, base_block, (base_pos, tower[\n 0].rotation)))\n for b_ix in range(1, len(tower)):\n bottom_block = 
tower[b_ix - 1]\n bottom_pose = bottom_block.pose.pos, bottom_block.rotation\n bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)\n top_block = tower[b_ix]\n top_pose = top_block.pose.pos, top_block.rotation\n top_tform = pb_robot.geometry.tform_from_pose(top_pose)\n rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform\n top_pddl = self.pddl_block_lookup[top_block.name]\n bottom_pddl = self.pddl_block_lookup[bottom_block.name]\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = top_pddl.readableName\n block_ros.base_obj = bottom_pddl.readableName\n transform_to_ros(rel_tform, block_ros.pose)\n block_ros.is_rel_pose = True\n block_ros.stack = True\n ros_req.goal_state.append(block_ros)\n else:\n init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]\n goal_terms = [('On', top_pddl, bottom_pddl)]\n pddl_problems.append((bottom_pddl, top_pddl, rel_tform))\n for ix in reversed(tower_block_order):\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def build_reset_problem(self):\n \"\"\" Builds the initial conditions for a tower reset given a set of moved blocks \"\"\"\n print('Resetting blocks...')\n print('Moved Blocks:', self.moved_blocks)\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=True)\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, 
transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0, \n 0.5), ignore_resets=False):\n \"\"\"\n Simulates a tower stacking and unstacking by requesting plans from a separate planning server\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n for block in tower:\n print('Block:', block.name)\n print('Pose:', block.pose)\n print('Dims:', block.dimensions)\n print('CoM:', block.com)\n print('Rotations:', block.rotation)\n print('-----')\n if self.use_vision:\n self._update_block_poses()\n self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks\n ]\n planning_prob = self.build_planning_problem(tower, base_xy)\n success, stack_stable, reset_stable, num_success, fatal = (self.\n plan_and_execute(planning_prob, real, T, stack=True,\n ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n while not success and not fatal:\n print(\n f'Got recoverable failure. 
Replanning from step index {num_success}.'\n )\n if self.use_planning_server:\n from tamp.ros_utils import block_init_to_ros\n if self.real:\n planning_prob.robot_config.angles = (self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n else:\n planning_prob.robot_config.angles = (self.robot.arm.\n GetJointValues())\n planning_prob.init_state = block_init_to_ros(self.pddl_blocks)\n if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):\n planning_prob.held_block.name = (self.last_obj_held.\n body.readableName)\n transform_to_ros(self.last_obj_held.grasp_objF,\n planning_prob.held_block.pose)\n success, stack_stable, reset_stable, num_success, fatal = (self\n .plan_and_execute(planning_prob, real, T, stack=True,\n start_idx=num_success, ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n num_stack_success = min(len(tower), num_success)\n if not ignore_resets:\n try:\n if not (stack_stable and reset_stable):\n if self.use_vision and not stack_stable:\n self._update_block_poses(find_moved=True)\n planning_prob = self.build_reset_problem()\n reset_fatal = False\n num_reset_success = 0\n while len(self.moved_blocks) > 0 and not reset_fatal:\n print(f'Resetting {len(self.moved_blocks)} blocks.')\n (reset_success, _, reset_stable, num_reset_success,\n reset_fatal) = (self.plan_and_execute(\n planning_prob, real, T, stack=False, start_idx=\n num_reset_success))\n except Exception as e:\n print('Planning/execution failed during tower reset.')\n print(e)\n return success, stack_stable, num_stack_success\n\n def plan_and_execute(self, planning_prob, real=False, T=2500, stack=\n True, start_idx=0, ignore_resets=False):\n \"\"\"\n Requests a PDDLStream plan from a planning server and executes the resulting plan\n Returns:\n success : Flag for whether the plan execution succeeded\n stack_stable : Flag for whether stacking a 
stable tower was successful\n reset_stable : Flag for whether resetting a tower was successful\n num_success : Progress (in number of steps) of successful tasks\n fatal : Flag for whether the error was fatal (True) or recoverable (False)\n start_idx : Start index of planning (for recovering from partial plans)\n ignore_resets : Flag for whether to stop after resets\n \"\"\"\n num_success = start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n if self.use_planning_server:\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n while num_success < num_steps:\n try:\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state\n [num_success].name]\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print('Getting a plan from server...')\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print('Planning failed on server side.')\n if ros_req.goal_state[num_success].stack:\n print(f'Failed during stacking {query_block}')\n fatal = True\n else:\n print(f'Failed during resetting {query_block}')\n input(\n 'Manually reset the blocks and press Enter to continue'\n )\n if real:\n self._update_block_poses()\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.\n execution_robot, self.pddl_block_lookup)\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n self._add_text('Planning block placement')\n self.plan()\n saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if\n b != 
blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps / 2\n ) and self.alternate_orientations:\n init += [('Reset',)]\n goal_terms.append(('AtHome', blk))\n else:\n init += [('Pose', blk, blk_pose), ('Supported',\n blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n pddl_info = get_pddlstream_info(self.robot, fixed_objs,\n self.pddl_blocks, add_slanted_grasps=True,\n approach_frame='global', use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal,\n search_sample_ratio=1.0, max_time=INF)\n if plan is None:\n print('\\nFailed to plan\\n')\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n saved_world.restore()\n print('\\nGot plan:')\n print(plan)\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=\n False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False,\n prompt=False, obstacles=obstacles,\n sim_fatal_failure_prob=0.0,\n sim_recoverable_failure_prob=0.0)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n if not real:\n self.step_simulation(T, vis_frames=False)\n if stack:\n stable = self.check_stability(real, query_block,\n desired_pose)\n else:\n stable = True\n if stable == 0.0:\n prompt = input(\n 'Tower NOT stable. Is this true? 
[y: Unstable / n: Stable]'\n )\n if prompt == 'n':\n stable = 1.0\n if stable == 0.0:\n print('Unstable after execution!')\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps / 2:\n print('Completed tower stack!')\n stack_stable = True\n stack = False\n if ignore_resets:\n return (True, stack_stable, reset_stable,\n num_success, False)\n elif num_success == num_steps:\n print('Completed tower reset!')\n reset_stable = True\n return (True, stack_stable, reset_stable,\n num_success, False)\n except ExecutionFailure as e:\n print('Planning/execution failed.')\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal\n\n def check_stability(self, real, block_pddl, desired_pose, max_tries=2):\n if self.use_vision:\n try:\n poses = self._get_block_poses_wrist().poses\n except:\n print(\n 'Service call to get block poses failed during check stability. 
Exiting.'\n )\n sys.exit()\n visible = False\n for named_pose in poses:\n if named_pose.block_id in block_pddl.readableName.split('_')[-1\n ]:\n visible = True\n pose = named_pose.pose.pose\n des_pos = desired_pose[0]\n obs_pos = pose.position.x, pose.position.y, pose.position.z\n print('[Check Stability] Desired Pos:', des_pos)\n print('[Check Stability] Detected Pos:', obs_pos)\n dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.\n array(des_pos))\n print(\n f'[Check Stability] Position Distance (>0.04): {dist}')\n if dist > 0.04:\n return 0.0\n orn = desired_pose[1]\n obs_orn = pyquaternion.Quaternion(pose.orientation.w,\n pose.orientation.x, pose.orientation.y, pose.\n orientation.z)\n des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1\n ], orn[2])\n angle = (des_orn.inverse * obs_orn).angle\n angle = numpy.abs(numpy.rad2deg(angle))\n print(\n f'[Check Stability] Orientation Distance (> 15): {angle}'\n )\n if angle > 15:\n return 0.0\n if not visible:\n print('[Check Stability] Object not visible to camera.')\n return 0.0\n else:\n end_pose = block_pddl.get_base_link_point()\n dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(\n desired_pose[0]))\n if dist > 0.01:\n print('Unstable!')\n return 0.0\n return 1.0\n\n def validate_ros_plan(self, ros_resp, tgt_block):\n \"\"\" Validates a ROS plan to move a block against the expected target block name \"\"\"\n if len(ros_resp.plan) == 0:\n return True\n else:\n plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']\n if len(plan_blocks) > 0:\n plan_block = plan_blocks[0]\n else:\n return False\n print(\n f'Received plan to move {plan_block} and expected to move {tgt_block}'\n )\n return tgt_block.readableName == plan_block\n <mask token>\n <mask token>\n\n def step_simulation(self, T, vis_frames=False, lifeTime=0.1):\n p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)\n p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)\n q = 
self.robot.get_joint_positions()\n for _ in range(T):\n p.stepSimulation(physicsClientId=self._execution_client_id)\n p.stepSimulation(physicsClientId=self._planning_client_id)\n self.execute()\n self.execution_robot.set_joint_positions(self.robot.joints, q)\n self.plan()\n self.robot.set_joint_positions(self.robot.joints, q)\n time.sleep(1 / 2400.0)\n if vis_frames:\n length = 0.1\n for pddl_block in self.pddl_blocks:\n pos, quat = pddl_block.get_pose()\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n\n def simulate_action(self, action, block_ix, T=50, vis_sim=False,\n vis_placement=False):\n \"\"\"\n Perform the given action to with the given block. 
An observation\n should be returned in the reference frame of the platform.\n :param action: Place action which describes the relative pose of the block to the platform surface.\n :param real_block: Belief representation of the block to perform the action on.\n :param T: How many timesteps to simulate the block falling for.\n :param vis_sim: Ununsed.\n :return: (action, T, end_pose) End pose should be TODO: what frame?\n \n TODO: Not sure if this method works at the moment...\n \"\"\"\n assert self.platform_table is not None\n real_block = self.belief_blocks[block_ix]\n pddl_block = self.pddl_blocks[block_ix]\n original_pose = pddl_block.get_base_link_pose()\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))\n rotated_block = get_rotated_block(real_block)\n x = action.pos[0]\n y = action.pos[1]\n z = self.platform_table.get_dimensions()[2\n ] / 2.0 + rotated_block.dimensions[2] / 2\n tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0, \n 0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])\n tform[0:3, 0:3] = action.rot.as_matrix()\n if vis_placement:\n surface_tform = pb_robot.geometry.tform_from_pose(self.\n platform_table.get_base_link_pose())\n body_tform = surface_tform @ tform\n length, lifeTime = 0.2, 0.0\n pos, quat = pb_robot.geometry.pose_from_tform(body_tform)\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)\n init += [('RelPose', pddl_block, self.platform_table, tform)]\n goal = 'On', pddl_block, self.platform_table\n 
print('Init:', init)\n print('Goal:', goal)\n self.plan_and_execute(init, goal, search_sample_ratio=1000)\n self.step_simulation(T)\n end_pose = self._get_observed_pose(pddl_block, action)\n observation = action, T, end_pose\n self.step_simulation(500 - T)\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)\n init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,\n goal_pose, self.table, self.table_pose)]\n goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,\n self.table)\n print('Init:', init)\n print('Goal:', goal)\n success = self.plan_and_execute(init, goal, max_time=100.0,\n search_sample_ratio=1000)\n return observation\n\n\nclass PandaClientAgent:\n \"\"\"\n Lightweight client to call a PandaAgent as a service for active learning\n \"\"\"\n\n def __init__(self):\n import rospy\n rospy.init_node('panda_client')\n self.restart_services()\n\n def restart_services(self):\n import rospy\n from stacking_ros.srv import PlanTower\n print('Waiting for Panda Agent server...')\n rospy.wait_for_service('/plan_tower')\n print('Done')\n self.client = rospy.ServiceProxy('/plan_tower', PlanTower)\n\n def simulate_tower(self, tower, vis, real=False):\n \"\"\" \n Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n from stacking_ros.srv import PlanTowerRequest\n from tamp.ros_utils import tower_to_ros, ros_to_tower\n request = PlanTowerRequest()\n request.tower_info = tower_to_ros(tower)\n if vis:\n w = World(tower)\n env = Environment([w], vis_sim=True, vis_frames=True)\n env.step(vis_frames=True)\n for b in tower:\n 
print('----- Block info -----')\n print(b.name)\n print(b.dimensions)\n print(b.pose)\n print(b.rotation)\n response = self.client.call(request)\n if vis:\n env.disconnect()\n return response.success, response.stable, response.num_stack_stable\n",
"step-2": "<mask token>\n\n\nclass PandaAgent:\n\n def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,\n use_platform=False, use_vision=False, real=False,\n use_planning_server=False, use_learning_server=False,\n alternate_orientations=False):\n \"\"\"\n Build the Panda world in PyBullet and set up the PDDLStream solver.\n The Panda world should in include the given blocks as well as a\n platform which can be used in experimentation.\n :param use_platform: Boolean stating whether to include the platform to\n push blocks off of or not.\n :param use_vision: Boolean stating whether to use vision to detect blocks.\n :param use_planning_server: Boolean stating whether to use the separate\n ROS planning service server.\n :param use_learning_server: Boolean stating whether to host a ROS service\n server to drive planning from active learning script.\n :param alternate_orientations: Boolean stating whether blocks can be replaced in \n their home positions at alternate orientations.\n\n If you are using the ROS action server, you must start it in a separate terminal:\n rosrun stacking_ros planning_server.py\n \"\"\"\n self.real = real\n self.use_vision = use_vision\n self.use_platform = use_platform\n self.use_planning_server = use_planning_server\n self.use_learning_server = use_learning_server\n self.alternate_orientations = alternate_orientations\n self._planning_client_id = pb_robot.utils.connect(use_gui=False)\n self.plan()\n pb_robot.utils.set_default_camera()\n self.robot = pb_robot.panda.Panda()\n self.robot.arm.hand.Open()\n self.belief_blocks = blocks\n (self.pddl_blocks, self.platform_table, self.platform_leg, self.\n table, self.frame, self.wall) = (setup_panda_world(self.robot,\n blocks, block_init_xy_poses, use_platform=use_platform))\n self.fixed = [self.platform_table, self.platform_leg, self.table,\n self.frame, self.wall]\n self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks\n )\n self.orig_joint_angles = 
self.robot.arm.GetJointValues()\n self.orig_block_poses = [b.get_base_link_pose() for b in self.\n pddl_blocks]\n poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]\n self._execution_client_id = pb_robot.utils.connect(use_gui=True)\n self.execute()\n pb_robot.utils.set_default_camera()\n self.execution_robot = pb_robot.panda.Panda()\n self.execution_robot.arm.hand.Open()\n setup_panda_world(self.execution_robot, blocks, poses, use_platform\n =use_platform)\n if (self.use_vision or self.use_planning_server or self.\n use_learning_server or real):\n import rospy\n try:\n rospy.init_node('panda_agent')\n except:\n print('ROS Node already created')\n if real:\n from franka_interface import ArmInterface\n self.real_arm = ArmInterface()\n from franka_core_msgs.msg import RobotState\n state_topic = (\n '/franka_ros_interface/custom_franka_state_controller/robot_state'\n )\n self.arm_last_error_time = time.time()\n self.arm_error_check_time = 3.0\n self.arm_state_subscriber = rospy.Subscriber(state_topic,\n RobotState, self.robot_state_callback)\n if self.use_vision:\n from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist\n rospy.wait_for_service('get_block_poses_world')\n rospy.wait_for_service('get_block_poses_wrist')\n self._get_block_poses_world = rospy.ServiceProxy(\n 'get_block_poses_world', GetBlockPosesWorld)\n self._get_block_poses_wrist = rospy.ServiceProxy(\n 'get_block_poses_wrist', GetBlockPosesWrist)\n self.last_obj_held = None\n if self.use_planning_server:\n from stacking_ros.srv import GetPlan, SetPlanningState\n from tamp.ros_utils import goal_to_ros, ros_to_task_plan\n print('Waiting for planning server...')\n rospy.wait_for_service('get_latest_plan')\n self.goal_to_ros = goal_to_ros\n self.ros_to_task_plan = ros_to_task_plan\n self.init_state_client = rospy.ServiceProxy('/reset_planning',\n SetPlanningState)\n self.get_plan_client = 
rospy.ServiceProxy('/get_latest_plan',\n GetPlan)\n print('Done!')\n if self.use_learning_server:\n from stacking_ros.srv import PlanTower\n self.learning_server = rospy.Service('/plan_tower', PlanTower,\n self.learning_server_callback)\n print('Learning server started!')\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='global',\n use_vision=self.use_vision)\n self.noise = noise\n self.txt_id = None\n self.plan()\n\n def _add_text(self, txt):\n self.execute()\n pb_robot.viz.remove_all_debug()\n self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),\n size=2)\n self.plan()\n\n def execute(self):\n self.state = 'execute'\n pb_robot.aabb.set_client(self._execution_client_id)\n pb_robot.body.set_client(self._execution_client_id)\n pb_robot.collisions.set_client(self._execution_client_id)\n pb_robot.geometry.set_client(self._execution_client_id)\n pb_robot.grasp.set_client(self._execution_client_id)\n pb_robot.joint.set_client(self._execution_client_id)\n pb_robot.link.set_client(self._execution_client_id)\n pb_robot.panda.set_client(self._execution_client_id)\n pb_robot.planning.set_client(self._execution_client_id)\n pb_robot.utils.set_client(self._execution_client_id)\n pb_robot.viz.set_client(self._execution_client_id)\n\n def plan(self):\n if self.use_planning_server:\n return\n self.state = 'plan'\n pb_robot.aabb.set_client(self._planning_client_id)\n pb_robot.body.set_client(self._planning_client_id)\n pb_robot.collisions.set_client(self._planning_client_id)\n pb_robot.geometry.set_client(self._planning_client_id)\n pb_robot.grasp.set_client(self._planning_client_id)\n pb_robot.joint.set_client(self._planning_client_id)\n pb_robot.link.set_client(self._planning_client_id)\n pb_robot.panda.set_client(self._planning_client_id)\n pb_robot.planning.set_client(self._planning_client_id)\n pb_robot.utils.set_client(self._planning_client_id)\n 
pb_robot.viz.set_client(self._planning_client_id)\n\n def reset_world(self):\n \"\"\" Resets the planning world to its original configuration \"\"\"\n print('Resetting world')\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print('Done')\n\n def _get_initial_pddl_state(self):\n \"\"\"\n Get the PDDL representation of the world between experiments. This\n method assumes that all blocks are on the table. We will always \"clean\n up\" an experiment by moving blocks away from the platform after an\n experiment.\n \"\"\"\n fixed = [self.table, self.platform_table, self.platform_leg, self.frame\n ]\n conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.\n GetJointValues())\n print('Initial configuration:', conf.configuration)\n init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (\n 'AtConf', conf), ('HandEmpty',)]\n self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.\n get_base_link_pose())\n init += [('Pose', self.table, self.table_pose), ('AtPose', self.\n table, self.table_pose)]\n for body in self.pddl_blocks:\n print(type(body), body)\n pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())\n init += [('Graspable', body), ('Pose', body, pose), ('AtPose',\n body, pose), ('Block', body), ('On', body, self.table), (\n 'Supported', body, pose, self.table, self.table_pose)]\n if not self.platform_table is None:\n platform_pose = pb_robot.vobj.BodyPose(self.platform_table,\n self.platform_table.get_base_link_pose())\n init += [('Pose', self.platform_table, platform_pose), (\n 'AtPose', self.platform_table, platform_pose)]\n init += [('Block', self.platform_table)]\n init += [('Table', self.table)]\n return init\n\n def _get_observed_pose(self, 
pddl_block, action):\n \"\"\"\n This pose should be relative to the base of the platform leg to\n agree with the simulation. The two block representations will have\n different orientation but their positions should be the same.\n \"\"\"\n block_transform = pddl_block.get_base_link_transform()\n platform_transform = self.platform_leg.get_base_link_transform()\n platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0\n rel_transform = numpy.linalg.inv(platform_transform) @ block_transform\n end_pose = pb_robot.geometry.pose_from_tform(rel_transform)\n end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))\n end_pose = add_noise(end_pose, self.noise * numpy.eye(3))\n return end_pose\n <mask token>\n\n def build_planning_problem(self, tower, base_xy):\n \"\"\" Builds the initial conditions for planning \"\"\"\n self.moved_blocks = set()\n tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]\n tower_block_order = [self.pddl_blocks.index(b) for b in tower_pddl]\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n else:\n pddl_problems = []\n base_block = self.pddl_block_lookup[tower[0].name]\n base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z\n base_pose = base_pos, tower[0].rotation\n base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)\n if self.use_planning_server:\n base_block_ros = BodyInfo()\n base_block_ros.name = base_block.readableName\n base_block_ros.stack = True\n pose_to_ros(base_pose, base_block_ros.pose)\n ros_req.goal_state.append(base_block_ros)\n else:\n 
pddl_problems.append((self.table, base_block, (base_pos, tower[\n 0].rotation)))\n for b_ix in range(1, len(tower)):\n bottom_block = tower[b_ix - 1]\n bottom_pose = bottom_block.pose.pos, bottom_block.rotation\n bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)\n top_block = tower[b_ix]\n top_pose = top_block.pose.pos, top_block.rotation\n top_tform = pb_robot.geometry.tform_from_pose(top_pose)\n rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform\n top_pddl = self.pddl_block_lookup[top_block.name]\n bottom_pddl = self.pddl_block_lookup[bottom_block.name]\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = top_pddl.readableName\n block_ros.base_obj = bottom_pddl.readableName\n transform_to_ros(rel_tform, block_ros.pose)\n block_ros.is_rel_pose = True\n block_ros.stack = True\n ros_req.goal_state.append(block_ros)\n else:\n init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]\n goal_terms = [('On', top_pddl, bottom_pddl)]\n pddl_problems.append((bottom_pddl, top_pddl, rel_tform))\n for ix in reversed(tower_block_order):\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def build_reset_problem(self):\n \"\"\" Builds the initial conditions for a tower reset given a set of moved blocks \"\"\"\n print('Resetting blocks...')\n print('Moved Blocks:', self.moved_blocks)\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=True)\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n 
from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0, \n 0.5), ignore_resets=False):\n \"\"\"\n Simulates a tower stacking and unstacking by requesting plans from a separate planning server\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n for block in tower:\n print('Block:', block.name)\n print('Pose:', block.pose)\n print('Dims:', block.dimensions)\n print('CoM:', block.com)\n print('Rotations:', block.rotation)\n print('-----')\n if self.use_vision:\n self._update_block_poses()\n self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks\n ]\n planning_prob = self.build_planning_problem(tower, base_xy)\n success, stack_stable, reset_stable, num_success, fatal = (self.\n plan_and_execute(planning_prob, real, T, stack=True,\n ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n 
print(f'Completed tower reset stable: {reset_stable}')\n while not success and not fatal:\n print(\n f'Got recoverable failure. Replanning from step index {num_success}.'\n )\n if self.use_planning_server:\n from tamp.ros_utils import block_init_to_ros\n if self.real:\n planning_prob.robot_config.angles = (self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n else:\n planning_prob.robot_config.angles = (self.robot.arm.\n GetJointValues())\n planning_prob.init_state = block_init_to_ros(self.pddl_blocks)\n if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):\n planning_prob.held_block.name = (self.last_obj_held.\n body.readableName)\n transform_to_ros(self.last_obj_held.grasp_objF,\n planning_prob.held_block.pose)\n success, stack_stable, reset_stable, num_success, fatal = (self\n .plan_and_execute(planning_prob, real, T, stack=True,\n start_idx=num_success, ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n num_stack_success = min(len(tower), num_success)\n if not ignore_resets:\n try:\n if not (stack_stable and reset_stable):\n if self.use_vision and not stack_stable:\n self._update_block_poses(find_moved=True)\n planning_prob = self.build_reset_problem()\n reset_fatal = False\n num_reset_success = 0\n while len(self.moved_blocks) > 0 and not reset_fatal:\n print(f'Resetting {len(self.moved_blocks)} blocks.')\n (reset_success, _, reset_stable, num_reset_success,\n reset_fatal) = (self.plan_and_execute(\n planning_prob, real, T, stack=False, start_idx=\n num_reset_success))\n except Exception as e:\n print('Planning/execution failed during tower reset.')\n print(e)\n return success, stack_stable, num_stack_success\n\n def plan_and_execute(self, planning_prob, real=False, T=2500, stack=\n True, start_idx=0, ignore_resets=False):\n \"\"\"\n Requests a PDDLStream plan from a planning server and executes the 
resulting plan\n Returns:\n success : Flag for whether the plan execution succeeded\n stack_stable : Flag for whether stacking a stable tower was successful\n reset_stable : Flag for whether resetting a tower was successful\n num_success : Progress (in number of steps) of successful tasks\n fatal : Flag for whether the error was fatal (True) or recoverable (False)\n start_idx : Start index of planning (for recovering from partial plans)\n ignore_resets : Flag for whether to stop after resets\n \"\"\"\n num_success = start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n if self.use_planning_server:\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n while num_success < num_steps:\n try:\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state\n [num_success].name]\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print('Getting a plan from server...')\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print('Planning failed on server side.')\n if ros_req.goal_state[num_success].stack:\n print(f'Failed during stacking {query_block}')\n fatal = True\n else:\n print(f'Failed during resetting {query_block}')\n input(\n 'Manually reset the blocks and press Enter to continue'\n )\n if real:\n self._update_block_poses()\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.\n execution_robot, self.pddl_block_lookup)\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n self._add_text('Planning block placement')\n self.plan()\n 
saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if\n b != blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps / 2\n ) and self.alternate_orientations:\n init += [('Reset',)]\n goal_terms.append(('AtHome', blk))\n else:\n init += [('Pose', blk, blk_pose), ('Supported',\n blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n pddl_info = get_pddlstream_info(self.robot, fixed_objs,\n self.pddl_blocks, add_slanted_grasps=True,\n approach_frame='global', use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal,\n search_sample_ratio=1.0, max_time=INF)\n if plan is None:\n print('\\nFailed to plan\\n')\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n saved_world.restore()\n print('\\nGot plan:')\n print(plan)\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=\n False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False,\n prompt=False, obstacles=obstacles,\n sim_fatal_failure_prob=0.0,\n sim_recoverable_failure_prob=0.0)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n if not real:\n self.step_simulation(T, vis_frames=False)\n if stack:\n stable = self.check_stability(real, query_block,\n desired_pose)\n else:\n stable = True\n if stable == 0.0:\n prompt = input(\n 'Tower NOT stable. Is this true? 
[y: Unstable / n: Stable]'\n )\n if prompt == 'n':\n stable = 1.0\n if stable == 0.0:\n print('Unstable after execution!')\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps / 2:\n print('Completed tower stack!')\n stack_stable = True\n stack = False\n if ignore_resets:\n return (True, stack_stable, reset_stable,\n num_success, False)\n elif num_success == num_steps:\n print('Completed tower reset!')\n reset_stable = True\n return (True, stack_stable, reset_stable,\n num_success, False)\n except ExecutionFailure as e:\n print('Planning/execution failed.')\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal\n\n def check_stability(self, real, block_pddl, desired_pose, max_tries=2):\n if self.use_vision:\n try:\n poses = self._get_block_poses_wrist().poses\n except:\n print(\n 'Service call to get block poses failed during check stability. 
Exiting.'\n )\n sys.exit()\n visible = False\n for named_pose in poses:\n if named_pose.block_id in block_pddl.readableName.split('_')[-1\n ]:\n visible = True\n pose = named_pose.pose.pose\n des_pos = desired_pose[0]\n obs_pos = pose.position.x, pose.position.y, pose.position.z\n print('[Check Stability] Desired Pos:', des_pos)\n print('[Check Stability] Detected Pos:', obs_pos)\n dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.\n array(des_pos))\n print(\n f'[Check Stability] Position Distance (>0.04): {dist}')\n if dist > 0.04:\n return 0.0\n orn = desired_pose[1]\n obs_orn = pyquaternion.Quaternion(pose.orientation.w,\n pose.orientation.x, pose.orientation.y, pose.\n orientation.z)\n des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1\n ], orn[2])\n angle = (des_orn.inverse * obs_orn).angle\n angle = numpy.abs(numpy.rad2deg(angle))\n print(\n f'[Check Stability] Orientation Distance (> 15): {angle}'\n )\n if angle > 15:\n return 0.0\n if not visible:\n print('[Check Stability] Object not visible to camera.')\n return 0.0\n else:\n end_pose = block_pddl.get_base_link_point()\n dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(\n desired_pose[0]))\n if dist > 0.01:\n print('Unstable!')\n return 0.0\n return 1.0\n\n def validate_ros_plan(self, ros_resp, tgt_block):\n \"\"\" Validates a ROS plan to move a block against the expected target block name \"\"\"\n if len(ros_resp.plan) == 0:\n return True\n else:\n plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']\n if len(plan_blocks) > 0:\n plan_block = plan_blocks[0]\n else:\n return False\n print(\n f'Received plan to move {plan_block} and expected to move {tgt_block}'\n )\n return tgt_block.readableName == plan_block\n <mask token>\n\n def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):\n \"\"\" Service callback function to plan and execute a tower from active learning script \"\"\"\n from stacking_ros.srv import PlanTowerResponse\n from tamp.ros_utils import 
ros_to_tower\n tower = ros_to_tower(ros_req.tower_info)\n success, stable, num_stack_stable = self.simulate_tower(tower, True,\n real=self.real, base_xy=base_xy)\n resp = PlanTowerResponse()\n resp.success = success\n resp.stable = stable\n resp.num_stack_stable = num_stack_stable\n return resp\n\n def step_simulation(self, T, vis_frames=False, lifeTime=0.1):\n p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)\n p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)\n q = self.robot.get_joint_positions()\n for _ in range(T):\n p.stepSimulation(physicsClientId=self._execution_client_id)\n p.stepSimulation(physicsClientId=self._planning_client_id)\n self.execute()\n self.execution_robot.set_joint_positions(self.robot.joints, q)\n self.plan()\n self.robot.set_joint_positions(self.robot.joints, q)\n time.sleep(1 / 2400.0)\n if vis_frames:\n length = 0.1\n for pddl_block in self.pddl_blocks:\n pos, quat = pddl_block.get_pose()\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n\n def simulate_action(self, action, block_ix, T=50, vis_sim=False,\n vis_placement=False):\n \"\"\"\n Perform the given action to with the given block. 
An observation\n should be returned in the reference frame of the platform.\n :param action: Place action which describes the relative pose of the block to the platform surface.\n :param real_block: Belief representation of the block to perform the action on.\n :param T: How many timesteps to simulate the block falling for.\n :param vis_sim: Ununsed.\n :return: (action, T, end_pose) End pose should be TODO: what frame?\n \n TODO: Not sure if this method works at the moment...\n \"\"\"\n assert self.platform_table is not None\n real_block = self.belief_blocks[block_ix]\n pddl_block = self.pddl_blocks[block_ix]\n original_pose = pddl_block.get_base_link_pose()\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))\n rotated_block = get_rotated_block(real_block)\n x = action.pos[0]\n y = action.pos[1]\n z = self.platform_table.get_dimensions()[2\n ] / 2.0 + rotated_block.dimensions[2] / 2\n tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0, \n 0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])\n tform[0:3, 0:3] = action.rot.as_matrix()\n if vis_placement:\n surface_tform = pb_robot.geometry.tform_from_pose(self.\n platform_table.get_base_link_pose())\n body_tform = surface_tform @ tform\n length, lifeTime = 0.2, 0.0\n pos, quat = pb_robot.geometry.pose_from_tform(body_tform)\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)\n init += [('RelPose', pddl_block, self.platform_table, tform)]\n goal = 'On', pddl_block, self.platform_table\n 
print('Init:', init)\n print('Goal:', goal)\n self.plan_and_execute(init, goal, search_sample_ratio=1000)\n self.step_simulation(T)\n end_pose = self._get_observed_pose(pddl_block, action)\n observation = action, T, end_pose\n self.step_simulation(500 - T)\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)\n init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,\n goal_pose, self.table, self.table_pose)]\n goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,\n self.table)\n print('Init:', init)\n print('Goal:', goal)\n success = self.plan_and_execute(init, goal, max_time=100.0,\n search_sample_ratio=1000)\n return observation\n\n\nclass PandaClientAgent:\n \"\"\"\n Lightweight client to call a PandaAgent as a service for active learning\n \"\"\"\n\n def __init__(self):\n import rospy\n rospy.init_node('panda_client')\n self.restart_services()\n\n def restart_services(self):\n import rospy\n from stacking_ros.srv import PlanTower\n print('Waiting for Panda Agent server...')\n rospy.wait_for_service('/plan_tower')\n print('Done')\n self.client = rospy.ServiceProxy('/plan_tower', PlanTower)\n\n def simulate_tower(self, tower, vis, real=False):\n \"\"\" \n Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n from stacking_ros.srv import PlanTowerRequest\n from tamp.ros_utils import tower_to_ros, ros_to_tower\n request = PlanTowerRequest()\n request.tower_info = tower_to_ros(tower)\n if vis:\n w = World(tower)\n env = Environment([w], vis_sim=True, vis_frames=True)\n env.step(vis_frames=True)\n for b in tower:\n 
print('----- Block info -----')\n print(b.name)\n print(b.dimensions)\n print(b.pose)\n print(b.rotation)\n response = self.client.call(request)\n if vis:\n env.disconnect()\n return response.success, response.stable, response.num_stack_stable\n",
"step-3": "<mask token>\n\n\nclass PandaAgent:\n\n def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,\n use_platform=False, use_vision=False, real=False,\n use_planning_server=False, use_learning_server=False,\n alternate_orientations=False):\n \"\"\"\n Build the Panda world in PyBullet and set up the PDDLStream solver.\n The Panda world should in include the given blocks as well as a\n platform which can be used in experimentation.\n :param use_platform: Boolean stating whether to include the platform to\n push blocks off of or not.\n :param use_vision: Boolean stating whether to use vision to detect blocks.\n :param use_planning_server: Boolean stating whether to use the separate\n ROS planning service server.\n :param use_learning_server: Boolean stating whether to host a ROS service\n server to drive planning from active learning script.\n :param alternate_orientations: Boolean stating whether blocks can be replaced in \n their home positions at alternate orientations.\n\n If you are using the ROS action server, you must start it in a separate terminal:\n rosrun stacking_ros planning_server.py\n \"\"\"\n self.real = real\n self.use_vision = use_vision\n self.use_platform = use_platform\n self.use_planning_server = use_planning_server\n self.use_learning_server = use_learning_server\n self.alternate_orientations = alternate_orientations\n self._planning_client_id = pb_robot.utils.connect(use_gui=False)\n self.plan()\n pb_robot.utils.set_default_camera()\n self.robot = pb_robot.panda.Panda()\n self.robot.arm.hand.Open()\n self.belief_blocks = blocks\n (self.pddl_blocks, self.platform_table, self.platform_leg, self.\n table, self.frame, self.wall) = (setup_panda_world(self.robot,\n blocks, block_init_xy_poses, use_platform=use_platform))\n self.fixed = [self.platform_table, self.platform_leg, self.table,\n self.frame, self.wall]\n self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks\n )\n self.orig_joint_angles = 
self.robot.arm.GetJointValues()\n self.orig_block_poses = [b.get_base_link_pose() for b in self.\n pddl_blocks]\n poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]\n self._execution_client_id = pb_robot.utils.connect(use_gui=True)\n self.execute()\n pb_robot.utils.set_default_camera()\n self.execution_robot = pb_robot.panda.Panda()\n self.execution_robot.arm.hand.Open()\n setup_panda_world(self.execution_robot, blocks, poses, use_platform\n =use_platform)\n if (self.use_vision or self.use_planning_server or self.\n use_learning_server or real):\n import rospy\n try:\n rospy.init_node('panda_agent')\n except:\n print('ROS Node already created')\n if real:\n from franka_interface import ArmInterface\n self.real_arm = ArmInterface()\n from franka_core_msgs.msg import RobotState\n state_topic = (\n '/franka_ros_interface/custom_franka_state_controller/robot_state'\n )\n self.arm_last_error_time = time.time()\n self.arm_error_check_time = 3.0\n self.arm_state_subscriber = rospy.Subscriber(state_topic,\n RobotState, self.robot_state_callback)\n if self.use_vision:\n from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist\n rospy.wait_for_service('get_block_poses_world')\n rospy.wait_for_service('get_block_poses_wrist')\n self._get_block_poses_world = rospy.ServiceProxy(\n 'get_block_poses_world', GetBlockPosesWorld)\n self._get_block_poses_wrist = rospy.ServiceProxy(\n 'get_block_poses_wrist', GetBlockPosesWrist)\n self.last_obj_held = None\n if self.use_planning_server:\n from stacking_ros.srv import GetPlan, SetPlanningState\n from tamp.ros_utils import goal_to_ros, ros_to_task_plan\n print('Waiting for planning server...')\n rospy.wait_for_service('get_latest_plan')\n self.goal_to_ros = goal_to_ros\n self.ros_to_task_plan = ros_to_task_plan\n self.init_state_client = rospy.ServiceProxy('/reset_planning',\n SetPlanningState)\n self.get_plan_client = 
rospy.ServiceProxy('/get_latest_plan',\n GetPlan)\n print('Done!')\n if self.use_learning_server:\n from stacking_ros.srv import PlanTower\n self.learning_server = rospy.Service('/plan_tower', PlanTower,\n self.learning_server_callback)\n print('Learning server started!')\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='global',\n use_vision=self.use_vision)\n self.noise = noise\n self.txt_id = None\n self.plan()\n\n def _add_text(self, txt):\n self.execute()\n pb_robot.viz.remove_all_debug()\n self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),\n size=2)\n self.plan()\n\n def execute(self):\n self.state = 'execute'\n pb_robot.aabb.set_client(self._execution_client_id)\n pb_robot.body.set_client(self._execution_client_id)\n pb_robot.collisions.set_client(self._execution_client_id)\n pb_robot.geometry.set_client(self._execution_client_id)\n pb_robot.grasp.set_client(self._execution_client_id)\n pb_robot.joint.set_client(self._execution_client_id)\n pb_robot.link.set_client(self._execution_client_id)\n pb_robot.panda.set_client(self._execution_client_id)\n pb_robot.planning.set_client(self._execution_client_id)\n pb_robot.utils.set_client(self._execution_client_id)\n pb_robot.viz.set_client(self._execution_client_id)\n\n def plan(self):\n if self.use_planning_server:\n return\n self.state = 'plan'\n pb_robot.aabb.set_client(self._planning_client_id)\n pb_robot.body.set_client(self._planning_client_id)\n pb_robot.collisions.set_client(self._planning_client_id)\n pb_robot.geometry.set_client(self._planning_client_id)\n pb_robot.grasp.set_client(self._planning_client_id)\n pb_robot.joint.set_client(self._planning_client_id)\n pb_robot.link.set_client(self._planning_client_id)\n pb_robot.panda.set_client(self._planning_client_id)\n pb_robot.planning.set_client(self._planning_client_id)\n pb_robot.utils.set_client(self._planning_client_id)\n 
pb_robot.viz.set_client(self._planning_client_id)\n\n def reset_world(self):\n \"\"\" Resets the planning world to its original configuration \"\"\"\n print('Resetting world')\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print('Done')\n\n def _get_initial_pddl_state(self):\n \"\"\"\n Get the PDDL representation of the world between experiments. This\n method assumes that all blocks are on the table. We will always \"clean\n up\" an experiment by moving blocks away from the platform after an\n experiment.\n \"\"\"\n fixed = [self.table, self.platform_table, self.platform_leg, self.frame\n ]\n conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.\n GetJointValues())\n print('Initial configuration:', conf.configuration)\n init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (\n 'AtConf', conf), ('HandEmpty',)]\n self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.\n get_base_link_pose())\n init += [('Pose', self.table, self.table_pose), ('AtPose', self.\n table, self.table_pose)]\n for body in self.pddl_blocks:\n print(type(body), body)\n pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())\n init += [('Graspable', body), ('Pose', body, pose), ('AtPose',\n body, pose), ('Block', body), ('On', body, self.table), (\n 'Supported', body, pose, self.table, self.table_pose)]\n if not self.platform_table is None:\n platform_pose = pb_robot.vobj.BodyPose(self.platform_table,\n self.platform_table.get_base_link_pose())\n init += [('Pose', self.platform_table, platform_pose), (\n 'AtPose', self.platform_table, platform_pose)]\n init += [('Block', self.platform_table)]\n init += [('Table', self.table)]\n return init\n\n def _get_observed_pose(self, 
pddl_block, action):\n \"\"\"\n This pose should be relative to the base of the platform leg to\n agree with the simulation. The two block representations will have\n different orientation but their positions should be the same.\n \"\"\"\n block_transform = pddl_block.get_base_link_transform()\n platform_transform = self.platform_leg.get_base_link_transform()\n platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0\n rel_transform = numpy.linalg.inv(platform_transform) @ block_transform\n end_pose = pb_robot.geometry.pose_from_tform(rel_transform)\n end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))\n end_pose = add_noise(end_pose, self.noise * numpy.eye(3))\n return end_pose\n\n def _update_block_poses(self, find_moved=False):\n \"\"\" Use the global world cameras to update the positions of the blocks \"\"\"\n try:\n resp = self._get_block_poses_world()\n named_poses = resp.poses\n except:\n import sys\n print('Service call to get block poses failed. Exiting.')\n sys.exit()\n n_found = 0\n for pddl_block_name, pddl_block in self.pddl_block_lookup.items():\n for named_pose in named_poses:\n if named_pose.block_id == pddl_block_name.split('_')[-1]:\n pose = named_pose.pose.pose\n if pose.position.x < 0.05:\n continue\n n_found += 1\n position = (pose.position.x, pose.position.y, pose.\n position.z)\n orientation = (pose.orientation.x, pose.orientation.y,\n pose.orientation.z, pose.orientation.w)\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n if not self.use_planning_server:\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n if find_moved and n_found != len(self.moved_blocks):\n input(\n 'Could not find all the moved blocks. 
Please reposition blocks outside of the camera view and hit enter to continue.'\n )\n self._update_block_poses(find_moved=True)\n return\n for _, pddl_block in self.pddl_block_lookup.items():\n if pb_robot.collisions.body_collision(pddl_block, self.table):\n print('Collision with table and block:', pddl_block.\n readableName)\n position, orientation = pddl_block.get_base_link_pose()\n stable_z = pb_robot.placements.stable_z(pddl_block, self.table)\n position = position[0], position[1], stable_z\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=False)\n for ix in range(len(block_ixs)):\n bottom_block = self.pddl_blocks[block_ixs[ix]]\n for jx in range(ix + 1, len(block_ixs)):\n top_block = self.pddl_blocks[block_ixs[jx]]\n dist_moved = 0\n while pb_robot.collisions.body_collision(bottom_block,\n top_block):\n print('Collision with bottom %s and top %s:' % (\n bottom_block.readableName, top_block.readableName))\n position, orientation = top_block.get_base_link_pose()\n stable_z = position[2] + 0.001\n dist_moved += 0.001\n if self.real and dist_moved > 0.04:\n print(\n f'Found blocks {bottom_block} and {top_block} in collision'\n )\n input(\n 'Manually move the blocks and press Enter to continue'\n )\n self._update_block_poses(find_moved=False)\n return\n position = position[0], position[1], stable_z\n self.execute()\n top_block.set_base_link_pose((position, orientation))\n self.plan()\n top_block.set_base_link_pose((position, orientation))\n\n def build_planning_problem(self, tower, base_xy):\n \"\"\" Builds the initial conditions for planning \"\"\"\n self.moved_blocks = set()\n tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]\n tower_block_order = 
[self.pddl_blocks.index(b) for b in tower_pddl]\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n else:\n pddl_problems = []\n base_block = self.pddl_block_lookup[tower[0].name]\n base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z\n base_pose = base_pos, tower[0].rotation\n base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)\n if self.use_planning_server:\n base_block_ros = BodyInfo()\n base_block_ros.name = base_block.readableName\n base_block_ros.stack = True\n pose_to_ros(base_pose, base_block_ros.pose)\n ros_req.goal_state.append(base_block_ros)\n else:\n pddl_problems.append((self.table, base_block, (base_pos, tower[\n 0].rotation)))\n for b_ix in range(1, len(tower)):\n bottom_block = tower[b_ix - 1]\n bottom_pose = bottom_block.pose.pos, bottom_block.rotation\n bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)\n top_block = tower[b_ix]\n top_pose = top_block.pose.pos, top_block.rotation\n top_tform = pb_robot.geometry.tform_from_pose(top_pose)\n rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform\n top_pddl = self.pddl_block_lookup[top_block.name]\n bottom_pddl = self.pddl_block_lookup[bottom_block.name]\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = top_pddl.readableName\n block_ros.base_obj = bottom_pddl.readableName\n transform_to_ros(rel_tform, block_ros.pose)\n block_ros.is_rel_pose = True\n block_ros.stack = True\n ros_req.goal_state.append(block_ros)\n else:\n init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]\n goal_terms = [('On', top_pddl, 
bottom_pddl)]\n pddl_problems.append((bottom_pddl, top_pddl, rel_tform))\n for ix in reversed(tower_block_order):\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def build_reset_problem(self):\n \"\"\" Builds the initial conditions for a tower reset given a set of moved blocks \"\"\"\n print('Resetting blocks...')\n print('Moved Blocks:', self.moved_blocks)\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=True)\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def 
simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0, \n 0.5), ignore_resets=False):\n \"\"\"\n Simulates a tower stacking and unstacking by requesting plans from a separate planning server\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n for block in tower:\n print('Block:', block.name)\n print('Pose:', block.pose)\n print('Dims:', block.dimensions)\n print('CoM:', block.com)\n print('Rotations:', block.rotation)\n print('-----')\n if self.use_vision:\n self._update_block_poses()\n self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks\n ]\n planning_prob = self.build_planning_problem(tower, base_xy)\n success, stack_stable, reset_stable, num_success, fatal = (self.\n plan_and_execute(planning_prob, real, T, stack=True,\n ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n while not success and not fatal:\n print(\n f'Got recoverable failure. 
Replanning from step index {num_success}.'\n )\n if self.use_planning_server:\n from tamp.ros_utils import block_init_to_ros\n if self.real:\n planning_prob.robot_config.angles = (self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n else:\n planning_prob.robot_config.angles = (self.robot.arm.\n GetJointValues())\n planning_prob.init_state = block_init_to_ros(self.pddl_blocks)\n if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):\n planning_prob.held_block.name = (self.last_obj_held.\n body.readableName)\n transform_to_ros(self.last_obj_held.grasp_objF,\n planning_prob.held_block.pose)\n success, stack_stable, reset_stable, num_success, fatal = (self\n .plan_and_execute(planning_prob, real, T, stack=True,\n start_idx=num_success, ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n num_stack_success = min(len(tower), num_success)\n if not ignore_resets:\n try:\n if not (stack_stable and reset_stable):\n if self.use_vision and not stack_stable:\n self._update_block_poses(find_moved=True)\n planning_prob = self.build_reset_problem()\n reset_fatal = False\n num_reset_success = 0\n while len(self.moved_blocks) > 0 and not reset_fatal:\n print(f'Resetting {len(self.moved_blocks)} blocks.')\n (reset_success, _, reset_stable, num_reset_success,\n reset_fatal) = (self.plan_and_execute(\n planning_prob, real, T, stack=False, start_idx=\n num_reset_success))\n except Exception as e:\n print('Planning/execution failed during tower reset.')\n print(e)\n return success, stack_stable, num_stack_success\n\n def plan_and_execute(self, planning_prob, real=False, T=2500, stack=\n True, start_idx=0, ignore_resets=False):\n \"\"\"\n Requests a PDDLStream plan from a planning server and executes the resulting plan\n Returns:\n success : Flag for whether the plan execution succeeded\n stack_stable : Flag for whether stacking a 
stable tower was successful\n reset_stable : Flag for whether resetting a tower was successful\n num_success : Progress (in number of steps) of successful tasks\n fatal : Flag for whether the error was fatal (True) or recoverable (False)\n start_idx : Start index of planning (for recovering from partial plans)\n ignore_resets : Flag for whether to stop after resets\n \"\"\"\n num_success = start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n if self.use_planning_server:\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n while num_success < num_steps:\n try:\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state\n [num_success].name]\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print('Getting a plan from server...')\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print('Planning failed on server side.')\n if ros_req.goal_state[num_success].stack:\n print(f'Failed during stacking {query_block}')\n fatal = True\n else:\n print(f'Failed during resetting {query_block}')\n input(\n 'Manually reset the blocks and press Enter to continue'\n )\n if real:\n self._update_block_poses()\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.\n execution_robot, self.pddl_block_lookup)\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n self._add_text('Planning block placement')\n self.plan()\n saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if\n b != 
blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps / 2\n ) and self.alternate_orientations:\n init += [('Reset',)]\n goal_terms.append(('AtHome', blk))\n else:\n init += [('Pose', blk, blk_pose), ('Supported',\n blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n pddl_info = get_pddlstream_info(self.robot, fixed_objs,\n self.pddl_blocks, add_slanted_grasps=True,\n approach_frame='global', use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal,\n search_sample_ratio=1.0, max_time=INF)\n if plan is None:\n print('\\nFailed to plan\\n')\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n saved_world.restore()\n print('\\nGot plan:')\n print(plan)\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=\n False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False,\n prompt=False, obstacles=obstacles,\n sim_fatal_failure_prob=0.0,\n sim_recoverable_failure_prob=0.0)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n if not real:\n self.step_simulation(T, vis_frames=False)\n if stack:\n stable = self.check_stability(real, query_block,\n desired_pose)\n else:\n stable = True\n if stable == 0.0:\n prompt = input(\n 'Tower NOT stable. Is this true? 
[y: Unstable / n: Stable]'\n )\n if prompt == 'n':\n stable = 1.0\n if stable == 0.0:\n print('Unstable after execution!')\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps / 2:\n print('Completed tower stack!')\n stack_stable = True\n stack = False\n if ignore_resets:\n return (True, stack_stable, reset_stable,\n num_success, False)\n elif num_success == num_steps:\n print('Completed tower reset!')\n reset_stable = True\n return (True, stack_stable, reset_stable,\n num_success, False)\n except ExecutionFailure as e:\n print('Planning/execution failed.')\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal\n\n def check_stability(self, real, block_pddl, desired_pose, max_tries=2):\n if self.use_vision:\n try:\n poses = self._get_block_poses_wrist().poses\n except:\n print(\n 'Service call to get block poses failed during check stability. 
Exiting.'\n )\n sys.exit()\n visible = False\n for named_pose in poses:\n if named_pose.block_id in block_pddl.readableName.split('_')[-1\n ]:\n visible = True\n pose = named_pose.pose.pose\n des_pos = desired_pose[0]\n obs_pos = pose.position.x, pose.position.y, pose.position.z\n print('[Check Stability] Desired Pos:', des_pos)\n print('[Check Stability] Detected Pos:', obs_pos)\n dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.\n array(des_pos))\n print(\n f'[Check Stability] Position Distance (>0.04): {dist}')\n if dist > 0.04:\n return 0.0\n orn = desired_pose[1]\n obs_orn = pyquaternion.Quaternion(pose.orientation.w,\n pose.orientation.x, pose.orientation.y, pose.\n orientation.z)\n des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1\n ], orn[2])\n angle = (des_orn.inverse * obs_orn).angle\n angle = numpy.abs(numpy.rad2deg(angle))\n print(\n f'[Check Stability] Orientation Distance (> 15): {angle}'\n )\n if angle > 15:\n return 0.0\n if not visible:\n print('[Check Stability] Object not visible to camera.')\n return 0.0\n else:\n end_pose = block_pddl.get_base_link_point()\n dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(\n desired_pose[0]))\n if dist > 0.01:\n print('Unstable!')\n return 0.0\n return 1.0\n\n def validate_ros_plan(self, ros_resp, tgt_block):\n \"\"\" Validates a ROS plan to move a block against the expected target block name \"\"\"\n if len(ros_resp.plan) == 0:\n return True\n else:\n plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']\n if len(plan_blocks) > 0:\n plan_block = plan_blocks[0]\n else:\n return False\n print(\n f'Received plan to move {plan_block} and expected to move {tgt_block}'\n )\n return tgt_block.readableName == plan_block\n\n def robot_state_callback(self, msg):\n \"\"\" Processes robot state errors and raises execution failures for planning \"\"\"\n cur_time = time.time()\n if cur_time - self.arm_last_error_time < self.arm_error_check_time:\n return\n self.arm_last_error_time = 
cur_time\n cur_errors = msg.current_errors\n if cur_errors.communication_constraints_violation:\n reason = 'Communication constraints violation detected!'\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_position_limits_violation:\n reason = 'Joint position limits violation detected!'\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_motion_generator_position_limits_violation:\n reason = (\n 'Joint motion generator position limits violation detected!')\n raise ExecutionFailure(reason=reason, fatal=True)\n\n def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):\n \"\"\" Service callback function to plan and execute a tower from active learning script \"\"\"\n from stacking_ros.srv import PlanTowerResponse\n from tamp.ros_utils import ros_to_tower\n tower = ros_to_tower(ros_req.tower_info)\n success, stable, num_stack_stable = self.simulate_tower(tower, True,\n real=self.real, base_xy=base_xy)\n resp = PlanTowerResponse()\n resp.success = success\n resp.stable = stable\n resp.num_stack_stable = num_stack_stable\n return resp\n\n def step_simulation(self, T, vis_frames=False, lifeTime=0.1):\n p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)\n p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)\n q = self.robot.get_joint_positions()\n for _ in range(T):\n p.stepSimulation(physicsClientId=self._execution_client_id)\n p.stepSimulation(physicsClientId=self._planning_client_id)\n self.execute()\n self.execution_robot.set_joint_positions(self.robot.joints, q)\n self.plan()\n self.robot.set_joint_positions(self.robot.joints, q)\n time.sleep(1 / 2400.0)\n if vis_frames:\n length = 0.1\n for pddl_block in self.pddl_blocks:\n pos, quat = pddl_block.get_pose()\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,\n 
lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n\n def simulate_action(self, action, block_ix, T=50, vis_sim=False,\n vis_placement=False):\n \"\"\"\n Perform the given action to with the given block. An observation\n should be returned in the reference frame of the platform.\n :param action: Place action which describes the relative pose of the block to the platform surface.\n :param real_block: Belief representation of the block to perform the action on.\n :param T: How many timesteps to simulate the block falling for.\n :param vis_sim: Ununsed.\n :return: (action, T, end_pose) End pose should be TODO: what frame?\n \n TODO: Not sure if this method works at the moment...\n \"\"\"\n assert self.platform_table is not None\n real_block = self.belief_blocks[block_ix]\n pddl_block = self.pddl_blocks[block_ix]\n original_pose = pddl_block.get_base_link_pose()\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))\n rotated_block = get_rotated_block(real_block)\n x = action.pos[0]\n y = action.pos[1]\n z = self.platform_table.get_dimensions()[2\n ] / 2.0 + rotated_block.dimensions[2] / 2\n tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0, \n 0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])\n tform[0:3, 0:3] = action.rot.as_matrix()\n if vis_placement:\n surface_tform = pb_robot.geometry.tform_from_pose(self.\n platform_table.get_base_link_pose())\n body_tform = surface_tform @ tform\n length, lifeTime = 0.2, 0.0\n pos, quat = pb_robot.geometry.pose_from_tform(body_tform)\n new_x = 
transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)\n init += [('RelPose', pddl_block, self.platform_table, tform)]\n goal = 'On', pddl_block, self.platform_table\n print('Init:', init)\n print('Goal:', goal)\n self.plan_and_execute(init, goal, search_sample_ratio=1000)\n self.step_simulation(T)\n end_pose = self._get_observed_pose(pddl_block, action)\n observation = action, T, end_pose\n self.step_simulation(500 - T)\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)\n init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,\n goal_pose, self.table, self.table_pose)]\n goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,\n self.table)\n print('Init:', init)\n print('Goal:', goal)\n success = self.plan_and_execute(init, goal, max_time=100.0,\n search_sample_ratio=1000)\n return observation\n\n\nclass PandaClientAgent:\n \"\"\"\n Lightweight client to call a PandaAgent as a service for active learning\n \"\"\"\n\n def __init__(self):\n import rospy\n rospy.init_node('panda_client')\n self.restart_services()\n\n def restart_services(self):\n import rospy\n from stacking_ros.srv import PlanTower\n print('Waiting for Panda Agent server...')\n rospy.wait_for_service('/plan_tower')\n print('Done')\n self.client = rospy.ServiceProxy('/plan_tower', PlanTower)\n\n def simulate_tower(self, tower, vis, real=False):\n \"\"\" \n Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.\n\n Returns:\n success : Flag indicating 
success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n from stacking_ros.srv import PlanTowerRequest\n from tamp.ros_utils import tower_to_ros, ros_to_tower\n request = PlanTowerRequest()\n request.tower_info = tower_to_ros(tower)\n if vis:\n w = World(tower)\n env = Environment([w], vis_sim=True, vis_frames=True)\n env.step(vis_frames=True)\n for b in tower:\n print('----- Block info -----')\n print(b.name)\n print(b.dimensions)\n print(b.pose)\n print(b.rotation)\n response = self.client.call(request)\n if vis:\n env.disconnect()\n return response.success, response.stable, response.num_stack_stable\n",
"step-4": "import sys\nimport time\nimport numpy\nimport pb_robot\nimport pyquaternion\nimport pybullet as p\nfrom copy import deepcopy\nfrom actions import PlaceAction, make_platform_world\nfrom block_utils import get_adversarial_blocks, rotation_group, ZERO_POS, Quaternion, get_rotated_block, Pose, add_noise, Environment, Position, World\nfrom pddlstream.utils import INF\nfrom pybullet_utils import transformation\nimport tamp.primitives\nfrom tamp.misc import setup_panda_world, get_pddl_block_lookup, print_planning_problem, ExecuteActions, ExecutionFailure\nfrom tamp.pddlstream_utils import get_pddlstream_info, pddlstream_plan\n\n\nclass PandaAgent:\n\n def __init__(self, blocks, noise=5e-05, block_init_xy_poses=None,\n use_platform=False, use_vision=False, real=False,\n use_planning_server=False, use_learning_server=False,\n alternate_orientations=False):\n \"\"\"\n Build the Panda world in PyBullet and set up the PDDLStream solver.\n The Panda world should in include the given blocks as well as a\n platform which can be used in experimentation.\n :param use_platform: Boolean stating whether to include the platform to\n push blocks off of or not.\n :param use_vision: Boolean stating whether to use vision to detect blocks.\n :param use_planning_server: Boolean stating whether to use the separate\n ROS planning service server.\n :param use_learning_server: Boolean stating whether to host a ROS service\n server to drive planning from active learning script.\n :param alternate_orientations: Boolean stating whether blocks can be replaced in \n their home positions at alternate orientations.\n\n If you are using the ROS action server, you must start it in a separate terminal:\n rosrun stacking_ros planning_server.py\n \"\"\"\n self.real = real\n self.use_vision = use_vision\n self.use_platform = use_platform\n self.use_planning_server = use_planning_server\n self.use_learning_server = use_learning_server\n self.alternate_orientations = alternate_orientations\n 
self._planning_client_id = pb_robot.utils.connect(use_gui=False)\n self.plan()\n pb_robot.utils.set_default_camera()\n self.robot = pb_robot.panda.Panda()\n self.robot.arm.hand.Open()\n self.belief_blocks = blocks\n (self.pddl_blocks, self.platform_table, self.platform_leg, self.\n table, self.frame, self.wall) = (setup_panda_world(self.robot,\n blocks, block_init_xy_poses, use_platform=use_platform))\n self.fixed = [self.platform_table, self.platform_leg, self.table,\n self.frame, self.wall]\n self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks\n )\n self.orig_joint_angles = self.robot.arm.GetJointValues()\n self.orig_block_poses = [b.get_base_link_pose() for b in self.\n pddl_blocks]\n poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]\n self._execution_client_id = pb_robot.utils.connect(use_gui=True)\n self.execute()\n pb_robot.utils.set_default_camera()\n self.execution_robot = pb_robot.panda.Panda()\n self.execution_robot.arm.hand.Open()\n setup_panda_world(self.execution_robot, blocks, poses, use_platform\n =use_platform)\n if (self.use_vision or self.use_planning_server or self.\n use_learning_server or real):\n import rospy\n try:\n rospy.init_node('panda_agent')\n except:\n print('ROS Node already created')\n if real:\n from franka_interface import ArmInterface\n self.real_arm = ArmInterface()\n from franka_core_msgs.msg import RobotState\n state_topic = (\n '/franka_ros_interface/custom_franka_state_controller/robot_state'\n )\n self.arm_last_error_time = time.time()\n self.arm_error_check_time = 3.0\n self.arm_state_subscriber = rospy.Subscriber(state_topic,\n RobotState, self.robot_state_callback)\n if self.use_vision:\n from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist\n rospy.wait_for_service('get_block_poses_world')\n rospy.wait_for_service('get_block_poses_wrist')\n self._get_block_poses_world = rospy.ServiceProxy(\n 
'get_block_poses_world', GetBlockPosesWorld)\n self._get_block_poses_wrist = rospy.ServiceProxy(\n 'get_block_poses_wrist', GetBlockPosesWrist)\n self.last_obj_held = None\n if self.use_planning_server:\n from stacking_ros.srv import GetPlan, SetPlanningState\n from tamp.ros_utils import goal_to_ros, ros_to_task_plan\n print('Waiting for planning server...')\n rospy.wait_for_service('get_latest_plan')\n self.goal_to_ros = goal_to_ros\n self.ros_to_task_plan = ros_to_task_plan\n self.init_state_client = rospy.ServiceProxy('/reset_planning',\n SetPlanningState)\n self.get_plan_client = rospy.ServiceProxy('/get_latest_plan',\n GetPlan)\n print('Done!')\n if self.use_learning_server:\n from stacking_ros.srv import PlanTower\n self.learning_server = rospy.Service('/plan_tower', PlanTower,\n self.learning_server_callback)\n print('Learning server started!')\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='global',\n use_vision=self.use_vision)\n self.noise = noise\n self.txt_id = None\n self.plan()\n\n def _add_text(self, txt):\n self.execute()\n pb_robot.viz.remove_all_debug()\n self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75),\n size=2)\n self.plan()\n\n def execute(self):\n self.state = 'execute'\n pb_robot.aabb.set_client(self._execution_client_id)\n pb_robot.body.set_client(self._execution_client_id)\n pb_robot.collisions.set_client(self._execution_client_id)\n pb_robot.geometry.set_client(self._execution_client_id)\n pb_robot.grasp.set_client(self._execution_client_id)\n pb_robot.joint.set_client(self._execution_client_id)\n pb_robot.link.set_client(self._execution_client_id)\n pb_robot.panda.set_client(self._execution_client_id)\n pb_robot.planning.set_client(self._execution_client_id)\n pb_robot.utils.set_client(self._execution_client_id)\n pb_robot.viz.set_client(self._execution_client_id)\n\n def plan(self):\n if self.use_planning_server:\n return\n self.state = 
'plan'\n pb_robot.aabb.set_client(self._planning_client_id)\n pb_robot.body.set_client(self._planning_client_id)\n pb_robot.collisions.set_client(self._planning_client_id)\n pb_robot.geometry.set_client(self._planning_client_id)\n pb_robot.grasp.set_client(self._planning_client_id)\n pb_robot.joint.set_client(self._planning_client_id)\n pb_robot.link.set_client(self._planning_client_id)\n pb_robot.panda.set_client(self._planning_client_id)\n pb_robot.planning.set_client(self._planning_client_id)\n pb_robot.utils.set_client(self._planning_client_id)\n pb_robot.viz.set_client(self._planning_client_id)\n\n def reset_world(self):\n \"\"\" Resets the planning world to its original configuration \"\"\"\n print('Resetting world')\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print('Done')\n\n def _get_initial_pddl_state(self):\n \"\"\"\n Get the PDDL representation of the world between experiments. This\n method assumes that all blocks are on the table. 
We will always \"clean\n up\" an experiment by moving blocks away from the platform after an\n experiment.\n \"\"\"\n fixed = [self.table, self.platform_table, self.platform_leg, self.frame\n ]\n conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.\n GetJointValues())\n print('Initial configuration:', conf.configuration)\n init = [('CanMove',), ('Conf', conf), ('StartConf', conf), (\n 'AtConf', conf), ('HandEmpty',)]\n self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.\n get_base_link_pose())\n init += [('Pose', self.table, self.table_pose), ('AtPose', self.\n table, self.table_pose)]\n for body in self.pddl_blocks:\n print(type(body), body)\n pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())\n init += [('Graspable', body), ('Pose', body, pose), ('AtPose',\n body, pose), ('Block', body), ('On', body, self.table), (\n 'Supported', body, pose, self.table, self.table_pose)]\n if not self.platform_table is None:\n platform_pose = pb_robot.vobj.BodyPose(self.platform_table,\n self.platform_table.get_base_link_pose())\n init += [('Pose', self.platform_table, platform_pose), (\n 'AtPose', self.platform_table, platform_pose)]\n init += [('Block', self.platform_table)]\n init += [('Table', self.table)]\n return init\n\n def _get_observed_pose(self, pddl_block, action):\n \"\"\"\n This pose should be relative to the base of the platform leg to\n agree with the simulation. 
The two block representations will have\n different orientation but their positions should be the same.\n \"\"\"\n block_transform = pddl_block.get_base_link_transform()\n platform_transform = self.platform_leg.get_base_link_transform()\n platform_transform[2, 3] -= self.platform_leg.get_dimensions()[2] / 2.0\n rel_transform = numpy.linalg.inv(platform_transform) @ block_transform\n end_pose = pb_robot.geometry.pose_from_tform(rel_transform)\n end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))\n end_pose = add_noise(end_pose, self.noise * numpy.eye(3))\n return end_pose\n\n def _update_block_poses(self, find_moved=False):\n \"\"\" Use the global world cameras to update the positions of the blocks \"\"\"\n try:\n resp = self._get_block_poses_world()\n named_poses = resp.poses\n except:\n import sys\n print('Service call to get block poses failed. Exiting.')\n sys.exit()\n n_found = 0\n for pddl_block_name, pddl_block in self.pddl_block_lookup.items():\n for named_pose in named_poses:\n if named_pose.block_id == pddl_block_name.split('_')[-1]:\n pose = named_pose.pose.pose\n if pose.position.x < 0.05:\n continue\n n_found += 1\n position = (pose.position.x, pose.position.y, pose.\n position.z)\n orientation = (pose.orientation.x, pose.orientation.y,\n pose.orientation.z, pose.orientation.w)\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n if not self.use_planning_server:\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n if find_moved and n_found != len(self.moved_blocks):\n input(\n 'Could not find all the moved blocks. 
Please reposition blocks outside of the camera view and hit enter to continue.'\n )\n self._update_block_poses(find_moved=True)\n return\n for _, pddl_block in self.pddl_block_lookup.items():\n if pb_robot.collisions.body_collision(pddl_block, self.table):\n print('Collision with table and block:', pddl_block.\n readableName)\n position, orientation = pddl_block.get_base_link_pose()\n stable_z = pb_robot.placements.stable_z(pddl_block, self.table)\n position = position[0], position[1], stable_z\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=False)\n for ix in range(len(block_ixs)):\n bottom_block = self.pddl_blocks[block_ixs[ix]]\n for jx in range(ix + 1, len(block_ixs)):\n top_block = self.pddl_blocks[block_ixs[jx]]\n dist_moved = 0\n while pb_robot.collisions.body_collision(bottom_block,\n top_block):\n print('Collision with bottom %s and top %s:' % (\n bottom_block.readableName, top_block.readableName))\n position, orientation = top_block.get_base_link_pose()\n stable_z = position[2] + 0.001\n dist_moved += 0.001\n if self.real and dist_moved > 0.04:\n print(\n f'Found blocks {bottom_block} and {top_block} in collision'\n )\n input(\n 'Manually move the blocks and press Enter to continue'\n )\n self._update_block_poses(find_moved=False)\n return\n position = position[0], position[1], stable_z\n self.execute()\n top_block.set_base_link_pose((position, orientation))\n self.plan()\n top_block.set_base_link_pose((position, orientation))\n\n def build_planning_problem(self, tower, base_xy):\n \"\"\" Builds the initial conditions for planning \"\"\"\n self.moved_blocks = set()\n tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]\n tower_block_order = 
[self.pddl_blocks.index(b) for b in tower_pddl]\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n else:\n pddl_problems = []\n base_block = self.pddl_block_lookup[tower[0].name]\n base_pos = base_xy[0], base_xy[1], tower[0].pose.pos.z\n base_pose = base_pos, tower[0].rotation\n base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)\n if self.use_planning_server:\n base_block_ros = BodyInfo()\n base_block_ros.name = base_block.readableName\n base_block_ros.stack = True\n pose_to_ros(base_pose, base_block_ros.pose)\n ros_req.goal_state.append(base_block_ros)\n else:\n pddl_problems.append((self.table, base_block, (base_pos, tower[\n 0].rotation)))\n for b_ix in range(1, len(tower)):\n bottom_block = tower[b_ix - 1]\n bottom_pose = bottom_block.pose.pos, bottom_block.rotation\n bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)\n top_block = tower[b_ix]\n top_pose = top_block.pose.pos, top_block.rotation\n top_tform = pb_robot.geometry.tform_from_pose(top_pose)\n rel_tform = numpy.linalg.inv(bottom_tform) @ top_tform\n top_pddl = self.pddl_block_lookup[top_block.name]\n bottom_pddl = self.pddl_block_lookup[bottom_block.name]\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = top_pddl.readableName\n block_ros.base_obj = bottom_pddl.readableName\n transform_to_ros(rel_tform, block_ros.pose)\n block_ros.is_rel_pose = True\n block_ros.stack = True\n ros_req.goal_state.append(block_ros)\n else:\n init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]\n goal_terms = [('On', top_pddl, 
bottom_pddl)]\n pddl_problems.append((bottom_pddl, top_pddl, rel_tform))\n for ix in reversed(tower_block_order):\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def build_reset_problem(self):\n \"\"\" Builds the initial conditions for a tower reset given a set of moved blocks \"\"\"\n print('Resetting blocks...')\n print('Moved Blocks:', self.moved_blocks)\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2\n ], reverse=True)\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self\n .real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n def 
simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0.0, \n 0.5), ignore_resets=False):\n \"\"\"\n Simulates a tower stacking and unstacking by requesting plans from a separate planning server\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n for block in tower:\n print('Block:', block.name)\n print('Pose:', block.pose)\n print('Dims:', block.dimensions)\n print('CoM:', block.com)\n print('Rotations:', block.rotation)\n print('-----')\n if self.use_vision:\n self._update_block_poses()\n self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks\n ]\n planning_prob = self.build_planning_problem(tower, base_xy)\n success, stack_stable, reset_stable, num_success, fatal = (self.\n plan_and_execute(planning_prob, real, T, stack=True,\n ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n while not success and not fatal:\n print(\n f'Got recoverable failure. 
Replanning from step index {num_success}.'\n )\n if self.use_planning_server:\n from tamp.ros_utils import block_init_to_ros\n if self.real:\n planning_prob.robot_config.angles = (self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n else:\n planning_prob.robot_config.angles = (self.robot.arm.\n GetJointValues())\n planning_prob.init_state = block_init_to_ros(self.pddl_blocks)\n if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):\n planning_prob.held_block.name = (self.last_obj_held.\n body.readableName)\n transform_to_ros(self.last_obj_held.grasp_objF,\n planning_prob.held_block.pose)\n success, stack_stable, reset_stable, num_success, fatal = (self\n .plan_and_execute(planning_prob, real, T, stack=True,\n start_idx=num_success, ignore_resets=ignore_resets))\n print(\n f'Completed tower stack with success: {success}, stable: {stack_stable}'\n )\n if reset_stable:\n print(f'Completed tower reset stable: {reset_stable}')\n num_stack_success = min(len(tower), num_success)\n if not ignore_resets:\n try:\n if not (stack_stable and reset_stable):\n if self.use_vision and not stack_stable:\n self._update_block_poses(find_moved=True)\n planning_prob = self.build_reset_problem()\n reset_fatal = False\n num_reset_success = 0\n while len(self.moved_blocks) > 0 and not reset_fatal:\n print(f'Resetting {len(self.moved_blocks)} blocks.')\n (reset_success, _, reset_stable, num_reset_success,\n reset_fatal) = (self.plan_and_execute(\n planning_prob, real, T, stack=False, start_idx=\n num_reset_success))\n except Exception as e:\n print('Planning/execution failed during tower reset.')\n print(e)\n return success, stack_stable, num_stack_success\n\n def plan_and_execute(self, planning_prob, real=False, T=2500, stack=\n True, start_idx=0, ignore_resets=False):\n \"\"\"\n Requests a PDDLStream plan from a planning server and executes the resulting plan\n Returns:\n success : Flag for whether the plan execution succeeded\n stack_stable : Flag for whether stacking a 
stable tower was successful\n reset_stable : Flag for whether resetting a tower was successful\n num_success : Progress (in number of steps) of successful tasks\n fatal : Flag for whether the error was fatal (True) or recoverable (False)\n start_idx : Start index of planning (for recovering from partial plans)\n ignore_resets : Flag for whether to stop after resets\n \"\"\"\n num_success = start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n if self.use_planning_server:\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n while num_success < num_steps:\n try:\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state\n [num_success].name]\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print('Getting a plan from server...')\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print('Planning failed on server side.')\n if ros_req.goal_state[num_success].stack:\n print(f'Failed during stacking {query_block}')\n fatal = True\n else:\n print(f'Failed during resetting {query_block}')\n input(\n 'Manually reset the blocks and press Enter to continue'\n )\n if real:\n self._update_block_poses()\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.\n execution_robot, self.pddl_block_lookup)\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n self._add_text('Planning block placement')\n self.plan()\n saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if\n b != 
blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps / 2\n ) and self.alternate_orientations:\n init += [('Reset',)]\n goal_terms.append(('AtHome', blk))\n else:\n init += [('Pose', blk, blk_pose), ('Supported',\n blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n pddl_info = get_pddlstream_info(self.robot, fixed_objs,\n self.pddl_blocks, add_slanted_grasps=True,\n approach_frame='global', use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal,\n search_sample_ratio=1.0, max_time=INF)\n if plan is None:\n print('\\nFailed to plan\\n')\n fatal = False\n return (False, stack_stable, reset_stable,\n num_success, fatal)\n saved_world.restore()\n print('\\nGot plan:')\n print(plan)\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=\n False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False,\n prompt=False, obstacles=obstacles,\n sim_fatal_failure_prob=0.0,\n sim_recoverable_failure_prob=0.0)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n if not real:\n self.step_simulation(T, vis_frames=False)\n if stack:\n stable = self.check_stability(real, query_block,\n desired_pose)\n else:\n stable = True\n if stable == 0.0:\n prompt = input(\n 'Tower NOT stable. Is this true? 
[y: Unstable / n: Stable]'\n )\n if prompt == 'n':\n stable = 1.0\n if stable == 0.0:\n print('Unstable after execution!')\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps / 2:\n print('Completed tower stack!')\n stack_stable = True\n stack = False\n if ignore_resets:\n return (True, stack_stable, reset_stable,\n num_success, False)\n elif num_success == num_steps:\n print('Completed tower reset!')\n reset_stable = True\n return (True, stack_stable, reset_stable,\n num_success, False)\n except ExecutionFailure as e:\n print('Planning/execution failed.')\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.\n convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal\n\n def check_stability(self, real, block_pddl, desired_pose, max_tries=2):\n if self.use_vision:\n try:\n poses = self._get_block_poses_wrist().poses\n except:\n print(\n 'Service call to get block poses failed during check stability. 
Exiting.'\n )\n sys.exit()\n visible = False\n for named_pose in poses:\n if named_pose.block_id in block_pddl.readableName.split('_')[-1\n ]:\n visible = True\n pose = named_pose.pose.pose\n des_pos = desired_pose[0]\n obs_pos = pose.position.x, pose.position.y, pose.position.z\n print('[Check Stability] Desired Pos:', des_pos)\n print('[Check Stability] Detected Pos:', obs_pos)\n dist = numpy.linalg.norm(numpy.array(obs_pos) - numpy.\n array(des_pos))\n print(\n f'[Check Stability] Position Distance (>0.04): {dist}')\n if dist > 0.04:\n return 0.0\n orn = desired_pose[1]\n obs_orn = pyquaternion.Quaternion(pose.orientation.w,\n pose.orientation.x, pose.orientation.y, pose.\n orientation.z)\n des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1\n ], orn[2])\n angle = (des_orn.inverse * obs_orn).angle\n angle = numpy.abs(numpy.rad2deg(angle))\n print(\n f'[Check Stability] Orientation Distance (> 15): {angle}'\n )\n if angle > 15:\n return 0.0\n if not visible:\n print('[Check Stability] Object not visible to camera.')\n return 0.0\n else:\n end_pose = block_pddl.get_base_link_point()\n dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(\n desired_pose[0]))\n if dist > 0.01:\n print('Unstable!')\n return 0.0\n return 1.0\n\n def validate_ros_plan(self, ros_resp, tgt_block):\n \"\"\" Validates a ROS plan to move a block against the expected target block name \"\"\"\n if len(ros_resp.plan) == 0:\n return True\n else:\n plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == 'pick']\n if len(plan_blocks) > 0:\n plan_block = plan_blocks[0]\n else:\n return False\n print(\n f'Received plan to move {plan_block} and expected to move {tgt_block}'\n )\n return tgt_block.readableName == plan_block\n\n def robot_state_callback(self, msg):\n \"\"\" Processes robot state errors and raises execution failures for planning \"\"\"\n cur_time = time.time()\n if cur_time - self.arm_last_error_time < self.arm_error_check_time:\n return\n self.arm_last_error_time = 
cur_time\n cur_errors = msg.current_errors\n if cur_errors.communication_constraints_violation:\n reason = 'Communication constraints violation detected!'\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_position_limits_violation:\n reason = 'Joint position limits violation detected!'\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_motion_generator_position_limits_violation:\n reason = (\n 'Joint motion generator position limits violation detected!')\n raise ExecutionFailure(reason=reason, fatal=True)\n\n def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):\n \"\"\" Service callback function to plan and execute a tower from active learning script \"\"\"\n from stacking_ros.srv import PlanTowerResponse\n from tamp.ros_utils import ros_to_tower\n tower = ros_to_tower(ros_req.tower_info)\n success, stable, num_stack_stable = self.simulate_tower(tower, True,\n real=self.real, base_xy=base_xy)\n resp = PlanTowerResponse()\n resp.success = success\n resp.stable = stable\n resp.num_stack_stable = num_stack_stable\n return resp\n\n def step_simulation(self, T, vis_frames=False, lifeTime=0.1):\n p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)\n p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)\n q = self.robot.get_joint_positions()\n for _ in range(T):\n p.stepSimulation(physicsClientId=self._execution_client_id)\n p.stepSimulation(physicsClientId=self._planning_client_id)\n self.execute()\n self.execution_robot.set_joint_positions(self.robot.joints, q)\n self.plan()\n self.robot.set_joint_positions(self.robot.joints, q)\n time.sleep(1 / 2400.0)\n if vis_frames:\n length = 0.1\n for pddl_block in self.pddl_blocks:\n pos, quat = pddl_block.get_pose()\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lineWidth=3,\n 
lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lineWidth=3,\n lifeTime=lifeTime, physicsClientId=self.\n _execution_client_id)\n\n def simulate_action(self, action, block_ix, T=50, vis_sim=False,\n vis_placement=False):\n \"\"\"\n Perform the given action to with the given block. An observation\n should be returned in the reference frame of the platform.\n :param action: Place action which describes the relative pose of the block to the platform surface.\n :param real_block: Belief representation of the block to perform the action on.\n :param T: How many timesteps to simulate the block falling for.\n :param vis_sim: Ununsed.\n :return: (action, T, end_pose) End pose should be TODO: what frame?\n \n TODO: Not sure if this method works at the moment...\n \"\"\"\n assert self.platform_table is not None\n real_block = self.belief_blocks[block_ix]\n pddl_block = self.pddl_blocks[block_ix]\n original_pose = pddl_block.get_base_link_pose()\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=False, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n real_block.set_pose(Pose(ZERO_POS, Quaternion(*action.rot.as_quat())))\n rotated_block = get_rotated_block(real_block)\n x = action.pos[0]\n y = action.pos[1]\n z = self.platform_table.get_dimensions()[2\n ] / 2.0 + rotated_block.dimensions[2] / 2\n tform = numpy.array([[1.0, 0.0, 0.0, x], [0.0, 1.0, 0.0, y], [0.0, \n 0.0, 1.0, z], [0.0, 0.0, 0.0, 1.0]])\n tform[0:3, 0:3] = action.rot.as_matrix()\n if vis_placement:\n surface_tform = pb_robot.geometry.tform_from_pose(self.\n platform_table.get_base_link_pose())\n body_tform = surface_tform @ tform\n length, lifeTime = 0.2, 0.0\n pos, quat = pb_robot.geometry.pose_from_tform(body_tform)\n new_x = 
transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n p.addUserDebugLine(pos, new_x, [1, 0, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_y, [0, 1, 0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_z, [0, 0, 1], lifeTime=lifeTime)\n init += [('RelPose', pddl_block, self.platform_table, tform)]\n goal = 'On', pddl_block, self.platform_table\n print('Init:', init)\n print('Goal:', goal)\n self.plan_and_execute(init, goal, search_sample_ratio=1000)\n self.step_simulation(T)\n end_pose = self._get_observed_pose(pddl_block, action)\n observation = action, T, end_pose\n self.step_simulation(500 - T)\n self.pddl_info = get_pddlstream_info(self.robot, self.fixed, self.\n pddl_blocks, add_slanted_grasps=True, approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)\n init += [('Pose', pddl_block, goal_pose), ('Supported', pddl_block,\n goal_pose, self.table, self.table_pose)]\n goal = 'and', ('AtPose', pddl_block, goal_pose), ('On', pddl_block,\n self.table)\n print('Init:', init)\n print('Goal:', goal)\n success = self.plan_and_execute(init, goal, max_time=100.0,\n search_sample_ratio=1000)\n return observation\n\n\nclass PandaClientAgent:\n \"\"\"\n Lightweight client to call a PandaAgent as a service for active learning\n \"\"\"\n\n def __init__(self):\n import rospy\n rospy.init_node('panda_client')\n self.restart_services()\n\n def restart_services(self):\n import rospy\n from stacking_ros.srv import PlanTower\n print('Waiting for Panda Agent server...')\n rospy.wait_for_service('/plan_tower')\n print('Done')\n self.client = rospy.ServiceProxy('/plan_tower', PlanTower)\n\n def simulate_tower(self, tower, vis, real=False):\n \"\"\" \n Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.\n\n Returns:\n success : Flag indicating 
success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n from stacking_ros.srv import PlanTowerRequest\n from tamp.ros_utils import tower_to_ros, ros_to_tower\n request = PlanTowerRequest()\n request.tower_info = tower_to_ros(tower)\n if vis:\n w = World(tower)\n env = Environment([w], vis_sim=True, vis_frames=True)\n env.step(vis_frames=True)\n for b in tower:\n print('----- Block info -----')\n print(b.name)\n print(b.dimensions)\n print(b.pose)\n print(b.rotation)\n response = self.client.call(request)\n if vis:\n env.disconnect()\n return response.success, response.stable, response.num_stack_stable\n",
"step-5": "import sys\nimport time\nimport numpy\nimport pb_robot\nimport pyquaternion\nimport pybullet as p\nfrom copy import deepcopy\n\nfrom actions import PlaceAction, make_platform_world\nfrom block_utils import get_adversarial_blocks, rotation_group, ZERO_POS, \\\n Quaternion, get_rotated_block, Pose, add_noise, \\\n Environment, Position, World\nfrom pddlstream.utils import INF\nfrom pybullet_utils import transformation\nimport tamp.primitives\nfrom tamp.misc import setup_panda_world, get_pddl_block_lookup, \\\n print_planning_problem, ExecuteActions, ExecutionFailure\nfrom tamp.pddlstream_utils import get_pddlstream_info, pddlstream_plan\n\n\nclass PandaAgent:\n def __init__(self, blocks, noise=0.00005, block_init_xy_poses=None,\n use_platform=False, use_vision=False, real=False,\n use_planning_server=False, use_learning_server=False, \n alternate_orientations=False):\n \"\"\"\n Build the Panda world in PyBullet and set up the PDDLStream solver.\n The Panda world should in include the given blocks as well as a\n platform which can be used in experimentation.\n :param use_platform: Boolean stating whether to include the platform to\n push blocks off of or not.\n :param use_vision: Boolean stating whether to use vision to detect blocks.\n :param use_planning_server: Boolean stating whether to use the separate\n ROS planning service server.\n :param use_learning_server: Boolean stating whether to host a ROS service\n server to drive planning from active learning script.\n :param alternate_orientations: Boolean stating whether blocks can be replaced in \n their home positions at alternate orientations.\n\n If you are using the ROS action server, you must start it in a separate terminal:\n rosrun stacking_ros planning_server.py\n \"\"\"\n self.real = real\n self.use_vision = use_vision\n self.use_platform = use_platform\n self.use_planning_server = use_planning_server\n self.use_learning_server = use_learning_server\n self.alternate_orientations = 
alternate_orientations\n\n # Setup PyBullet instance to run in the background and handle planning/collision checking.\n self._planning_client_id = pb_robot.utils.connect(use_gui=False)\n self.plan()\n pb_robot.utils.set_default_camera()\n self.robot = pb_robot.panda.Panda()\n self.robot.arm.hand.Open()\n self.belief_blocks = blocks\n self.pddl_blocks, self.platform_table, self.platform_leg, self.table, self.frame, self.wall = setup_panda_world(self.robot,\n blocks,\n block_init_xy_poses,\n use_platform=use_platform)\n self.fixed = [self.platform_table, self.platform_leg, self.table, self.frame, self.wall]\n self.pddl_block_lookup = get_pddl_block_lookup(blocks, self.pddl_blocks)\n\n self.orig_joint_angles = self.robot.arm.GetJointValues()\n self.orig_block_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n\n # Setup PyBullet instance that only visualizes plan execution. State needs to match the planning instance.\n poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n poses = [Pose(Position(*p[0]), Quaternion(*p[1])) for p in poses]\n self._execution_client_id = pb_robot.utils.connect(use_gui=True)\n self.execute()\n pb_robot.utils.set_default_camera()\n self.execution_robot = pb_robot.panda.Panda()\n self.execution_robot.arm.hand.Open()\n setup_panda_world(self.execution_robot, blocks, poses, use_platform=use_platform)\n\n # Set up ROS plumbing if using features that require it\n if self.use_vision or self.use_planning_server or self.use_learning_server or real:\n import rospy\n try:\n rospy.init_node(\"panda_agent\")\n except:\n print(\"ROS Node already created\")\n\n # Create an arm interface\n if real:\n from franka_interface import ArmInterface\n self.real_arm = ArmInterface()\n\n from franka_core_msgs.msg import RobotState\n state_topic = \"/franka_ros_interface/custom_franka_state_controller/robot_state\"\n self.arm_last_error_time = time.time()\n self.arm_error_check_time = 3.0\n self.arm_state_subscriber = rospy.Subscriber(\n state_topic, 
RobotState, self.robot_state_callback)\n\n # Set initial poses of all blocks and setup vision ROS services.\n if self.use_vision:\n from panda_vision.srv import GetBlockPosesWorld, GetBlockPosesWrist\n rospy.wait_for_service('get_block_poses_world')\n rospy.wait_for_service('get_block_poses_wrist')\n self._get_block_poses_world = rospy.ServiceProxy('get_block_poses_world', GetBlockPosesWorld)\n self._get_block_poses_wrist = rospy.ServiceProxy('get_block_poses_wrist', GetBlockPosesWrist)\n\n # Start ROS clients and servers as needed\n self.last_obj_held = None\n if self.use_planning_server:\n from stacking_ros.srv import GetPlan, SetPlanningState\n from tamp.ros_utils import goal_to_ros, ros_to_task_plan\n\n print(\"Waiting for planning server...\")\n rospy.wait_for_service(\"get_latest_plan\")\n self.goal_to_ros = goal_to_ros\n self.ros_to_task_plan = ros_to_task_plan\n self.init_state_client = rospy.ServiceProxy(\n \"/reset_planning\", SetPlanningState)\n self.get_plan_client = rospy.ServiceProxy(\n \"/get_latest_plan\", GetPlan)\n print(\"Done!\")\n if self.use_learning_server:\n from stacking_ros.srv import PlanTower\n self.learning_server = rospy.Service(\n \"/plan_tower\", PlanTower, self.learning_server_callback)\n print(\"Learning server started!\")\n\n self.pddl_info = get_pddlstream_info(self.robot,\n self.fixed,\n self.pddl_blocks,\n add_slanted_grasps=False,\n approach_frame='global',\n use_vision=self.use_vision)\n\n self.noise = noise\n self.txt_id = None\n self.plan()\n\n\n def _add_text(self, txt):\n self.execute()\n pb_robot.viz.remove_all_debug()\n self.txt_id = pb_robot.viz.add_text(txt, position=(0, 0.25, 0.75), size=2)\n self.plan()\n\n\n def execute(self):\n self.state = 'execute'\n pb_robot.aabb.set_client(self._execution_client_id)\n pb_robot.body.set_client(self._execution_client_id)\n pb_robot.collisions.set_client(self._execution_client_id)\n pb_robot.geometry.set_client(self._execution_client_id)\n 
pb_robot.grasp.set_client(self._execution_client_id)\n pb_robot.joint.set_client(self._execution_client_id)\n pb_robot.link.set_client(self._execution_client_id)\n pb_robot.panda.set_client(self._execution_client_id)\n pb_robot.planning.set_client(self._execution_client_id)\n pb_robot.utils.set_client(self._execution_client_id)\n pb_robot.viz.set_client(self._execution_client_id)\n\n\n def plan(self):\n if self.use_planning_server:\n return\n self.state = 'plan'\n pb_robot.aabb.set_client(self._planning_client_id)\n pb_robot.body.set_client(self._planning_client_id)\n pb_robot.collisions.set_client(self._planning_client_id)\n pb_robot.geometry.set_client(self._planning_client_id)\n pb_robot.grasp.set_client(self._planning_client_id)\n pb_robot.joint.set_client(self._planning_client_id)\n pb_robot.link.set_client(self._planning_client_id)\n pb_robot.panda.set_client(self._planning_client_id)\n pb_robot.planning.set_client(self._planning_client_id)\n pb_robot.utils.set_client(self._planning_client_id)\n pb_robot.viz.set_client(self._planning_client_id)\n\n\n def reset_world(self):\n \"\"\" Resets the planning world to its original configuration \"\"\"\n print(\"Resetting world\")\n\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print(\"Done\")\n\n\n def _get_initial_pddl_state(self):\n \"\"\"\n Get the PDDL representation of the world between experiments. This\n method assumes that all blocks are on the table. 
We will always \"clean\n up\" an experiment by moving blocks away from the platform after an\n experiment.\n \"\"\"\n fixed = [self.table, self.platform_table, self.platform_leg, self.frame]\n conf = pb_robot.vobj.BodyConf(self.robot, self.robot.arm.GetJointValues())\n print('Initial configuration:', conf.configuration)\n init = [('CanMove',),\n ('Conf', conf),\n ('StartConf', conf),\n ('AtConf', conf),\n ('HandEmpty',)]\n\n self.table_pose = pb_robot.vobj.BodyPose(self.table, self.table.get_base_link_pose())\n init += [('Pose', self.table, self.table_pose), \n ('AtPose', self.table, self.table_pose)]\n\n for body in self.pddl_blocks:\n print(type(body), body)\n pose = pb_robot.vobj.BodyPose(body, body.get_base_link_pose())\n init += [('Graspable', body),\n ('Pose', body, pose),\n ('AtPose', body, pose),\n ('Block', body),\n ('On', body, self.table),\n ('Supported', body, pose, self.table, self.table_pose)]\n\n if not self.platform_table is None:\n platform_pose = pb_robot.vobj.BodyPose(self.platform_table, self.platform_table.get_base_link_pose())\n init += [('Pose', self.platform_table, platform_pose), \n ('AtPose', self.platform_table, platform_pose)]\n init += [('Block', self.platform_table)]\n init += [('Table', self.table)]\n return init\n\n\n def _get_observed_pose(self, pddl_block, action):\n \"\"\"\n This pose should be relative to the base of the platform leg to\n agree with the simulation. 
The two block representations will have\n different orientation but their positions should be the same.\n \"\"\"\n block_transform = pddl_block.get_base_link_transform()\n platform_transform = self.platform_leg.get_base_link_transform()\n platform_transform[2,3] -= self.platform_leg.get_dimensions()[2]/2.\n\n rel_transform = numpy.linalg.inv(platform_transform)@block_transform\n end_pose = pb_robot.geometry.pose_from_tform(rel_transform)\n # TODO: Add noise to the observation.\n\n end_pose = Pose(Position(*end_pose[0]), Quaternion(*end_pose[1]))\n end_pose = add_noise(end_pose, self.noise*numpy.eye(3))\n\n return end_pose\n\n\n def _update_block_poses(self, find_moved=False):\n \"\"\" Use the global world cameras to update the positions of the blocks \"\"\"\n try:\n resp = self._get_block_poses_world()\n named_poses = resp.poses\n except:\n import sys\n print('Service call to get block poses failed. Exiting.')\n sys.exit()\n\n n_found = 0\n for pddl_block_name, pddl_block in self.pddl_block_lookup.items():\n for named_pose in named_poses:\n if named_pose.block_id == pddl_block_name.split('_')[-1]:\n pose = named_pose.pose.pose\n # Skip changes the pose of objects in storage.\n if pose.position.x < 0.05:\n continue\n n_found += 1\n position = (pose.position.x, pose.position.y, pose.position.z)\n orientation = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n if not self.use_planning_server:\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n\n if find_moved and n_found != len(self.moved_blocks):\n input('Could not find all the moved blocks. Please reposition blocks outside of the camera view and hit enter to continue.')\n self._update_block_poses(find_moved=True)\n return\n\n # After loading from vision, objects may be in collision. 
Resolve this.\n for _, pddl_block in self.pddl_block_lookup.items():\n if pb_robot.collisions.body_collision(pddl_block, self.table):\n print('Collision with table and block:', pddl_block.readableName)\n position, orientation = pddl_block.get_base_link_pose()\n stable_z = pb_robot.placements.stable_z(pddl_block, self.table)\n position = (position[0], position[1], stable_z)\n self.execute()\n pddl_block.set_base_link_pose((position, orientation))\n self.plan()\n pddl_block.set_base_link_pose((position, orientation))\n\n # Resolve from low to high blocks.\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2], reverse=False)\n for ix in range(len(block_ixs)):\n bottom_block = self.pddl_blocks[block_ixs[ix]]\n for jx in range(ix+1, len(block_ixs)):\n top_block = self.pddl_blocks[block_ixs[jx]]\n\n dist_moved = 0\n while pb_robot.collisions.body_collision(bottom_block, top_block):\n print('Collision with bottom %s and top %s:' % (bottom_block.readableName, top_block.readableName))\n position, orientation = top_block.get_base_link_pose()\n stable_z = position[2] + 0.001\n dist_moved += 0.001\n if self.real and dist_moved > 0.04:\n print(f\"Found blocks {bottom_block} and {top_block} in collision\")\n input(\"Manually move the blocks and press Enter to continue\")\n self._update_block_poses(find_moved=False)\n return\n position = (position[0], position[1], stable_z)\n self.execute()\n top_block.set_base_link_pose((position, orientation))\n self.plan()\n top_block.set_base_link_pose((position, orientation))\n\n\n def build_planning_problem(self, tower, base_xy):\n \"\"\" Builds the initial conditions for planning \"\"\"\n # Set up the list of original poses and order of blocks in the tower\n self.moved_blocks = set()\n tower_pddl = [self.pddl_block_lookup[b.name] for b in tower]\n tower_block_order = [self.pddl_blocks.index(b) for b in 
tower_pddl]\n\n # Build the initial data structures\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n # Initial poses and robot configuration\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n else:\n pddl_problems = []\n\n # Base block goal pose\n # TODO: Set base block to be rotated in its current position.\n base_block = self.pddl_block_lookup[tower[0].name]\n base_pos = (base_xy[0], base_xy[1], tower[0].pose.pos.z)\n base_pose = (base_pos, tower[0].rotation)\n base_pose = pb_robot.vobj.BodyPose(base_block, base_pose)\n if self.use_planning_server:\n base_block_ros = BodyInfo()\n base_block_ros.name = base_block.readableName\n base_block_ros.stack = True\n pose_to_ros(base_pose, base_block_ros.pose)\n ros_req.goal_state.append(base_block_ros)\n else:\n pddl_problems.append((self.table, base_block, (base_pos, tower[0].rotation)))\n\n # Other block goal poses\n for b_ix in range(1, len(tower)):\n bottom_block = tower[b_ix-1]\n bottom_pose = (bottom_block.pose.pos, bottom_block.rotation)\n bottom_tform = pb_robot.geometry.tform_from_pose(bottom_pose)\n top_block = tower[b_ix]\n top_pose = (top_block.pose.pos, top_block.rotation)\n top_tform = pb_robot.geometry.tform_from_pose(top_pose)\n\n rel_tform = numpy.linalg.inv(bottom_tform)@top_tform\n top_pddl = self.pddl_block_lookup[top_block.name]\n bottom_pddl = self.pddl_block_lookup[bottom_block.name]\n\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = top_pddl.readableName\n block_ros.base_obj = bottom_pddl.readableName\n transform_to_ros(rel_tform, block_ros.pose)\n block_ros.is_rel_pose = True\n 
block_ros.stack = True\n ros_req.goal_state.append(block_ros)\n else:\n init_terms = [('RelPose', top_pddl, bottom_pddl, rel_tform)]\n goal_terms = [('On', top_pddl, bottom_pddl)]\n pddl_problems.append((bottom_pddl, top_pddl, rel_tform))\n \n # Finally, tack on the tower resetting steps\n for ix in reversed(tower_block_order):\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n\n if self.use_planning_server:\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n\n # Return the planning data structure\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n\n def build_reset_problem(self):\n \"\"\" Builds the initial conditions for a tower reset given a set of moved blocks \"\"\"\n\n print(\"Resetting blocks...\")\n print(\"Moved Blocks:\", self.moved_blocks)\n \n # Define block order by sorting by height\n current_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n block_ixs = range(len(self.pddl_blocks))\n block_ixs = sorted(block_ixs, key=lambda ix: current_poses[ix][0][2], reverse=True)\n \n # Build the initial data structures\n if self.use_planning_server:\n from stacking_ros.msg import BodyInfo\n from stacking_ros.srv import SetPlanningStateRequest\n from tamp.ros_utils import block_init_to_ros, pose_to_ros, pose_tuple_to_ros, transform_to_ros\n ros_req = SetPlanningStateRequest()\n ros_req.init_state = block_init_to_ros(self.pddl_blocks)\n if self.real:\n ros_req.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n ros_req.robot_config.angles = self.robot.arm.GetJointValues()\n else:\n pddl_problems = []\n\n # Add all blocks to be moved to the data structure\n for ix in block_ixs:\n blk, pose = self.pddl_blocks[ix], self.original_poses[ix]\n if blk in 
self.moved_blocks:\n if self.use_planning_server:\n goal_pose = pb_robot.vobj.BodyPose(blk, pose)\n block_ros = BodyInfo()\n block_ros.name = blk.readableName\n block_ros.stack = False\n pose_to_ros(goal_pose, block_ros.pose)\n ros_req.goal_state.append(block_ros)\n else:\n pddl_problems.append((self.table, blk, pose))\n\n # Return the planning data structure\n if self.use_planning_server:\n return ros_req\n else:\n return pddl_problems\n\n\n def simulate_tower(self, tower, vis, T=2500, real=False, base_xy=(0., 0.5), ignore_resets=False):\n \"\"\"\n Simulates a tower stacking and unstacking by requesting plans from a separate planning server\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n\n for block in tower:\n print('Block:', block.name)\n print('Pose:', block.pose)\n print('Dims:', block.dimensions)\n print('CoM:', block.com)\n print('Rotations:', block.rotation)\n print('-----')\n if self.use_vision:\n self._update_block_poses()\n self.original_poses = [b.get_base_link_pose() for b in self.pddl_blocks]\n planning_prob = self.build_planning_problem(tower, base_xy)\n\n # Execute the stacking plan\n success, stack_stable, reset_stable, num_success, fatal = \\\n self.plan_and_execute(planning_prob, real, T, stack=True, ignore_resets=ignore_resets)\n print(f\"Completed tower stack with success: {success}, stable: {stack_stable}\")\n if reset_stable:\n print(f\"Completed tower reset stable: {reset_stable}\")\n\n # If we have a nonfatal failure, replan from new state, removing successful goals\n while (not success and not fatal):\n print(f\"Got recoverable failure. 
Replanning from step index {num_success}.\")\n if self.use_planning_server:\n from tamp.ros_utils import block_init_to_ros\n if self.real:\n planning_prob.robot_config.angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n planning_prob.robot_config.angles = self.robot.arm.GetJointValues()\n planning_prob.init_state = block_init_to_ros(self.pddl_blocks)\n if isinstance(self.last_obj_held, pb_robot.vobj.BodyGrasp):\n planning_prob.held_block.name = self.last_obj_held.body.readableName\n transform_to_ros(self.last_obj_held.grasp_objF, planning_prob.held_block.pose)\n success, stack_stable, reset_stable, num_success, fatal = \\\n self.plan_and_execute(planning_prob, real, T, stack=True, start_idx=num_success, ignore_resets=ignore_resets)\n print(f\"Completed tower stack with success: {success}, stable: {stack_stable}\")\n if reset_stable:\n print(f\"Completed tower reset stable: {reset_stable}\")\n\n # Write the number of successfully stacked blocks\n num_stack_success = min(len(tower), num_success)\n\n # If the full tower did not succeed, reset the moved blocks\n if not ignore_resets:\n try:\n if not (stack_stable and reset_stable):\n if self.use_vision and not stack_stable:\n self._update_block_poses(find_moved=True)\n # TODO: Return arm to home position to help with vision.\n \n planning_prob = self.build_reset_problem()\n reset_fatal = False\n num_reset_success = 0\n while len(self.moved_blocks) > 0 and not reset_fatal:\n print(f\"Resetting {len(self.moved_blocks)} blocks.\")\n reset_success, _, reset_stable, num_reset_success, reset_fatal = \\\n self.plan_and_execute(planning_prob, real, T, stack=False, start_idx=num_reset_success)\n\n except Exception as e:\n print(\"Planning/execution failed during tower reset.\")\n print(e)\n\n # Return the final planning state\n return success, stack_stable, num_stack_success\n\n\n def plan_and_execute(self, planning_prob, real=False, T=2500, stack=True, start_idx=0, ignore_resets=False):\n \"\"\"\n 
Requests a PDDLStream plan from a planning server and executes the resulting plan\n Returns:\n success : Flag for whether the plan execution succeeded\n stack_stable : Flag for whether stacking a stable tower was successful\n reset_stable : Flag for whether resetting a tower was successful\n num_success : Progress (in number of steps) of successful tasks\n fatal : Flag for whether the error was fatal (True) or recoverable (False)\n start_idx : Start index of planning (for recovering from partial plans)\n ignore_resets : Flag for whether to stop after resets\n \"\"\"\n # Initialize variables\n num_success = start_idx\n stack_stable = False\n reset_stable = False\n planning_active = True\n\n if self.use_planning_server:\n # Send a reset request to the planning server\n ros_req = planning_prob\n num_steps = len(ros_req.goal_state)\n trimmed_ros_req = deepcopy(ros_req)\n trimmed_ros_req.goal_state = trimmed_ros_req.goal_state[start_idx:]\n self.init_state_client.call(trimmed_ros_req)\n else:\n pddl_problems = planning_prob\n num_steps = len(pddl_problems)\n\n while num_success < num_steps:\n try:\n # PLANNING\n # If using planning server, request a plan from the server using ROS\n if self.use_planning_server:\n query_block = self.pddl_block_lookup[ros_req.goal_state[num_success].name]\n\n # Wait for a valid plan\n plan = []\n saved_world = pb_robot.utils.WorldSaver()\n while len(plan) == 0 and planning_active:\n time.sleep(5)\n print(\"Getting a plan from server...\")\n ros_resp = self.get_plan_client.call()\n if not ros_resp.planning_active:\n print(\"Planning failed on server side.\")\n # If failure happened during stacking, it is a fatal failure\n if (ros_req.goal_state[num_success].stack):\n print(f\"Failed during stacking {query_block}\")\n fatal = True\n # If failure happened during resetting, prompt user to manually reset blocks\n else:\n print(f\"Failed during resetting {query_block}\")\n input(\"Manually reset the blocks and press Enter to continue\")\n if 
real:\n self._update_block_poses()\n fatal = False\n return False, stack_stable, reset_stable, num_success, fatal\n if self.validate_ros_plan(ros_resp, query_block):\n plan = self.ros_to_task_plan(ros_resp, self.execution_robot, self.pddl_block_lookup)\n\n # Otherwise, plan locally\n else:\n base, blk, pose = pddl_problems[num_success]\n query_block = blk\n\n self._add_text('Planning block placement')\n self.plan()\n saved_world = pb_robot.utils.WorldSaver()\n self.robot.arm.hand.Open()\n \n # Unpack initial conditions\n fixed_objs = self.fixed + [b for b in self.pddl_blocks if b != blk]\n init = self._get_initial_pddl_state()\n goal_terms = []\n if base == self.table:\n blk_pose = pb_robot.vobj.BodyPose(blk, pose)\n if (not stack or num_success >= num_steps/2) and self.alternate_orientations:\n init += [(\"Reset\",)]\n goal_terms.append((\"AtHome\", blk))\n else:\n init += [('Pose', blk, blk_pose),\n ('Supported', blk, blk_pose, self.table, self.table_pose)]\n goal_terms.append(('AtPose', blk, blk_pose))\n goal_terms.append(('On', blk, self.table))\n else:\n init += [('RelPose', blk, base, pose)]\n goal_terms.append(('On', blk, base))\n goal = tuple(['and'] + goal_terms)\n \n # Plan with PDDLStream\n pddl_info = get_pddlstream_info(self.robot,\n fixed_objs,\n self.pddl_blocks,\n add_slanted_grasps=True,\n approach_frame='global',\n use_vision=self.use_vision,\n home_pose=pose)\n plan, cost = pddlstream_plan(pddl_info, init, goal, \n search_sample_ratio=1.0, \n max_time=INF)\n if plan is None:\n print(\"\\nFailed to plan\\n\")\n fatal = False\n return False, stack_stable, reset_stable, num_success, fatal\n saved_world.restore()\n\n print(\"\\nGot plan:\")\n print(plan)\n\n # Once we have a plan, execute it\n obstacles = [f for f in self.fixed if f is not None]\n if not self.use_planning_server:\n self.plan()\n ExecuteActions(plan, real=False, pause=False, wait=False, obstacles=obstacles)\n self.execute()\n ExecuteActions(plan, real=real, pause=True, wait=False, 
prompt=False, obstacles=obstacles, \n sim_fatal_failure_prob=0.0, sim_recoverable_failure_prob=0.0)\n\n # Manage the moved blocks (add to the set when stacking, remove when unstacking)\n desired_pose = query_block.get_base_link_pose()\n if query_block not in self.moved_blocks:\n self.moved_blocks.add(query_block)\n else:\n self.moved_blocks.remove(query_block)\n\n # Check stability\n if not real:\n self.step_simulation(T, vis_frames=False)\n #input('Press enter to check stability.')\n if stack:\n stable = self.check_stability(real, query_block, desired_pose)\n else:\n stable = True # Don't care about stability on reset\n\n if stable == 0.:\n prompt = input('Tower NOT stable. Is this true? [y: Unstable / n: Stable]')\n if prompt == 'n':\n stable = 1.\n #input('Continue?')\n\n # Manage the success status of the plan\n if stable == 0.:\n print(\"Unstable after execution!\")\n return True, stack_stable, reset_stable, num_success, False\n else:\n num_success += 1\n if stack and num_success == num_steps/2:\n print(\"Completed tower stack!\")\n stack_stable = True\n stack = False\n if ignore_resets:\n return True, stack_stable, reset_stable, num_success, False\n elif num_success == num_steps:\n print(\"Completed tower reset!\")\n reset_stable = True\n return True, stack_stable, reset_stable, num_success, False\n\n except ExecutionFailure as e:\n print(\"Planning/execution failed.\")\n print(e)\n saved_world.restore()\n if real:\n self._update_block_poses()\n self.robot.arm.SetJointValues(self.real_arm.convertToList(self.real_arm.joint_angles()))\n self.last_obj_held = e.obj_held\n return False, stack_stable, reset_stable, num_success, e.fatal\n\n\n def check_stability(self, real, block_pddl, desired_pose, max_tries=2):\n if self.use_vision:\n # Get pose of blocks using wrist camera.\n try:\n poses = self._get_block_poses_wrist().poses\n except:\n print('Service call to get block poses failed during check stability. 
Exiting.')\n sys.exit()\n\n # Check if pose is close to desired_pose.\n visible = False\n for named_pose in poses:\n if named_pose.block_id in block_pddl.readableName.split('_')[-1]:\n visible = True\n pose = named_pose.pose.pose\n\n des_pos = desired_pose[0]\n obs_pos = (pose.position.x, pose.position.y, pose.position.z)\n print('[Check Stability] Desired Pos:', des_pos)\n print('[Check Stability] Detected Pos:', obs_pos)\n # First check if the pose is too far away.\n dist = numpy.linalg.norm(numpy.array(obs_pos)-numpy.array(des_pos))\n print(f'[Check Stability] Position Distance (>0.04): {dist}')\n if dist > 0.04:\n return 0.\n # Also check that the block is flat on the table.\n orn = desired_pose[1]\n obs_orn = pyquaternion.Quaternion(pose.orientation.w, pose.orientation.x, pose.orientation.y, pose.orientation.z)\n des_orn = pyquaternion.Quaternion(orn[3], orn[0], orn[1], orn[2])\n angle = (des_orn.inverse*obs_orn).angle\n angle = numpy.abs(numpy.rad2deg(angle))\n print(f'[Check Stability] Orientation Distance (> 15): {angle}')\n if angle > 15:\n return 0.\n\n # If block isn't visible, return 0.\n if not visible:\n print('[Check Stability] Object not visible to camera.')\n return 0.\n\n else:\n end_pose = block_pddl.get_base_link_point()\n dist = numpy.linalg.norm(numpy.array(end_pose) - numpy.array(desired_pose[0]))\n # print(f\"Distance is {dist}\")\n # print(f\"Block dimensions are {block_pddl.get_dimensions()}\")\n if dist > 0.01:\n print('Unstable!')\n return 0.\n return 1.\n\n\n def validate_ros_plan(self, ros_resp, tgt_block):\n \"\"\" Validates a ROS plan to move a block against the expected target block name \"\"\"\n if len(ros_resp.plan) == 0:\n return True\n else:\n plan_blocks = [t.obj1 for t in ros_resp.plan if t.type == \"pick\"]\n if len(plan_blocks) > 0:\n plan_block = plan_blocks[0]\n else:\n return False\n print(f\"Received plan to move {plan_block} and expected to move {tgt_block}\")\n return (tgt_block.readableName == plan_block)\n\n\n def 
robot_state_callback(self, msg):\n \"\"\" Processes robot state errors and raises execution failures for planning \"\"\"\n cur_time = time.time()\n if (cur_time - self.arm_last_error_time) < self.arm_error_check_time:\n return\n\n self.arm_last_error_time = cur_time\n cur_errors = msg.current_errors\n # if cur_errors.cartesian_reflex:\n # reason = \"Cartesian reflex error detected!\"\n # raise ExecutionFailure(reason=reason, fatal=False)\n if cur_errors.communication_constraints_violation:\n reason = \"Communication constraints violation detected!\"\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_position_limits_violation:\n reason = \"Joint position limits violation detected!\"\n raise ExecutionFailure(reason=reason, fatal=True)\n if cur_errors.joint_motion_generator_position_limits_violation:\n reason = \"Joint motion generator position limits violation detected!\"\n raise ExecutionFailure(reason=reason, fatal=True)\n\n\n def learning_server_callback(self, ros_req, base_xy=(0.5, -0.3)):\n \"\"\" Service callback function to plan and execute a tower from active learning script \"\"\"\n from stacking_ros.srv import PlanTowerResponse\n from tamp.ros_utils import ros_to_tower\n tower = ros_to_tower(ros_req.tower_info)\n success, stable, num_stack_stable = self.simulate_tower(\n tower, True, real=self.real, base_xy=base_xy)\n resp = PlanTowerResponse()\n resp.success = success\n resp.stable = stable\n resp.num_stack_stable = num_stack_stable\n return resp\n\n\n def step_simulation(self, T, vis_frames=False, lifeTime=0.1):\n p.setGravity(0, 0, -10, physicsClientId=self._execution_client_id)\n p.setGravity(0, 0, -10, physicsClientId=self._planning_client_id)\n\n q = self.robot.get_joint_positions()\n\n for _ in range(T):\n p.stepSimulation(physicsClientId=self._execution_client_id)\n p.stepSimulation(physicsClientId=self._planning_client_id)\n\n self.execute()\n self.execution_robot.set_joint_positions(self.robot.joints, q)\n self.plan()\n 
self.robot.set_joint_positions(self.robot.joints, q)\n\n time.sleep(1/2400.)\n\n if vis_frames:\n length = 0.1\n for pddl_block in self.pddl_blocks:\n pos, quat = pddl_block.get_pose()\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n\n p.addUserDebugLine(pos, new_x, [1,0,0], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)\n p.addUserDebugLine(pos, new_y, [0,1,0], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)\n p.addUserDebugLine(pos, new_z, [0,0,1], lineWidth=3, lifeTime=lifeTime, physicsClientId=self._execution_client_id)\n\n\n def simulate_action(self, action, block_ix, T=50, vis_sim=False, vis_placement=False):\n \"\"\"\n Perform the given action to with the given block. An observation\n should be returned in the reference frame of the platform.\n :param action: Place action which describes the relative pose of the block to the platform surface.\n :param real_block: Belief representation of the block to perform the action on.\n :param T: How many timesteps to simulate the block falling for.\n :param vis_sim: Ununsed.\n :return: (action, T, end_pose) End pose should be TODO: what frame?\n \n TODO: Not sure if this method works at the moment...\n \"\"\"\n assert(self.platform_table is not None)\n real_block = self.belief_blocks[block_ix]\n pddl_block = self.pddl_blocks[block_ix]\n\n original_pose = pddl_block.get_base_link_pose()\n\n # Set up the PDDLStream problem for the placing the given block on the\n # platform with the specified action.\n self.pddl_info = get_pddlstream_info(self.robot,\n self.fixed,\n self.pddl_blocks,\n add_slanted_grasps=False,\n approach_frame='gripper',\n use_vision=self.use_vision)\n init = self._get_initial_pddl_state()\n\n # Figure out the correct transformation matrix based on the action.\n real_block.set_pose(Pose(ZERO_POS, 
Quaternion(*action.rot.as_quat())))\n rotated_block = get_rotated_block(real_block)\n\n x = action.pos[0]\n y = action.pos[1]\n z = self.platform_table.get_dimensions()[2]/2. + rotated_block.dimensions[2]/2 #+ 1e-5\n tform = numpy.array([[1., 0., 0., x],\n [0., 1., 0., y],\n [0., 0., 1., z],\n [0., 0., 0., 1.]])\n tform[0:3, 0:3] = action.rot.as_matrix()\n\n # Code to visualize where the block will be placed.\n if vis_placement:\n surface_tform = pb_robot.geometry.tform_from_pose(self.platform_table.get_base_link_pose())\n body_tform = surface_tform@tform\n length, lifeTime = 0.2, 0.0\n\n pos, quat = pb_robot.geometry.pose_from_tform(body_tform)\n new_x = transformation([length, 0.0, 0.0], pos, quat)\n new_y = transformation([0.0, length, 0.0], pos, quat)\n new_z = transformation([0.0, 0.0, length], pos, quat)\n\n p.addUserDebugLine(pos, new_x, [1,0,0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_y, [0,1,0], lifeTime=lifeTime)\n p.addUserDebugLine(pos, new_z, [0,0,1], lifeTime=lifeTime)\n\n init += [('RelPose', pddl_block, self.platform_table, tform)]\n goal = ('On', pddl_block, self.platform_table)\n\n # Solve the PDDLStream problem.\n print('Init:', init)\n print('Goal:', goal)\n self.plan_and_execute(init, goal, search_sample_ratio=1000)\n\n # Execute the action.\n # TODO: Check gravity compensation in the arm.\n\n self.step_simulation(T)\n end_pose = self._get_observed_pose(pddl_block, action)\n observation = (action, T, end_pose)\n self.step_simulation(500-T)\n\n # Put block back in original position.\n\n # TODO: Check if block is on the table or platform to start.\n self.pddl_info = get_pddlstream_info(self.robot,\n self.fixed,\n self.pddl_blocks,\n add_slanted_grasps=True,\n approach_frame='gripper',\n use_vision=self.use_vision)\n\n init = self._get_initial_pddl_state()\n goal_pose = pb_robot.vobj.BodyPose(pddl_block, original_pose)\n init += [('Pose', pddl_block, goal_pose),\n ('Supported', pddl_block, goal_pose, self.table, self.table_pose)]\n goal = 
('and', ('AtPose', pddl_block, goal_pose),\n ('On', pddl_block, self.table))\n\n # Solve the PDDLStream problem.\n print('Init:', init)\n print('Goal:', goal)\n success = self.plan_and_execute(init, goal, max_time=100., search_sample_ratio=1000)\n return observation\n\n\n\nclass PandaClientAgent:\n \"\"\"\n Lightweight client to call a PandaAgent as a service for active learning\n \"\"\"\n\n def __init__(self):\n import rospy\n rospy.init_node(\"panda_client\")\n self.restart_services()\n\n\n def restart_services(self):\n import rospy\n from stacking_ros.srv import PlanTower\n print(\"Waiting for Panda Agent server...\")\n rospy.wait_for_service(\"/plan_tower\")\n print(\"Done\")\n self.client = rospy.ServiceProxy(\n \"/plan_tower\", PlanTower)\n\n\n def simulate_tower(self, tower, vis, real=False):\n \"\"\" \n Call the PandaAgent server's `simulate_tower` method to plan and execute a tower.\n\n Returns:\n success : Flag indicating success of execution (True/False)\n stable : Flag indicating (0 or 1)\n num_stack_success : Number of blocks successfully stacked\n \"\"\"\n from stacking_ros.srv import PlanTowerRequest\n from tamp.ros_utils import tower_to_ros, ros_to_tower\n request = PlanTowerRequest()\n request.tower_info = tower_to_ros(tower)\n\n if vis:\n w = World(tower)\n env = Environment([w], vis_sim=True, vis_frames=True)\n env.step(vis_frames=True)\n for b in tower:\n print('----- Block info -----')\n print(b.name)\n print(b.dimensions)\n print(b.pose)\n print(b.rotation)\n response = self.client.call(request)\n\n if vis:\n env.disconnect()\n\n return response.success, response.stable, response.num_stack_stable\n",
"step-ids": [
20,
22,
24,
25,
26
]
}
|
[
20,
22,
24,
25,
26
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(B + S.count('?') if T == 1 else max(B - S.count('?'), (B - S.count(
'?')) % 2))
<|reserved_special_token_1|>
# Move string S over {L, R, U, D, ?} and a query type T read from stdin.
S = input()
T = int(input())
# Baseline displacement from the fixed moves: |#L - #R| + |#U - #D|.
B = abs(S.count('L') - S.count('R')) + abs(S.count('U') - S.count('D'))
wildcards = S.count('?')
if T == 1:
    # Maximise distance: each '?' can be spent pushing further out.
    print(B + wildcards)
else:
    # Minimise distance: wildcards cancel displacement pairwise, so the
    # best reachable value is floored at the parity of the remainder.
    print(max(B - wildcards, (B - wildcards) % 2))
|
flexible
|
{
"blob_id": "ce263424b856c07e04bd66cda7ebda646583b1fe",
"index": 5962,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(B + S.count('?') if T == 1 else max(B - S.count('?'), (B - S.count(\n '?')) % 2))\n",
"step-3": "S = input()\nT = int(input())\nB = abs(S.count('L') - S.count('R')) + abs(S.count('U') - S.count('D'))\nprint(B + S.count('?') if T == 1 else max(B - S.count('?'), (B - S.count(\n '?')) % 2))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def get_grocery_id(upc):
cmd = 'SELECT id FROM grocery WHERE upc = ?'
rtVal = do_command(cmd, [upc])
if len(rtVal) > 0:
return rtVal[0]['id']
else:
return -1
<|reserved_special_token_0|>
def remove_grocery(upc):
id = get_grocery_id(upc)
if id != -1:
cmd = 'DELETE FROM inventory WHERE grocery_id = ?'
do_command_no_return(cmd, [id])
cmd = 'DELETE FROM changes where grocery_id = ?'
do_command_no_return(cmd, [id])
cmd = 'DELETE FROM grocery where id = ?'
do_command_no_return(cmd, [id])
def produce_input(plu, name):
cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'
rtVal = do_insert(cmd, [name, plu])
return rtVal
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_grocery(upc):
cmd = 'SELECT name FROM grocery WHERE upc = ?'
rtVal = do_command(cmd, [upc])
length = len(rtVal)
if length > 0:
return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}
return {'success': bool(len(rtVal))}
<|reserved_special_token_0|>
def get_grocery_id(upc):
cmd = 'SELECT id FROM grocery WHERE upc = ?'
rtVal = do_command(cmd, [upc])
if len(rtVal) > 0:
return rtVal[0]['id']
else:
return -1
<|reserved_special_token_0|>
def remove_grocery(upc):
id = get_grocery_id(upc)
if id != -1:
cmd = 'DELETE FROM inventory WHERE grocery_id = ?'
do_command_no_return(cmd, [id])
cmd = 'DELETE FROM changes where grocery_id = ?'
do_command_no_return(cmd, [id])
cmd = 'DELETE FROM grocery where id = ?'
do_command_no_return(cmd, [id])
def produce_input(plu, name):
cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'
rtVal = do_insert(cmd, [name, plu])
return rtVal
def get_produce(plu):
cmd = 'SELECT name FROM produce WHERE plu = ?'
rtVal = do_command(cmd, [plu])
length = len(rtVal)
if length > 0:
return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}
return {'success': bool(len(rtVal))}
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_grocery(upc):
cmd = 'SELECT name FROM grocery WHERE upc = ?'
rtVal = do_command(cmd, [upc])
length = len(rtVal)
if length > 0:
return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}
return {'success': bool(len(rtVal))}
<|reserved_special_token_0|>
def get_grocery_id(upc):
cmd = 'SELECT id FROM grocery WHERE upc = ?'
rtVal = do_command(cmd, [upc])
if len(rtVal) > 0:
return rtVal[0]['id']
else:
return -1
def get_grocery_name(upc):
cmd = 'SELECT name FROM grocery WHERE upc = ?'
rtVal = do_command((cmd, [upc]))
return rtVal[0]
<|reserved_special_token_0|>
def remove_grocery(upc):
id = get_grocery_id(upc)
if id != -1:
cmd = 'DELETE FROM inventory WHERE grocery_id = ?'
do_command_no_return(cmd, [id])
cmd = 'DELETE FROM changes where grocery_id = ?'
do_command_no_return(cmd, [id])
cmd = 'DELETE FROM grocery where id = ?'
do_command_no_return(cmd, [id])
def produce_input(plu, name):
cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'
rtVal = do_insert(cmd, [name, plu])
return rtVal
def get_produce(plu):
cmd = 'SELECT name FROM produce WHERE plu = ?'
rtVal = do_command(cmd, [plu])
length = len(rtVal)
if length > 0:
return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}
return {'success': bool(len(rtVal))}
<|reserved_special_token_1|>
from db import do_command, do_command_no_return, do_insert
def get_grocery(upc):
cmd = 'SELECT name FROM grocery WHERE upc = ?'
rtVal = do_command(cmd, [upc])
length = len(rtVal)
if length > 0:
return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}
return {'success': bool(len(rtVal))}
def grocery_input(upc, name):
cmd = 'INSERT INTO grocery (name, upc) VALUES (?, ?)'
rtVal = do_insert(cmd, [name, upc])
return rtVal
def get_grocery_id(upc):
cmd = 'SELECT id FROM grocery WHERE upc = ?'
rtVal = do_command(cmd, [upc])
if len(rtVal) > 0:
return rtVal[0]['id']
else:
return -1
def get_grocery_name(upc):
cmd = 'SELECT name FROM grocery WHERE upc = ?'
rtVal = do_command((cmd, [upc]))
return rtVal[0]
def grocery_exists(upc):
cmd = 'SELECT id FROM grocery WHERE upc = ?'
rtVal = do_command(cmd, [upc])
return bool(len(rtVal))
def remove_grocery(upc):
id = get_grocery_id(upc)
if id != -1:
cmd = 'DELETE FROM inventory WHERE grocery_id = ?'
do_command_no_return(cmd, [id])
cmd = 'DELETE FROM changes where grocery_id = ?'
do_command_no_return(cmd, [id])
cmd = 'DELETE FROM grocery where id = ?'
do_command_no_return(cmd, [id])
def produce_input(plu, name):
cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'
rtVal = do_insert(cmd, [name, plu])
return rtVal
def get_produce(plu):
cmd = 'SELECT name FROM produce WHERE plu = ?'
rtVal = do_command(cmd, [plu])
length = len(rtVal)
if length > 0:
return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}
return {'success': bool(len(rtVal))}
<|reserved_special_token_1|>
from db import do_command, do_command_no_return, do_insert
def get_grocery(upc):
    """Look up a grocery item's name by UPC.

    Returns {'success': True, 'grocery': <first row>} when a match exists,
    otherwise {'success': False}.
    """
    rows = do_command("SELECT name FROM grocery WHERE upc = ?", [upc])
    if not rows:
        return {'success': False}
    return {'success': True, 'grocery': rows[0]}
def grocery_input(upc, name):
    """Insert a new grocery row and return do_insert's result."""
    return do_insert("INSERT INTO grocery (name, upc) VALUES (?, ?)", [name, upc])
def get_grocery_id(upc):
    """Return the id of the grocery with the given UPC, or -1 if absent."""
    rows = do_command("SELECT id FROM grocery WHERE upc = ?", [upc])
    return rows[0]['id'] if rows else -1
def get_grocery_name(upc):
    """Return the first name row for the grocery with the given UPC.

    Raises IndexError if no grocery with that UPC exists (unchanged
    behavior; callers that need a safe lookup should use get_grocery).
    """
    cmd = "SELECT name FROM grocery WHERE upc = ?"
    # Bug fix: the original called do_command((cmd, [upc])) — a single tuple
    # argument — instead of do_command(cmd, [upc]) like every sibling helper,
    # which would fail at call time.
    rtVal = do_command(cmd, [upc])
    return rtVal[0]
def grocery_exists(upc):
    """Return True if a grocery row with the given UPC exists."""
    return bool(do_command("SELECT id FROM grocery WHERE upc = ?", [upc]))
def remove_grocery(upc):
    """Delete the grocery with the given UPC plus its dependent rows.

    Child rows (inventory, changes) are removed before the grocery row
    itself. A no-op if the UPC is unknown.
    """
    grocery_id = get_grocery_id(upc)
    if grocery_id == -1:
        return
    for stmt in (
        "DELETE FROM inventory WHERE grocery_id = ?",
        "DELETE FROM changes where grocery_id = ?",
        "DELETE FROM grocery where id = ?",
    ):
        do_command_no_return(stmt, [grocery_id])
def produce_input(plu, name):
    """Insert a new produce row and return do_insert's result."""
    return do_insert("INSERT INTO produce (name, plu) VALUES (?, ?)", [name, plu])
def get_produce(plu):
    """Look up a produce item's name by PLU code.

    Returns {'success': True, 'grocery': <first row>} when a match exists,
    otherwise {'success': False}. (The result key is 'grocery' to match
    get_grocery's shape.)
    """
    rows = do_command("SELECT name FROM produce WHERE plu = ?", [plu])
    if not rows:
        return {'success': False}
    return {'success': True, 'grocery': rows[0]}
|
flexible
|
{
"blob_id": "92b24fe82929ed4590e5350188673c2245136d03",
"index": 5554,
"step-1": "<mask token>\n\n\ndef get_grocery_id(upc):\n cmd = 'SELECT id FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n if len(rtVal) > 0:\n return rtVal[0]['id']\n else:\n return -1\n\n\n<mask token>\n\n\ndef remove_grocery(upc):\n id = get_grocery_id(upc)\n if id != -1:\n cmd = 'DELETE FROM inventory WHERE grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM changes where grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM grocery where id = ?'\n do_command_no_return(cmd, [id])\n\n\ndef produce_input(plu, name):\n cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'\n rtVal = do_insert(cmd, [name, plu])\n return rtVal\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_grocery(upc):\n cmd = 'SELECT name FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n\n\n<mask token>\n\n\ndef get_grocery_id(upc):\n cmd = 'SELECT id FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n if len(rtVal) > 0:\n return rtVal[0]['id']\n else:\n return -1\n\n\n<mask token>\n\n\ndef remove_grocery(upc):\n id = get_grocery_id(upc)\n if id != -1:\n cmd = 'DELETE FROM inventory WHERE grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM changes where grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM grocery where id = ?'\n do_command_no_return(cmd, [id])\n\n\ndef produce_input(plu, name):\n cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'\n rtVal = do_insert(cmd, [name, plu])\n return rtVal\n\n\ndef get_produce(plu):\n cmd = 'SELECT name FROM produce WHERE plu = ?'\n rtVal = do_command(cmd, [plu])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n",
"step-3": "<mask token>\n\n\ndef get_grocery(upc):\n cmd = 'SELECT name FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n\n\n<mask token>\n\n\ndef get_grocery_id(upc):\n cmd = 'SELECT id FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n if len(rtVal) > 0:\n return rtVal[0]['id']\n else:\n return -1\n\n\ndef get_grocery_name(upc):\n cmd = 'SELECT name FROM grocery WHERE upc = ?'\n rtVal = do_command((cmd, [upc]))\n return rtVal[0]\n\n\n<mask token>\n\n\ndef remove_grocery(upc):\n id = get_grocery_id(upc)\n if id != -1:\n cmd = 'DELETE FROM inventory WHERE grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM changes where grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM grocery where id = ?'\n do_command_no_return(cmd, [id])\n\n\ndef produce_input(plu, name):\n cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'\n rtVal = do_insert(cmd, [name, plu])\n return rtVal\n\n\ndef get_produce(plu):\n cmd = 'SELECT name FROM produce WHERE plu = ?'\n rtVal = do_command(cmd, [plu])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n",
"step-4": "from db import do_command, do_command_no_return, do_insert\n\n\ndef get_grocery(upc):\n cmd = 'SELECT name FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n\n\ndef grocery_input(upc, name):\n cmd = 'INSERT INTO grocery (name, upc) VALUES (?, ?)'\n rtVal = do_insert(cmd, [name, upc])\n return rtVal\n\n\ndef get_grocery_id(upc):\n cmd = 'SELECT id FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n if len(rtVal) > 0:\n return rtVal[0]['id']\n else:\n return -1\n\n\ndef get_grocery_name(upc):\n cmd = 'SELECT name FROM grocery WHERE upc = ?'\n rtVal = do_command((cmd, [upc]))\n return rtVal[0]\n\n\ndef grocery_exists(upc):\n cmd = 'SELECT id FROM grocery WHERE upc = ?'\n rtVal = do_command(cmd, [upc])\n return bool(len(rtVal))\n\n\ndef remove_grocery(upc):\n id = get_grocery_id(upc)\n if id != -1:\n cmd = 'DELETE FROM inventory WHERE grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM changes where grocery_id = ?'\n do_command_no_return(cmd, [id])\n cmd = 'DELETE FROM grocery where id = ?'\n do_command_no_return(cmd, [id])\n\n\ndef produce_input(plu, name):\n cmd = 'INSERT INTO produce (name, plu) VALUES (?, ?)'\n rtVal = do_insert(cmd, [name, plu])\n return rtVal\n\n\ndef get_produce(plu):\n cmd = 'SELECT name FROM produce WHERE plu = ?'\n rtVal = do_command(cmd, [plu])\n length = len(rtVal)\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n return {'success': bool(len(rtVal))}\n",
"step-5": "from db import do_command, do_command_no_return, do_insert\n\n\ndef get_grocery(upc):\n cmd = \"SELECT name FROM grocery WHERE upc = ?\"\n rtVal = do_command(cmd, [upc])\n\n length = len(rtVal)\n\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n\n return {'success': bool(len(rtVal))}\n\n\ndef grocery_input(upc, name):\n cmd = \"INSERT INTO grocery (name, upc) VALUES (?, ?)\"\n rtVal = do_insert(cmd, [name, upc])\n\n return rtVal\n\n\ndef get_grocery_id(upc):\n cmd = \"SELECT id FROM grocery WHERE upc = ?\"\n rtVal = do_command(cmd, [upc])\n\n if len(rtVal) > 0:\n return rtVal[0]['id']\n else:\n return -1\n\n\ndef get_grocery_name(upc):\n cmd = \"SELECT name FROM grocery WHERE upc = ?\"\n rtVal = do_command((cmd, [upc]))\n return rtVal[0]\n\n\ndef grocery_exists(upc):\n cmd = \"SELECT id FROM grocery WHERE upc = ?\"\n rtVal = do_command(cmd, [upc])\n\n return bool(len(rtVal))\n\n\ndef remove_grocery(upc):\n id = get_grocery_id(upc)\n\n if id != -1:\n cmd = \"DELETE FROM inventory WHERE grocery_id = ?\"\n do_command_no_return(cmd, [id])\n cmd = \"DELETE FROM changes where grocery_id = ?\"\n do_command_no_return(cmd, [id])\n cmd = \"DELETE FROM grocery where id = ?\"\n do_command_no_return(cmd, [id])\n\n\ndef produce_input(plu, name):\n cmd = \"INSERT INTO produce (name, plu) VALUES (?, ?)\"\n rtVal = do_insert(cmd, [name, plu])\n\n return rtVal\n\ndef get_produce(plu):\n cmd = \"SELECT name FROM produce WHERE plu = ?\"\n rtVal = do_command(cmd, [plu])\n\n length = len(rtVal)\n\n if length > 0:\n return {'success': bool(len(rtVal)), 'grocery': rtVal[0]}\n\n return {'success': bool(len(rtVal))}",
"step-ids": [
3,
5,
6,
9,
10
]
}
|
[
3,
5,
6,
9,
10
] |
import sklearn
import pandas as pd
import numpy as np
from sklearn import datasets, ensemble
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
import statistics as st
import itertools
from sklearn.model_selection import cross_val_score
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import HistGradientBoostingRegressor
from sklearn.ensemble import HistGradientBoostingClassifier
from statsmodels import regression as reg
import statsmodels.api as regMods
from scipy.stats import norm
from scipy.stats import gamma
from scipy.stats import expon
from scipy.stats import poisson
from scipy.stats import binom
from scipy.stats import t
import plotly.express as px
import plotly.figure_factory as ff
# Load the heart-disease data set.
# NOTE(review): hard-coded absolute Windows path — only runs on this machine;
# consider a relative path or a command-line argument.
heart=pd.read_csv(r"C:\Users\fredr\Documents\StatTool\BZAN540\Homework\HW6\HeartDisease.csv")
heart.columns  # inspect available columns (no visible effect when run as a script)
# 80/20 train/test split; x1..x5 are the predictors, HeartDisease the label.
train, test = train_test_split(heart[['x1', 'x2', 'x3', 'x4', 'x5','HeartDisease']], test_size=0.2)
y_train=train['HeartDisease']
x_train=train[['x1', 'x2', 'x3', 'x4', 'x5']]

x_test=test[['x1', 'x2', 'x3', 'x4', 'x5']]
y_test=test['HeartDisease']

#boosting to predict heart disease
#make expand grid function to get all combos of the parameters
def expandgrid(*itrs):
    """Enumerate every combination of the input iterables, R expand.grid-style.

    Returns a dict whose keys ``Var1`` .. ``VarN`` each hold one column of the
    cartesian product of the inputs, in product order.
    """
    combos = list(itertools.product(*itrs))
    grid = {}
    for pos in range(len(itrs)):
        grid['Var{}'.format(pos + 1)] = [combo[pos] for combo in combos]
    return grid
#set the range for the parameter values:
n_estimators=np.arange(300, 450, 50) #the number of trees to fit
max_depth=np.arange(3, 5, 1)
min_samples_split=np.arange(3,4,1)
learning_rate=np.arange(0.001,0.004,0.001)
# Cartesian product of the four ranges -> one row per hyper-parameter combo
# (Var1=n_estimators, Var2=max_depth, Var3=min_samples_split, Var4=learning_rate).
a=expandgrid(n_estimators,max_depth, min_samples_split,learning_rate)
params=pd.DataFrame.from_dict(a)
len(params)
#time the code ???
# Loop over every parameter combination and store the mean 4-fold
# cross-validated accuracy (the original comment said "rmse", but the
# scoring used below is accuracy).
ValAcc=list(range(0,len(params)))
for i in range(0,len(params)):
    scores = cross_val_score(HistGradientBoostingClassifier(min_samples_leaf=params['Var3'].iloc[i],
                                         max_depth=params['Var2'].iloc[i],
                                         learning_rate=params['Var4'].iloc[i],max_iter=params['Var1'].iloc[i]).fit(x_train, y_train), 
                x_train, y_train, cv=4,scoring='accuracy')
    acc=st.mean(scores)
    ValAcc[i]=acc

ValAcc
max(ValAcc)
# NOTE(review): ValAcc is a plain Python list, so `ValAcc==max(ValAcc)` is a
# scalar comparison (always False), not an element-wise mask — this line does
# not select the best row as intended; `bestPos` below does it correctly.
pars=list(params.iloc[ValAcc==max(ValAcc)].iloc[0])
pars.append(max(ValAcc))
pars
# Index of the first combination achieving the maximum validation accuracy.
bestPos=np.array(np.where(np.array(ValAcc)==max(ValAcc))).tolist()[0][0]
#fit the best model on Train then predict on Test if mean acc close to val then fit on entire data 
bestPos

bestMod=HistGradientBoostingClassifier(min_samples_leaf=params['Var3'].iloc[bestPos],
                                         max_depth=params['Var2'].iloc[bestPos],
                                         learning_rate=params['Var4'].iloc[bestPos],max_iter=params['Var1'].iloc[bestPos]).fit(x_train, y_train)

#gets the predicted values on the test data 
bestMod.predict(x_test)
len(y_test[bestMod.predict(x_test)==y_test])/len(y_test) #67% acc on test 
# Build a single-row frame with each predictor at its column mean, then
# classify that synthetic "average patient" observation.
df_i=pd.DataFrame({'x1':np.mean(heart['x1']), 'x2':np.mean(heart['x2']),'x3':np.mean(heart['x3']),'x4':np.mean(heart['x4']),'x5':np.mean(heart['x5'])},index=[0])

if(bestMod.predict(df_i)==0):
    print('Predicted: No Heart Disease')
else:
    print('Predicted: Has Heart Disease')


#plot two densities centered on the mean of the var and the selected value of the var for all vars
#start with treating each var as a normal distro then plot a density curve where the
#mean is the mean of the var and another curve on same plot where the mean is the selected value from the input of a normal distro set sd to the sd of the var
#for both in the plots, generate random vars the size of the data, except for history heart disease treat as beta with p=actuap prob for var and p=random value that is
#greater than .5

#generates random values from a normal distro with mean=loc and sd=scale
norm.rvs(size=10000,loc=3,scale=8)
#x1: overlay the density centred on the sample mean with one centred on a
#user-selected value (190), both using the sample standard deviation.
x1=190
mean=np.mean(heart['x1'])
sd=np.std(heart['x1'])
meanx1_2=x1
xActual=norm.rvs(size=len(heart),loc=mean,scale=sd)
xInput=norm.rvs(size=len(heart),loc=meanx1_2,scale=sd)

group_labels = ['actual','center_selected']
hist_data=[xActual,xInput]
fig = ff.create_distplot(hist_data,group_labels)
fig.show()
|
normal
|
{
"blob_id": "0d862715524bd35347626e7708c7c8f8b370bb3a",
"index": 7769,
"step-1": "<mask token>\n\n\ndef expandgrid(*itrs):\n product = list(itertools.product(*itrs))\n return {'Var{}'.format(i + 1): [x[i] for x in product] for i in range(\n len(itrs))}\n\n\n<mask token>\n",
"step-2": "<mask token>\nheart.columns\n<mask token>\n\n\ndef expandgrid(*itrs):\n product = list(itertools.product(*itrs))\n return {'Var{}'.format(i + 1): [x[i] for x in product] for i in range(\n len(itrs))}\n\n\n<mask token>\nlen(params)\n<mask token>\nfor i in range(0, len(params)):\n scores = cross_val_score(HistGradientBoostingClassifier(\n min_samples_leaf=params['Var3'].iloc[i], max_depth=params['Var2'].\n iloc[i], learning_rate=params['Var4'].iloc[i], max_iter=params[\n 'Var1'].iloc[i]).fit(x_train, y_train), x_train, y_train, cv=4,\n scoring='accuracy')\n acc = st.mean(scores)\n ValAcc[i] = acc\nValAcc\nmax(ValAcc)\n<mask token>\npars.append(max(ValAcc))\npars\n<mask token>\nbestPos\n<mask token>\nbestMod.predict(x_test)\nlen(y_test[bestMod.predict(x_test) == y_test]) / len(y_test)\n<mask token>\nif bestMod.predict(df_i) == 0:\n print('Predicted: No Heart Disease')\nelse:\n print('Predicted: Has Heart Disease')\nnorm.rvs(size=10000, loc=3, scale=8)\n<mask token>\nfig.show()\n",
"step-3": "<mask token>\nheart = pd.read_csv(\n 'C:\\\\Users\\\\fredr\\\\Documents\\\\StatTool\\\\BZAN540\\\\Homework\\\\HW6\\\\HeartDisease.csv'\n )\nheart.columns\ntrain, test = train_test_split(heart[['x1', 'x2', 'x3', 'x4', 'x5',\n 'HeartDisease']], test_size=0.2)\ny_train = train['HeartDisease']\nx_train = train[['x1', 'x2', 'x3', 'x4', 'x5']]\nx_test = test[['x1', 'x2', 'x3', 'x4', 'x5']]\ny_test = test['HeartDisease']\n\n\ndef expandgrid(*itrs):\n product = list(itertools.product(*itrs))\n return {'Var{}'.format(i + 1): [x[i] for x in product] for i in range(\n len(itrs))}\n\n\nn_estimators = np.arange(300, 450, 50)\nmax_depth = np.arange(3, 5, 1)\nmin_samples_split = np.arange(3, 4, 1)\nlearning_rate = np.arange(0.001, 0.004, 0.001)\na = expandgrid(n_estimators, max_depth, min_samples_split, learning_rate)\nparams = pd.DataFrame.from_dict(a)\nlen(params)\nValAcc = list(range(0, len(params)))\nfor i in range(0, len(params)):\n scores = cross_val_score(HistGradientBoostingClassifier(\n min_samples_leaf=params['Var3'].iloc[i], max_depth=params['Var2'].\n iloc[i], learning_rate=params['Var4'].iloc[i], max_iter=params[\n 'Var1'].iloc[i]).fit(x_train, y_train), x_train, y_train, cv=4,\n scoring='accuracy')\n acc = st.mean(scores)\n ValAcc[i] = acc\nValAcc\nmax(ValAcc)\npars = list(params.iloc[ValAcc == max(ValAcc)].iloc[0])\npars.append(max(ValAcc))\npars\nbestPos = np.array(np.where(np.array(ValAcc) == max(ValAcc))).tolist()[0][0]\nbestPos\nbestMod = HistGradientBoostingClassifier(min_samples_leaf=params['Var3'].\n iloc[bestPos], max_depth=params['Var2'].iloc[bestPos], learning_rate=\n params['Var4'].iloc[bestPos], max_iter=params['Var1'].iloc[bestPos]).fit(\n x_train, y_train)\nbestMod.predict(x_test)\nlen(y_test[bestMod.predict(x_test) == y_test]) / len(y_test)\ndf_i = pd.DataFrame({'x1': np.mean(heart['x1']), 'x2': np.mean(heart['x2']),\n 'x3': np.mean(heart['x3']), 'x4': np.mean(heart['x4']), 'x5': np.mean(\n heart['x5'])}, index=[0])\nif 
bestMod.predict(df_i) == 0:\n print('Predicted: No Heart Disease')\nelse:\n print('Predicted: Has Heart Disease')\nnorm.rvs(size=10000, loc=3, scale=8)\nx1 = 190\nmean = np.mean(heart['x1'])\nsd = np.std(heart['x1'])\nmeanx1_2 = x1\nxActual = norm.rvs(size=len(heart), loc=mean, scale=sd)\nxInput = norm.rvs(size=len(heart), loc=meanx1_2, scale=sd)\ngroup_labels = ['actual', 'center_selected']\nhist_data = [xActual, xInput]\nfig = ff.create_distplot(hist_data, group_labels)\nfig.show()\n",
"step-4": "import sklearn\nimport pandas as pd\nimport numpy as np\nfrom sklearn import datasets, ensemble\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nimport statistics as st\nimport itertools\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.experimental import enable_hist_gradient_boosting\nfrom sklearn.ensemble import HistGradientBoostingRegressor\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom statsmodels import regression as reg\nimport statsmodels.api as regMods\nfrom scipy.stats import norm\nfrom scipy.stats import gamma\nfrom scipy.stats import expon\nfrom scipy.stats import poisson\nfrom scipy.stats import binom\nfrom scipy.stats import t\nimport plotly.express as px\nimport plotly.figure_factory as ff\nheart = pd.read_csv(\n 'C:\\\\Users\\\\fredr\\\\Documents\\\\StatTool\\\\BZAN540\\\\Homework\\\\HW6\\\\HeartDisease.csv'\n )\nheart.columns\ntrain, test = train_test_split(heart[['x1', 'x2', 'x3', 'x4', 'x5',\n 'HeartDisease']], test_size=0.2)\ny_train = train['HeartDisease']\nx_train = train[['x1', 'x2', 'x3', 'x4', 'x5']]\nx_test = test[['x1', 'x2', 'x3', 'x4', 'x5']]\ny_test = test['HeartDisease']\n\n\ndef expandgrid(*itrs):\n product = list(itertools.product(*itrs))\n return {'Var{}'.format(i + 1): [x[i] for x in product] for i in range(\n len(itrs))}\n\n\nn_estimators = np.arange(300, 450, 50)\nmax_depth = np.arange(3, 5, 1)\nmin_samples_split = np.arange(3, 4, 1)\nlearning_rate = np.arange(0.001, 0.004, 0.001)\na = expandgrid(n_estimators, max_depth, min_samples_split, learning_rate)\nparams = pd.DataFrame.from_dict(a)\nlen(params)\nValAcc = list(range(0, len(params)))\nfor i in range(0, len(params)):\n scores = cross_val_score(HistGradientBoostingClassifier(\n min_samples_leaf=params['Var3'].iloc[i], max_depth=params['Var2'].\n iloc[i], learning_rate=params['Var4'].iloc[i], max_iter=params[\n 'Var1'].iloc[i]).fit(x_train, y_train), x_train, y_train, cv=4,\n 
scoring='accuracy')\n acc = st.mean(scores)\n ValAcc[i] = acc\nValAcc\nmax(ValAcc)\npars = list(params.iloc[ValAcc == max(ValAcc)].iloc[0])\npars.append(max(ValAcc))\npars\nbestPos = np.array(np.where(np.array(ValAcc) == max(ValAcc))).tolist()[0][0]\nbestPos\nbestMod = HistGradientBoostingClassifier(min_samples_leaf=params['Var3'].\n iloc[bestPos], max_depth=params['Var2'].iloc[bestPos], learning_rate=\n params['Var4'].iloc[bestPos], max_iter=params['Var1'].iloc[bestPos]).fit(\n x_train, y_train)\nbestMod.predict(x_test)\nlen(y_test[bestMod.predict(x_test) == y_test]) / len(y_test)\ndf_i = pd.DataFrame({'x1': np.mean(heart['x1']), 'x2': np.mean(heart['x2']),\n 'x3': np.mean(heart['x3']), 'x4': np.mean(heart['x4']), 'x5': np.mean(\n heart['x5'])}, index=[0])\nif bestMod.predict(df_i) == 0:\n print('Predicted: No Heart Disease')\nelse:\n print('Predicted: Has Heart Disease')\nnorm.rvs(size=10000, loc=3, scale=8)\nx1 = 190\nmean = np.mean(heart['x1'])\nsd = np.std(heart['x1'])\nmeanx1_2 = x1\nxActual = norm.rvs(size=len(heart), loc=mean, scale=sd)\nxInput = norm.rvs(size=len(heart), loc=meanx1_2, scale=sd)\ngroup_labels = ['actual', 'center_selected']\nhist_data = [xActual, xInput]\nfig = ff.create_distplot(hist_data, group_labels)\nfig.show()\n",
"step-5": "import sklearn\r\nimport pandas as pd \r\n\r\nimport numpy as np\r\nfrom sklearn import datasets, ensemble\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.model_selection import train_test_split\r\nimport statistics as st\r\nimport itertools\r\nfrom sklearn.model_selection import cross_val_score\r\nfrom sklearn.experimental import enable_hist_gradient_boosting \r\nfrom sklearn.ensemble import HistGradientBoostingRegressor\r\nfrom sklearn.ensemble import HistGradientBoostingClassifier\r\nfrom statsmodels import regression as reg\r\nimport statsmodels.api as regMods \r\nfrom scipy.stats import norm\r\nfrom scipy.stats import gamma\r\nfrom scipy.stats import expon\r\nfrom scipy.stats import poisson\r\nfrom scipy.stats import binom\r\nfrom scipy.stats import t \r\nimport plotly.express as px\r\nimport plotly.figure_factory as ff\r\n\r\nheart=pd.read_csv(r\"C:\\Users\\fredr\\Documents\\StatTool\\BZAN540\\Homework\\HW6\\HeartDisease.csv\") \r\nheart.columns\r\n\r\ntrain, test = train_test_split(heart[['x1', 'x2', 'x3', 'x4', 'x5','HeartDisease']], test_size=0.2)\r\ny_train=train['HeartDisease']\r\nx_train=train[['x1', 'x2', 'x3', 'x4', 'x5']]\r\n\r\nx_test=test[['x1', 'x2', 'x3', 'x4', 'x5']]\r\ny_test=test['HeartDisease']\r\n\r\n#boosting to predict heart disease \r\n\r\n#make expand grid function to get all combos of the parameters \r\ndef expandgrid(*itrs):\r\n product = list(itertools.product(*itrs))\r\n return {'Var{}'.format(i+1):[x[i] for x in product] for i in range(len(itrs))}\r\n\r\n#set the range for the parameter values:\r\nn_estimators=np.arange(300, 450, 50) #the number of trees to fit \r\nmax_depth=np.arange(3, 5, 1)\r\nmin_samples_split=np.arange(3,4,1)\r\nlearning_rate=np.arange(0.001,0.004,0.001)\r\na=expandgrid(n_estimators,max_depth, min_samples_split,learning_rate)\r\nparams=pd.DataFrame.from_dict(a)\r\nlen(params)\r\n\r\n#time the code ??? 
\r\n#looping through the possible parameters for the model and store the estimated validation rmse\r\nValAcc=list(range(0,len(params)))\r\nfor i in range(0,len(params)):\r\n scores = cross_val_score(HistGradientBoostingClassifier(min_samples_leaf=params['Var3'].iloc[i],\r\n max_depth=params['Var2'].iloc[i],\r\n learning_rate=params['Var4'].iloc[i],max_iter=params['Var1'].iloc[i]).fit(x_train, y_train), \r\n x_train, y_train, cv=4,scoring='accuracy')\r\n acc=st.mean(scores)\r\n ValAcc[i]=acc\r\n\r\nValAcc\r\nmax(ValAcc)\r\npars=list(params.iloc[ValAcc==max(ValAcc)].iloc[0])\r\npars.append(max(ValAcc))\r\npars\r\nbestPos=np.array(np.where(np.array(ValAcc)==max(ValAcc))).tolist()[0][0]\r\n#fit the best model on Train then predict on Test if mean acc close to val then fit on entire data \r\nbestPos\r\n\r\nbestMod=HistGradientBoostingClassifier(min_samples_leaf=params['Var3'].iloc[bestPos],\r\n max_depth=params['Var2'].iloc[bestPos],\r\n learning_rate=params['Var4'].iloc[bestPos],max_iter=params['Var1'].iloc[bestPos]).fit(x_train, y_train)\r\n\r\n#gets the predicted values on the test data \r\nbestMod.predict(x_test)\r\nlen(y_test[bestMod.predict(x_test)==y_test])/len(y_test) #67% acc on test \r\n#create a dataset with one row and each col is a ind var from model fit above, then input data per var to fill df then predict y on the values in this df \r\ndf_i=pd.DataFrame({'x1':np.mean(heart['x1']), 'x2':np.mean(heart['x2']),'x3':np.mean(heart['x3']),'x4':np.mean(heart['x4']),'x5':np.mean(heart['x5'])},index=[0])\r\n\r\nif(bestMod.predict(df_i)==0):\r\n print('Predicted: No Heart Disease')\r\nelse:\r\n print('Predicted: Has Heart Disease')\r\n\r\n\r\n#plot two densities centered on the mean of the var and the selected value of the var for all vars \r\n#start with treating each var as a normal distro then plot a density curve where the \r\n#mean is the mean of the var and another curve on same plot where the mean is the selected value from the input of a normal distro set 
sd to the sd of the var \r\n#for both in the plots, generate random vars the size of the data, except for history heart disease treat as beta with p=actuap prob for var and p=random value that is\r\n#greater than .5 \r\n\r\n#generates random values from a normal distro with mean=loc and sd=scale \r\nnorm.rvs(size=10000,loc=3,scale=8)\r\n#x1:\r\nx1=190\r\nmean=np.mean(heart['x1'])\r\nsd=np.std(heart['x1'])\r\nmeanx1_2=x1\r\nxActual=norm.rvs(size=len(heart),loc=mean,scale=sd)\r\nxInput=norm.rvs(size=len(heart),loc=meanx1_2,scale=sd)\r\n\r\ngroup_labels = ['actual','center_selected']\r\nhist_data=[xActual,xInput]\r\nfig = ff.create_distplot(hist_data,group_labels)\r\nfig.show()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from rest_framework.pagination import PageNumberPagination
class QuoteListPagination(PageNumberPagination):
    """Page-number pagination for quote list endpoints, 30 items per page."""

    # Number of quotes returned per page.
    page_size = 30
|
normal
|
{
"blob_id": "4245da12eb7f9dd08c863e368efbd0bcf0b8fa04",
"index": 6816,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass QuoteListPagination(PageNumberPagination):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass QuoteListPagination(PageNumberPagination):\n page_size = 30\n",
"step-4": "from rest_framework.pagination import PageNumberPagination\n\n\nclass QuoteListPagination(PageNumberPagination):\n page_size = 30\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def do_upgrade(env, ver, cursor):
"""Change schema name from taskboard_schema to agiletools_version
"""
cursor.execute('UPDATE system SET name=%s WHERE name=%s', (
'agiletools_version', 'taskboard_schema'))
<|reserved_special_token_1|>
from trac.db import DatabaseManager
def do_upgrade(env, ver, cursor):
"""Change schema name from taskboard_schema to agiletools_version
"""
cursor.execute('UPDATE system SET name=%s WHERE name=%s', (
'agiletools_version', 'taskboard_schema'))
<|reserved_special_token_1|>
from trac.db import DatabaseManager
def do_upgrade(env, ver, cursor):
"""Change schema name from taskboard_schema to agiletools_version
"""
cursor.execute('UPDATE system SET name=%s WHERE name=%s',
("agiletools_version", "taskboard_schema"))
|
flexible
|
{
"blob_id": "56ed5bb22d77f4d8c061f97d832a60ed9a106549",
"index": 5231,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef do_upgrade(env, ver, cursor):\n \"\"\"Change schema name from taskboard_schema to agiletools_version\n \"\"\"\n cursor.execute('UPDATE system SET name=%s WHERE name=%s', (\n 'agiletools_version', 'taskboard_schema'))\n",
"step-3": "from trac.db import DatabaseManager\n\n\ndef do_upgrade(env, ver, cursor):\n \"\"\"Change schema name from taskboard_schema to agiletools_version\n \"\"\"\n cursor.execute('UPDATE system SET name=%s WHERE name=%s', (\n 'agiletools_version', 'taskboard_schema'))\n",
"step-4": "from trac.db import DatabaseManager\n\ndef do_upgrade(env, ver, cursor):\n \"\"\"Change schema name from taskboard_schema to agiletools_version\n \"\"\"\n cursor.execute('UPDATE system SET name=%s WHERE name=%s',\n (\"agiletools_version\", \"taskboard_schema\"))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
main(sys.argv)
<|reserved_special_token_1|>
import sys
from ulang.runtime.main import main
main(sys.argv)
|
flexible
|
{
"blob_id": "e0c5498d9b18a6a32fcd2725ef4f6a1adaef6c68",
"index": 2098,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmain(sys.argv)\n",
"step-3": "import sys\nfrom ulang.runtime.main import main\nmain(sys.argv)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__all__ = [
'mesh_add_vertex_to_face_edge'
]
def mesh_add_vertex_to_face_edge(mesh, key, fkey, v):
    """Add an existing vertex of the mesh to an existing face.

    The vertex *key* is inserted into the cycle of face *fkey* immediately
    before vertex *v*, splitting the halfedge ``u -> v`` (where *u* is the
    predecessor of *v*) into ``u -> key -> v``.

    Parameters
    ----------
    mesh : compas.datastructures.Mesh
        The mesh data structure.
    key : hashable
        The identifier of the vertex.
    fkey : hashable
        The identifier of the face.
    v : hashable
        The identifier of the vertex before which the new vertex should be added.

    Notes
    -----
    The algorithm is merely there for convenience.
    It does not check if the resulting mesh is still valid.
    It assumes ``mesh.halfedge`` already contains a (possibly empty)
    adjacency dict for *key*.

    Examples
    --------
    Consider the following points and one face definition and the resulting mesh.

    >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]
    >>> faces = [[0, 1, 2, 3]]
    >>> mesh = Mesh.from_vertices_and_faces(points, faces)
    >>> mesh.number_of_vertices()
    5
    >>> mesh.number_of_faces()
    1
    >>> mesh.face_degree(0)
    4
    >>> mesh.vertex_degree(4)
    0

    To add the isolated vertex to the single mesh face, before vertex 1

    >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 1)
    >>> mesh.face_degree(0)
    5
    >>> mesh.vertex_degree(4)
    2

    """
    vertices = mesh.face_vertices(fkey)
    i = vertices.index(v)
    u = vertices[i - 1]
    # Insert *key* at the position of *v*, shifting *v* to the right.
    # (Bug fix: the original called ``vertices.insert(key, i - 1)``, which
    # inserted the integer ``i - 1`` at index ``key``.)
    vertices.insert(i, key)
    # Register the two new halfedges u -> key and key -> v on this face.
    mesh.halfedge[u][key] = fkey
    mesh.halfedge[key][v] = fkey
    # Ensure the opposite halfedges exist, unclaimed by any face.
    if u not in mesh.halfedge[key]:
        mesh.halfedge[key][u] = None
    if key not in mesh.halfedge[v]:
        mesh.halfedge[v][key] = None
    # The direct halfedge u -> v (and its opposite, if unclaimed) is gone.
    del mesh.halfedge[u][v]
    if u in mesh.halfedge[v]:
        del mesh.halfedge[v][u]
    # Drop any edge attributes stored for the removed edge, in either order.
    if (u, v) in mesh.edgedata:
        del mesh.edgedata[u, v]
    if (v, u) in mesh.edgedata:
        del mesh.edgedata[v, u]
# ==============================================================================
# Main
# ==============================================================================

if __name__ == "__main__":
    # No standalone behaviour; the module only exports the function above.
    pass
normal
|
{
"blob_id": "d9b6efce92e30267a9f992c4fea698fe14e0c3e4",
"index": 1398,
"step-1": "<mask token>\n\n\ndef mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n \"\"\"Add an existing vertex of the mesh to an existing face.\n\n Parameters\n ----------\n mesh : compas.datastructures.Mesh\n The mesh data structure.\n key : hashable\n The identifier of the vertex.\n fkey : hashable\n The identifier of the face.\n v : hashable\n The identifier of the vertex before which the new vertex should be added.\n\n Notes\n -----\n The algorithm is merely there for convenience.\n It does not check if the resulting mesh is still valid.\n\n Examples\n --------\n Consider the following points and one face definition and the resulting mesh.\n\n >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]\n >>> faces = [[0, 1, 2, 3]]\n >>> mesh = Mesh.from_vertices_and_faces(points, faces)\n >>> mesh.number_of_vertices()\n 5\n >>> mesh.number_of_faces()\n 1\n >>> mesh.face_degree(0)\n 4\n >>> mesh.vertex_degree(4)\n 0\n\n To add the isolated vertex to the single mesh face\n\n >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)\n >>> mesh.face_degree(0)\n 5\n >>> mesh.vertex_degree(4)\n 2\n\n \"\"\"\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n \"\"\"Add an existing vertex of the mesh to an existing face.\n\n Parameters\n ----------\n mesh : compas.datastructures.Mesh\n The mesh data structure.\n key : hashable\n The identifier of the vertex.\n fkey : hashable\n The identifier of the face.\n v : hashable\n The identifier of the vertex before which the new vertex should be added.\n\n Notes\n -----\n The algorithm is merely there for convenience.\n It does not check if the resulting mesh is still valid.\n\n Examples\n --------\n Consider the following points and one face definition and the resulting mesh.\n\n >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]\n >>> faces = [[0, 1, 2, 3]]\n >>> mesh = Mesh.from_vertices_and_faces(points, faces)\n >>> mesh.number_of_vertices()\n 5\n >>> mesh.number_of_faces()\n 1\n >>> mesh.face_degree(0)\n 4\n >>> mesh.vertex_degree(4)\n 0\n\n To add the isolated vertex to the single mesh face\n\n >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)\n >>> mesh.face_degree(0)\n 5\n >>> mesh.vertex_degree(4)\n 2\n\n \"\"\"\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]\n\n\nif __name__ == '__main__':\n pass\n",
"step-3": "<mask token>\n__all__ = ['mesh_add_vertex_to_face_edge']\n\n\ndef mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n \"\"\"Add an existing vertex of the mesh to an existing face.\n\n Parameters\n ----------\n mesh : compas.datastructures.Mesh\n The mesh data structure.\n key : hashable\n The identifier of the vertex.\n fkey : hashable\n The identifier of the face.\n v : hashable\n The identifier of the vertex before which the new vertex should be added.\n\n Notes\n -----\n The algorithm is merely there for convenience.\n It does not check if the resulting mesh is still valid.\n\n Examples\n --------\n Consider the following points and one face definition and the resulting mesh.\n\n >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]\n >>> faces = [[0, 1, 2, 3]]\n >>> mesh = Mesh.from_vertices_and_faces(points, faces)\n >>> mesh.number_of_vertices()\n 5\n >>> mesh.number_of_faces()\n 1\n >>> mesh.face_degree(0)\n 4\n >>> mesh.vertex_degree(4)\n 0\n\n To add the isolated vertex to the single mesh face\n\n >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)\n >>> mesh.face_degree(0)\n 5\n >>> mesh.vertex_degree(4)\n 2\n\n \"\"\"\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n__all__ = ['mesh_add_vertex_to_face_edge']\n\n\ndef mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n \"\"\"Add an existing vertex of the mesh to an existing face.\n\n Parameters\n ----------\n mesh : compas.datastructures.Mesh\n The mesh data structure.\n key : hashable\n The identifier of the vertex.\n fkey : hashable\n The identifier of the face.\n v : hashable\n The identifier of the vertex before which the new vertex should be added.\n\n Notes\n -----\n The algorithm is merely there for convenience.\n It does not check if the resulting mesh is still valid.\n\n Examples\n --------\n Consider the following points and one face definition and the resulting mesh.\n\n >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]\n >>> faces = [[0, 1, 2, 3]]\n >>> mesh = Mesh.from_vertices_and_faces(points, faces)\n >>> mesh.number_of_vertices()\n 5\n >>> mesh.number_of_faces()\n 1\n >>> mesh.face_degree(0)\n 4\n >>> mesh.vertex_degree(4)\n 0\n\n To add the isolated vertex to the single mesh face\n\n >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)\n >>> mesh.face_degree(0)\n 5\n >>> mesh.vertex_degree(4)\n 2\n\n \"\"\"\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\n\n__all__ = [\n 'mesh_add_vertex_to_face_edge'\n]\n\n\ndef mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n \"\"\"Add an existing vertex of the mesh to an existing face.\n\n Parameters\n ----------\n mesh : compas.datastructures.Mesh\n The mesh data structure.\n key : hashable\n The identifier of the vertex.\n fkey : hashable\n The identifier of the face.\n v : hashable\n The identifier of the vertex before which the new vertex should be added.\n\n Notes\n -----\n The algorithm is merely there for convenience.\n It does not check if the resulting mesh is still valid.\n\n Examples\n --------\n Consider the following points and one face definition and the resulting mesh.\n\n >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]\n >>> faces = [[0, 1, 2, 3]]\n >>> mesh = Mesh.from_vertices_and_faces(points, faces)\n >>> mesh.number_of_vertices()\n 5\n >>> mesh.number_of_faces()\n 1\n >>> mesh.face_degree(0)\n 4\n >>> mesh.vertex_degree(4)\n 0\n\n To add the isolated vertex to the single mesh face\n\n >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)\n >>> mesh.face_degree(0)\n 5\n >>> mesh.vertex_degree(4)\n 2\n\n \"\"\"\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ 
== \"__main__\":\n pass\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('core',
'0020_festival_boxoffice_close_festival_boxoffice_open')]
operations = [migrations.AlterModelOptions(name='user', options={})]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('core',
'0020_festival_boxoffice_close_festival_boxoffice_open')]
operations = [migrations.AlterModelOptions(name='user', options={})]
<|reserved_special_token_1|>
# Generated by Django 4.0.5 on 2023-02-14 18:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0020_festival_boxoffice_close_festival_boxoffice_open'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={},
),
]
|
flexible
|
{
"blob_id": "e9bf5a40360d35f32bd2ad5aa404225f49895a14",
"index": 4221,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core',\n '0020_festival_boxoffice_close_festival_boxoffice_open')]\n operations = [migrations.AlterModelOptions(name='user', options={})]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core',\n '0020_festival_boxoffice_close_festival_boxoffice_open')]\n operations = [migrations.AlterModelOptions(name='user', options={})]\n",
"step-5": "# Generated by Django 4.0.5 on 2023-02-14 18:57\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0020_festival_boxoffice_close_festival_boxoffice_open'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='user',\n options={},\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import cv2
from DataTypes import FishPosition
class FishSensor(object):
    """Track a colour-keyed fish in frames from the default webcam.

    Each frame is thresholded against a fixed BGR colour window; the centroid
    of the matching pixels is reported as a position normalised to [-1.0, 1.0]
    on both axes, wrapped in a FishPosition.
    """

    def __init__(self):
        # Property ids 3/4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT;
        # the normalisation in poll() assumes this 280x192 resolution.
        self.cap = cv2.VideoCapture(0)
        self.cap.set(3, 280)
        self.cap.set(4, 192)

        #cv2.namedWindow("image")

        # BGR threshold window for the fish colour (hand-tuned; earlier
        # candidate values kept commented for reference).
        #lower_b, lower_g, lower_r = 0, 0, 80
        lower_b, lower_g, lower_r = 0, 55, 130
        #upper_b, upper_g, upper_r = 130, 75, 115
        upper_b, upper_g, upper_r = 100, 145, 195

        self.lower = np.array([lower_b, lower_g, lower_r], dtype='uint8')
        self.upper = np.array([upper_b, upper_g, upper_r], dtype='uint8')

        # Last known position, replayed for a few frames when detection fails.
        self.old_x, self.old_y = 0.0, 0.0
        self.old_count = 0

    def poll(self):
        """Grab one frame and return the fish position as a FishPosition.

        On a successful detection the centroid of in-range pixels is
        normalised so the frame centre maps to (0, 0).  If nothing matches,
        the previous position is repeated for up to 5 polls before falling
        back to the centre.
        """
        ret, frame = self.cap.read()
        # NOTE(review): `ret` is not checked; a failed read would pass None
        # to cv2.inRange — confirm whether that can happen in deployment.
        mask = cv2.inRange(frame, self.lower, self.upper)
        idx_rows, idx_cols = np.where(mask)
        # Fixed: was `len(idx_rows > 0)`, which only worked by coincidence
        # (len of the boolean array equals len of the array itself).
        if len(idx_rows) > 0:
            row = int(round(idx_rows.mean()))
            col = int(round(idx_cols.mean()))
            # Draw the detection for debugging; only visible if the imshow
            # lines below are re-enabled.
            marked_frame = cv2.circle(frame, (col, row), 5, (0, 0, 255), -1)
            x = float(col)/(280/2)-1.0
            y = float(row)/(192/2)-1.0
            self.old_x = x
            self.old_y = y
            self.old_count = 0
        else:
            # No match: hold the last position briefly, then report centre.
            if self.old_count > 5:
                x = 0.0
                y = 0.0
            else:
                x = self.old_x
                y = self.old_y
            self.old_count += 1
        #cv2.imshow("image", frame)
        #key = cv2.waitKey(1)
        return FishPosition(x=x, y=y)
if __name__ == "__main__":
    # Interactive calibration tool (Python 2): shows the camera feed and
    # lets you tune the BGR threshold window with single-key presses.
    cap = cv2.VideoCapture(0)
    cap.set(3, 280)
    cap.set(4, 192)

    def onClick(event, x, y, flags, param):
        # Print the BGR value under the cursor to help pick thresholds.
        if event == cv2.EVENT_LBUTTONDOWN:
            print x, y, frame[y, x]

    cv2.namedWindow("image")
    cv2.setMouseCallback("image", onClick)

    #lower_b, lower_g, lower_r = 0, 0, 80
    lower_b, lower_g, lower_r = 0, 55, 130
    #upper_b, upper_g, upper_r = 130, 75, 115
    upper_b, upper_g, upper_r = 100, 145, 195
    mode = 0  # 0 = show raw frame, 1 = show masked view (toggled with 'm')

    while True:
        ret, frame = cap.read()
        lower = np.array([lower_b, lower_g, lower_r], dtype='uint8')
        upper = np.array([upper_b, upper_g, upper_r], dtype='uint8')
        mask = cv2.inRange(frame, lower, upper)

        idx_rows, idx_cols = np.where(mask)
        # NOTE(review): len(idx_rows > 0) applies the element-wise comparison
        # first; it happens to behave like len(idx_rows) > 0, which was
        # probably what was intended.
        if len(idx_rows > 0):
            row = int(round(idx_rows.mean()))
            col = int(round(idx_cols.mean()))
            marked_frame = cv2.circle(frame, (col, row), 5, (0, 0, 255), -1)
            # Print the normalized [-1, 1] centroid, same scaling as
            # FishSensor.poll.
            print "%.3f, %.3f" % (float(col) / (280.0/2) - 1,
                                  float(row) / (192.0/2) - 1)
            #cv2.imshow("image", marked_frame)
        else:
            pass
            #cv2.imshow("image", frame)

        if mode:
            output = cv2.bitwise_and(frame, frame, mask=mask)
            cv2.imshow("image", output)
        else:
            cv2.imshow("image", frame)

        # Keyboard calibration: w/s, e/d, r/f step the lower B/G/R bounds
        # up/down by 5; t/g, y/h, u/j step the upper bounds; 'm' toggles the
        # mask view; any letter also prints the current bounds; 'q' quits.
        key = cv2.waitKey(1)
        if key & 0xFF == ord('q'):
            break
        if key & 0xFF == ord('w'):
            lower_b += 5
        if key & 0xFF == ord('s'):
            lower_b -= 5
        if key & 0xFF == ord('e'):
            lower_g += 5
        if key & 0xFF == ord('d'):
            lower_g -= 5
        if key & 0xFF == ord('r'):
            lower_r += 5
        if key & 0xFF == ord('f'):
            lower_r -= 5
        if key & 0xFF == ord('t'):
            upper_b += 5
        if key & 0xFF == ord('g'):
            upper_b -= 5
        if key & 0xFF == ord('y'):
            upper_g += 5
        if key & 0xFF == ord('h'):
            upper_g -= 5
        if key & 0xFF == ord('u'):
            upper_r += 5
        if key & 0xFF == ord('j'):
            upper_r -= 5
        if key & 0xFF == ord('m'):
            mode = 1 if mode == 0 else 0
        if ord('a') <= (key & 0xFF) <= ord('z'):
            print (lower_b, lower_g, lower_r), (upper_b, upper_g, upper_r)

    cap.release()
    cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "9cea27abebda10deefa9e05ddefa72c893b1eb18",
"index": 1676,
"step-1": "import numpy as np\nimport cv2\nfrom DataTypes import FishPosition\n\nclass FishSensor(object):\n def __init__(self):\n\t self.cap = cv2.VideoCapture(0)\n\t self.cap.set(3, 280)\n\t self.cap.set(4, 192)\n\n\t #cv2.namedWindow(\"image\")\n\n\t #lower_b, lower_g, lower_r = 0, 0, 80\n lower_b, lower_g, lower_r = 0, 55, 130\n\t #upper_b, upper_g, upper_r = 130, 75, 115\n\t upper_b, upper_g, upper_r = 100, 145, 195\n\t self.lower = np.array([lower_b, lower_g, lower_r], dtype='uint8')\n\t self.upper = np.array([upper_b, upper_g, upper_r], dtype='uint8')\n\n\t self.old_x, self.old_y = 0.0, 0.0\n self.old_count = 0\n\n def poll(self):\n ret, frame = self.cap.read()\n mask = cv2.inRange(frame, self.lower, self.upper)\n\n\tidx_rows, idx_cols = np.where(mask)\n\tif len(idx_rows > 0):\n\t\trow = int(round(idx_rows.mean()))\n\t\tcol = int(round(idx_cols.mean()))\n\t\tmarked_frame = cv2.circle(frame, (col, row), 5, (0, 0, 255), -1)\n\t\t\n\t x = float(col)/(280/2)-1.0\n y = float(row)/(192/2)-1.0\n\t\tself.old_x = x\n\t\tself.old_y = y\n self.old_count = 0\n\telse:\n\t\tif self.old_count > 5:\n\t\t\tx = 0.0\n\t\t\ty = 0.0\n\t\telse:\n\t\t\tx = self.old_x\n\t\t\ty = self.old_y\n self.old_count += 1\n\t\t\n\t#cv2.imshow(\"image\", frame)\n\t#key = cv2.waitKey(1)\n\treturn FishPosition(x=x, y=y)\n\nif __name__ == \"__main__\":\n cap = cv2.VideoCapture(0)\n cap.set(3, 280)\n cap.set(4, 192)\n\n def onClick(event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n print x, y, frame[y, x]\n\n cv2.namedWindow(\"image\")\n cv2.setMouseCallback(\"image\", onClick)\n\n #lower_b, lower_g, lower_r = 0, 0, 80\n lower_b, lower_g, lower_r = 0, 55, 130\n #upper_b, upper_g, upper_r = 130, 75, 115\n upper_b, upper_g, upper_r = 100, 145, 195\n mode = 0\n\n while True:\n ret, frame = cap.read()\n lower = np.array([lower_b, lower_g, lower_r], dtype='uint8')\n upper = np.array([upper_b, upper_g, upper_r], dtype='uint8')\n mask = cv2.inRange(frame, lower, upper)\n\n\tidx_rows, 
idx_cols = np.where(mask)\n\tif len(idx_rows > 0):\n\t\trow = int(round(idx_rows.mean()))\n\t\tcol = int(round(idx_cols.mean()))\n\t\tmarked_frame = cv2.circle(frame, (col, row), 5, (0, 0, 255), -1)\n\t\tprint \"%.3f, %.3f\" % (float(col) / (280.0/2) - 1,\n\t\t\t\t float(row) / (192.0/2) - 1)\n\t\t#cv2.imshow(\"image\", marked_frame)\n\telse:\n\t\tpass\n\t\t#cv2.imshow(\"image\", frame)\n\t\n\tif mode:\n\t\toutput = cv2.bitwise_and(frame, frame, mask=mask)\n\t\tcv2.imshow(\"image\", output)\n\telse:\n\t\tcv2.imshow(\"image\", frame)\n\n\tkey = cv2.waitKey(1)\n\tif key & 0xFF == ord('q'):\n\t\tbreak\n\tif key & 0xFF == ord('w'):\n\t\tlower_b += 5\n\tif key & 0xFF == ord('s'):\n\t\tlower_b -= 5\n\tif key & 0xFF == ord('e'):\n\t\tlower_g += 5\n\tif key & 0xFF == ord('d'):\n\t\tlower_g -= 5\n\tif key & 0xFF == ord('r'):\n\t\tlower_r += 5\n\tif key & 0xFF == ord('f'):\n\t\tlower_r -= 5\n\tif key & 0xFF == ord('t'):\n\t\tupper_b += 5\n\tif key & 0xFF == ord('g'):\n\t\tupper_b -= 5\n\tif key & 0xFF == ord('y'):\n\t\tupper_g += 5\n\tif key & 0xFF == ord('h'):\n\t\tupper_g -= 5\n\tif key & 0xFF == ord('u'):\n\t\tupper_r += 5\n\tif key & 0xFF == ord('j'):\n\t\tupper_r -= 5\n\tif key & 0xFF == ord('m'):\n\t\tmode = 1 if mode == 0 else 0\n\tif ord('a') <= (key & 0xFF) <= ord('z'):\n\t\tprint (lower_b, lower_g, lower_r), (upper_b, upper_g, upper_r)\n\n cap.release()\n cv2.destroyAllWindows()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.urls import path
from . import views # 현재 패키지에서 views 모듈을 가져옴
# URL routes for this app: the landing page plus two presentation pages.
urlpatterns = [
    path('', views.home, name='home'),
    path('ppt1',views.ppt1,name='ppt1'),
    path('ppt2',views.ppt2,name='ppt2'),
]
|
normal
|
{
"blob_id": "9db1887c5379623687d1dea343d72122bab66303",
"index": 2143,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.home, name='home'), path('ppt1', views.ppt1,\n name='ppt1'), path('ppt2', views.ppt2, name='ppt2')]\n",
"step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('', views.home, name='home'), path('ppt1', views.ppt1,\n name='ppt1'), path('ppt2', views.ppt2, name='ppt2')]\n",
"step-4": "from django.urls import path\n\nfrom . import views # 현재 패키지에서 views 모듈을 가져옴\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('ppt1',views.ppt1,name='ppt1'),\n path('ppt2',views.ppt2,name='ppt2'),\n\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from rest_framework import serializers
from django.contrib import auth
from rest_framework.exceptions import ValidationError
from django.contrib.auth.password_validation import validate_password
from django.utils.translation import gettext as _
from rest_users.utils.api import _build_initial_user
User = auth.get_user_model()
class LoginUserSerializer(serializers.Serializer):
    """Accepts a login identifier plus password for authentication."""

    login = serializers.CharField()
    password = serializers.CharField()

    def get_authenticated_user(self):
        """Authenticate against the username field, then the email field.

        Returns the authenticated user, or None when neither matches.
        Call only after ``is_valid()``.
        """
        login = self.validated_data['login']
        password = self.validated_data['password']
        for field_name in (User.USERNAME_FIELD, User.EMAIL_FIELD):
            credentials = {field_name: login, 'password': password}
            authenticated = auth.authenticate(**credentials)
            if authenticated:
                return authenticated
        return None
class LogoutSerializer(serializers.Serializer):
    # NOTE(review): presumably tells the logout view to also revoke the
    # caller's auth token when true — confirm against the view using this.
    revoke_token = serializers.BooleanField(default=False)
class UserGetProfileSerializer(serializers.ModelSerializer):
    """Serializes a User profile, exposing every field except the password."""

    class Meta:
        model = User
        exclude = ('password',)
class UserProfileSerializer(serializers.ModelSerializer):
    """Serializes a User profile, excluding the password field.

    NOTE(review): currently identical to UserGetProfileSerializer in this
    file; presumably kept separate so read/update views can diverge —
    confirm before merging the two.
    """

    class Meta:
        model = User
        exclude = ('password',)
class RegisterUserSerializer(serializers.ModelSerializer):
    """Registers a new user; requires a matching password confirmation."""

    class Meta:
        model = User
        fields = '__all__'

    def get_fields(self):
        # Add a write-only confirmation field on top of the model fields.
        declared = super().get_fields()
        declared['password_confirm'] = serializers.CharField(write_only=True)
        return declared

    def validate_password(self, password):
        # Run Django's password validators against a throwaway user built
        # from the raw input, so user-similarity checks can apply.
        validate_password(password, user=_build_initial_user(self.initial_data))
        return password

    def validate(self, attrs):
        if attrs['password_confirm'] != attrs['password']:
            raise ValidationError(_("Passwords don't match"))
        return attrs

    def create(self, validated_data):
        # The confirmation field is not a model field; strip it before
        # delegating to the manager so the password gets hashed properly.
        payload = dict(validated_data)
        payload.pop('password_confirm')
        return self.Meta.model.objects.create_user(**payload)
class ChangePasswordSerializer(serializers.Serializer):
    """Validates a password change for the requesting user.

    Requires the correct old password, a new password that passes Django's
    validators, and a matching confirmation.
    """

    old_password = serializers.CharField()
    password = serializers.CharField()

    def get_fields(self):
        # Inject the confirmation field alongside the declared fields.
        declared = super().get_fields()
        declared['password_confirm'] = serializers.CharField()
        return declared

    def validate_old_password(self, old_password):
        requester = self.context['request'].user
        if not requester.check_password(old_password):
            raise serializers.ValidationError(_("Old password is not correct"))
        return old_password

    def validate_password(self, password):
        # The name here resolves to the module-level Django helper, not this
        # method (class scope does not leak into the call).
        validate_password(password, user=self.context['request'].user)
        return password

    def validate(self, attrs):
        if attrs['password_confirm'] != attrs['password']:
            raise serializers.ValidationError(_("Passwords don't match"))
        return attrs
|
normal
|
{
"blob_id": "88e34878cdad908ed4ac30da82355aaa46ed719b",
"index": 5429,
"step-1": "<mask token>\n\n\nclass RegisterUserSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n def validate_password(self, password):\n user = _build_initial_user(self.initial_data)\n validate_password(password, user=user)\n return password\n\n def get_fields(self):\n fields = super().get_fields()\n fields['password_confirm'] = serializers.CharField(write_only=True)\n return fields\n\n def validate(self, attrs):\n if attrs['password'] != attrs['password_confirm']:\n raise ValidationError(_(\"Passwords don't match\"))\n return attrs\n <mask token>\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField()\n password = serializers.CharField()\n\n def validate_old_password(self, old_password):\n user = self.context['request'].user\n if not user.check_password(old_password):\n raise serializers.ValidationError(_('Old password is not correct'))\n return old_password\n\n def validate_password(self, password):\n user = self.context['request'].user\n validate_password(password, user=user)\n return password\n\n def get_fields(self):\n fields = super().get_fields()\n fields['password_confirm'] = serializers.CharField()\n return fields\n\n def validate(self, attrs):\n if attrs['password'] != attrs['password_confirm']:\n raise serializers.ValidationError(_(\"Passwords don't match\"))\n return attrs\n",
"step-2": "<mask token>\n\n\nclass UserGetProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n exclude = 'password',\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n exclude = 'password',\n\n\nclass RegisterUserSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n def validate_password(self, password):\n user = _build_initial_user(self.initial_data)\n validate_password(password, user=user)\n return password\n\n def get_fields(self):\n fields = super().get_fields()\n fields['password_confirm'] = serializers.CharField(write_only=True)\n return fields\n\n def validate(self, attrs):\n if attrs['password'] != attrs['password_confirm']:\n raise ValidationError(_(\"Passwords don't match\"))\n return attrs\n\n def create(self, validated_data):\n data = validated_data.copy()\n del data['password_confirm']\n return self.Meta.model.objects.create_user(**data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField()\n password = serializers.CharField()\n\n def validate_old_password(self, old_password):\n user = self.context['request'].user\n if not user.check_password(old_password):\n raise serializers.ValidationError(_('Old password is not correct'))\n return old_password\n\n def validate_password(self, password):\n user = self.context['request'].user\n validate_password(password, user=user)\n return password\n\n def get_fields(self):\n fields = super().get_fields()\n fields['password_confirm'] = serializers.CharField()\n return fields\n\n def validate(self, attrs):\n if attrs['password'] != attrs['password_confirm']:\n raise serializers.ValidationError(_(\"Passwords don't match\"))\n return attrs\n",
"step-3": "<mask token>\n\n\nclass LogoutSerializer(serializers.Serializer):\n <mask token>\n\n\nclass UserGetProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n exclude = 'password',\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n exclude = 'password',\n\n\nclass RegisterUserSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n def validate_password(self, password):\n user = _build_initial_user(self.initial_data)\n validate_password(password, user=user)\n return password\n\n def get_fields(self):\n fields = super().get_fields()\n fields['password_confirm'] = serializers.CharField(write_only=True)\n return fields\n\n def validate(self, attrs):\n if attrs['password'] != attrs['password_confirm']:\n raise ValidationError(_(\"Passwords don't match\"))\n return attrs\n\n def create(self, validated_data):\n data = validated_data.copy()\n del data['password_confirm']\n return self.Meta.model.objects.create_user(**data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField()\n password = serializers.CharField()\n\n def validate_old_password(self, old_password):\n user = self.context['request'].user\n if not user.check_password(old_password):\n raise serializers.ValidationError(_('Old password is not correct'))\n return old_password\n\n def validate_password(self, password):\n user = self.context['request'].user\n validate_password(password, user=user)\n return password\n\n def get_fields(self):\n fields = super().get_fields()\n fields['password_confirm'] = serializers.CharField()\n return fields\n\n def validate(self, attrs):\n if attrs['password'] != attrs['password_confirm']:\n raise serializers.ValidationError(_(\"Passwords don't match\"))\n return attrs\n",
"step-4": "<mask token>\n\n\nclass LogoutSerializer(serializers.Serializer):\n revoke_token = serializers.BooleanField(default=False)\n\n\nclass UserGetProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n exclude = 'password',\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n exclude = 'password',\n\n\nclass RegisterUserSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = User\n fields = '__all__'\n\n def validate_password(self, password):\n user = _build_initial_user(self.initial_data)\n validate_password(password, user=user)\n return password\n\n def get_fields(self):\n fields = super().get_fields()\n fields['password_confirm'] = serializers.CharField(write_only=True)\n return fields\n\n def validate(self, attrs):\n if attrs['password'] != attrs['password_confirm']:\n raise ValidationError(_(\"Passwords don't match\"))\n return attrs\n\n def create(self, validated_data):\n data = validated_data.copy()\n del data['password_confirm']\n return self.Meta.model.objects.create_user(**data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n old_password = serializers.CharField()\n password = serializers.CharField()\n\n def validate_old_password(self, old_password):\n user = self.context['request'].user\n if not user.check_password(old_password):\n raise serializers.ValidationError(_('Old password is not correct'))\n return old_password\n\n def validate_password(self, password):\n user = self.context['request'].user\n validate_password(password, user=user)\n return password\n\n def get_fields(self):\n fields = super().get_fields()\n fields['password_confirm'] = serializers.CharField()\n return fields\n\n def validate(self, attrs):\n if attrs['password'] != attrs['password_confirm']:\n raise serializers.ValidationError(_(\"Passwords don't match\"))\n return attrs\n",
"step-5": "from rest_framework import serializers\nfrom django.contrib import auth\nfrom rest_framework.exceptions import ValidationError\nfrom django.contrib.auth.password_validation import validate_password\nfrom django.utils.translation import gettext as _\nfrom rest_users.utils.api import _build_initial_user\n\nUser = auth.get_user_model()\n\n\nclass LoginUserSerializer(serializers.Serializer):\n login = serializers.CharField()\n password = serializers.CharField()\n\n def get_authenticated_user(self):\n login, password = self.validated_data['login'], self.validated_data['password']\n user = None\n login_field_names = [User.USERNAME_FIELD, User.EMAIL_FIELD]\n\n for field_name in login_field_names:\n kwargs = {\n field_name: login,\n 'password': password,\n }\n user = auth.authenticate(**kwargs)\n if user:\n break\n\n return user\n\n\nclass LogoutSerializer(serializers.Serializer):\n revoke_token = serializers.BooleanField(default=False)\n\n\nclass UserGetProfileSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n exclude = ('password',)\n\n\nclass UserProfileSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n exclude = ('password',)\n\n\nclass RegisterUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = '__all__'\n\n def validate_password(self, password):\n user = _build_initial_user(self.initial_data)\n validate_password(password, user=user)\n return password\n\n def get_fields(self):\n fields = super().get_fields()\n fields['password_confirm'] = serializers.CharField(write_only=True)\n return fields\n\n def validate(self, attrs):\n if attrs['password'] != attrs['password_confirm']:\n raise ValidationError(_(\"Passwords don't match\"))\n return attrs\n\n def create(self, validated_data):\n data = validated_data.copy()\n del data['password_confirm']\n return self.Meta.model.objects.create_user(**data)\n\n\nclass ChangePasswordSerializer(serializers.Serializer):\n old_password = 
serializers.CharField()\n password = serializers.CharField()\n\n def validate_old_password(self, old_password):\n user = self.context['request'].user\n if not user.check_password(old_password):\n raise serializers.ValidationError(_(\"Old password is not correct\"))\n return old_password\n\n def validate_password(self, password):\n user = self.context['request'].user\n validate_password(password, user=user)\n return password\n\n def get_fields(self):\n fields = super().get_fields()\n fields['password_confirm'] = serializers.CharField()\n return fields\n\n def validate(self, attrs):\n if attrs['password'] != attrs['password_confirm']:\n raise serializers.ValidationError(_(\"Passwords don't match\"))\n return attrs\n\n",
"step-ids": [
10,
13,
14,
15,
21
]
}
|
[
10,
13,
14,
15,
21
] |
"""Identifying Antecedent Pronoun"""
from question import Question,Packet
# Question bank keyed by grading category; each value is a list of example
# sentences.  <u>...</u> marks highlight the pronoun/antecedent pair in the
# rendered question.
qdict={
# Sentences whose pronouns agree with their antecedents.
"correct pronoun-antecedent agreement":[
"<u>He</u> came home to <u>his</u> own car.",
"<u>He</u> found <u>his</u> sneakers in the garage.",
"<u>Harry</u> gave <u>himself</u> a baseball for Christmas.",
"<u>Jill</u> found <u>her</u> missing sock on top of the dresser.",
"<u>The man named Voldemort</u> gave the girl named Hermione <u>his</u> own surprising gift for Christmas.",
"<u>The boy</u> gave the girl <u>his</u> tiny little pot for Christmas.",
"<u>They</u> found <u>themselves</u> in the midst of a great struggle with Greyback.",
"<u>The man named Voldemort</u> discovered that he held the secret to <u>his</u> success in his own hands.",
"<u>The man named Voldemort</u> hated <u>himself</u> after Harry defeated him.",
"The man named Voldemort found his wand to be too weak for Dumbledore.",
"The man named Voldemort found his wand in need of serious repair.",
"We found ourselves in the midst of a huge explosion.",
"I found myself in a real fit of pain.",
"Somebody has left their bag on the floor.",
"A can of lima beans sits on its shelf.",
"Josh and Jill made their presentation Monday.",
"Josh and Fiona made their presentation yesterday.",
"On Tuesday, Gandalf and Bilbo made their speech.",
"The jury read its verdict.",
"The crowd found its home inside the tree.",
"The flock went its own way for the summer.",
"Jury members gave their individual opinions.",
"The flocks gave their quaks in agreement with the jury.",
"The school had its roof repaired over the summer.",
"The swarm of bees had its nest inside Greyback's werewolf home.",
"The herd of cattle gathered into its cramp little barn for the night.",
"The two boys who owned that <u>home</u> found fortune inside one of <u>its</u> rooms.",
"The children, who were sometimes happy, had their own rooms.",
"They were so bored with the lecture, <u>they</u> found themselves drooling on <u>their</u> own homework.",
],
# Sentences with deliberate pronoun-antecedent disagreement.
"incorrect pronoun-antecedent agreement":[
"The boy gave the girl its tiny little pot for Christmas.",
"He found yourself sneakers in the garage.",
"They found them sneakers to be in the locker.",
"He gave themselves a baseball outside the locker.",
"They gave himself something fun to do during the lecture.",
"The man named Voldemort gave the girl named Hermione their own surprising gift for Christmas.",
"The man named Voldemort discovered that he held the secret to her success in her own hands.",
"The man named Voldemort hated myself after Harry defeated them.",
"The man named Voldemort found herself to be too weak for Dumbledore.",
"The man named Voldemort found yourself in need of serious repair.",
"President Lincoln delivered her Gettysburg Address in 1863.",
"A can of pinto beans sits on it's shelf.",
"Josh and Jill made his presentation Monday.",
"Josh and Jane made her presentation yesterday.",
"On Tuesday, Tom and Mr Riddle made his speech.",
"The jury read their verdict.",
"The crowd found their home inside the tree.",
"The flock went their own way for the summer.",
"Jury members gave his individual opinions.",
"The flocks gave its quaks in agreement with the jury.",
"The school had their roof repaired over the summer.",
"The swarm of bees had their nest inside Greyback's werewolf home.",
"The herd of cattle gathered into their cramp barn for the night.",
"The two <u>boys</u> who owned that home found fortune inside one of <u>his</u> own rooms.",
"The two <u>boys</u> who owned that home found fortune inside one of <u>her</u> own rooms.",
"The <u>children</u>, who were sometimes happy, had <u>its</u> own rooms.",
"They were so bored with the lecture, <u>they</u> found themselves drooling on <u>his</u> own homework.",
"He was so tired <u>he</u> fell asleep on <u>their</u> own chair.",
"<u>She</u> was so tired <u>he</u> fell asleep on his own chair.",
"He was so tired <u>he</u> fell asleep on <u>her</u> own chair.",
"They found himself strong in the face of Greyback.",
]
}
def make_packet(number=1):
    """Build a packet of *number* antecedent-agreement questions,
    one Question per category in ``qdict``."""
    questions = [
        Question(category, qdict=qdict, qsection="Antecedent Agreement")
        for category in qdict.keys()
    ]
    return Packet(questions).make_packet(number)
if __name__=="__main__":
    # Quick smoke test (Python 2 syntax): render one question, then print
    # a 10-question packet.
    print "testing..."
    assert [Question(x,qdict=qdict,qsection="Antecedent Agreement") for x in qdict.keys()][0].get_Question()
    print make_packet(10)
|
normal
|
{
"blob_id": "94b1e0280eff165f63e117969d5e1bf9d1e35193",
"index": 1598,
"step-1": "\"\"\"Identifying Antecedent Pronoun\"\"\"\nfrom question import Question,Packet\n\nqdict={\n\"correct pronoun-antecedent agreement\":[\n\"<u>He</u> came home to <u>his</u> own car.\",\n\"<u>He</u> found <u>his</u> sneakers in the garage.\",\n\"<u>Harry</u> gave <u>himself</u> a baseball for Christmas.\",\n\"<u>Jill</u> found <u>her</u> missing sock on top of the dresser.\",\n\"<u>The man named Voldemort</u> gave the girl named Hermione <u>his</u> own surprising gift for Christmas.\",\n\"<u>The boy</u> gave the girl <u>his</u> tiny little pot for Christmas.\",\n\"<u>They</u> found <u>themselves</u> in the midst of a great struggle with Greyback.\",\n\"<u>The man named Voldemort</u> discovered that he held the secret to <u>his</u> success in his own hands.\",\n\"<u>The man named Voldemort</u> hated <u>himself</u> after Harry defeated him.\",\n\"The man named Voldemort found his wand to be too weak for Dumbledore.\",\n\"The man named Voldemort found his wand in need of serious repair.\",\n\"We found ourselves in the midst of a huge explosion.\",\n\"I found myself in a real fit of pain.\",\n\"Somebody has left their bag on the floor.\",\n\"A can of lima beans sits on its shelf.\",\n\"Josh and Jill made their presentation Monday.\",\n\"Josh and Fiona made their presentation yesterday.\",\n\"On Tuesday, Gandalf and Bilbo made their speech.\",\n\"The jury read its verdict.\",\n\"The crowd found its home inside the tree.\",\n\"The flock went its own way for the summer.\",\n\"Jury members gave their individual opinions.\",\n\"The flocks gave their quaks in agreement with the jury.\",\n\"The school had its roof repaired over the summer.\",\n\"The swarm of bees had its nest inside Greyback's werewolf home.\",\n\"The herd of cattle gathered into its cramp little barn for the night.\",\n\"The two boys who owned that <u>home</u> found fortune inside one of <u>its</u> rooms.\",\n\"The children, who were sometimes happy, had their own rooms.\",\n\"They were so bored 
with the lecture, <u>they</u> found themselves drooling on <u>their</u> own homework.\",\n],\n\"incorrect pronoun-antecedent agreement\":[\n\"The boy gave the girl its tiny little pot for Christmas.\",\n\"He found yourself sneakers in the garage.\",\n\"They found them sneakers to be in the locker.\",\n\"He gave themselves a baseball outside the locker.\",\n\"They gave himself something fun to do during the lecture.\",\n\"The man named Voldemort gave the girl named Hermione their own surprising gift for Christmas.\",\n\"The man named Voldemort discovered that he held the secret to her success in her own hands.\",\n\"The man named Voldemort hated myself after Harry defeated them.\",\n\"The man named Voldemort found herself to be too weak for Dumbledore.\",\n\"The man named Voldemort found yourself in need of serious repair.\",\n\"President Lincoln delivered her Gettysburg Address in 1863.\",\n\"A can of pinto beans sits on it's shelf.\",\n\"Josh and Jill made his presentation Monday.\",\n\"Josh and Jane made her presentation yesterday.\",\n\"On Tuesday, Tom and Mr Riddle made his speech.\",\n\"The jury read their verdict.\",\n\"The crowd found their home inside the tree.\",\n\"The flock went their own way for the summer.\",\n\"Jury members gave his individual opinions.\",\n\"The flocks gave its quaks in agreement with the jury.\",\n\"The school had their roof repaired over the summer.\",\n\"The swarm of bees had their nest inside Greyback's werewolf home.\",\n\"The herd of cattle gathered into their cramp barn for the night.\",\n\"The two <u>boys</u> who owned that home found fortune inside one of <u>his</u> own rooms.\",\n\"The two <u>boys</u> who owned that home found fortune inside one of <u>her</u> own rooms.\",\n\"The <u>children</u>, who were sometimes happy, had <u>its</u> own rooms.\",\n\"They were so bored with the lecture, <u>they</u> found themselves drooling on <u>his</u> own homework.\",\n\"He was so tired <u>he</u> fell asleep on <u>their</u> own 
chair.\",\n\"<u>She</u> was so tired <u>he</u> fell asleep on his own chair.\",\n\"He was so tired <u>he</u> fell asleep on <u>her</u> own chair.\",\n\"They found himself strong in the face of Greyback.\",\n]\n}\ndef make_packet(number=1):\n\treturn Packet([Question(x,qdict=qdict,qsection=\"Antecedent Agreement\") for x in qdict.keys()]).make_packet(number)\n\nif __name__==\"__main__\":\n\tprint \"testing...\"\n\tassert [Question(x,qdict=qdict,qsection=\"Antecedent Agreement\") for x in qdict.keys()][0].get_Question()\n\tprint make_packet(10)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def install(package):
    """Install *package* with pip, supporting both old and new pip layouts."""
    # Older pip versions expose main(); newer ones moved it to pip._internal.
    installer = pip.main if hasattr(pip, 'main') else pip._internal.main
    installer(['install', package])
<|reserved_special_token_0|>
def get_gdrive_service():
    """Return an authenticated Google Drive v3 service client.

    Loads cached OAuth credentials from token.pickle when present, refreshes
    or re-runs the installed-app browser flow as needed, and caches the
    resulting credentials back to token.pickle.
    """
    creds = None
    # token.pickle stores the user's credentials from a previous run.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    # No (valid) credentials: refresh if possible, otherwise run the flow.
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file('credentials.json'
                , SCOPES)
            creds = flow.run_local_server(port=0)
        # Persist the (possibly new) credentials for the next run.
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    return build('drive', 'v3', credentials=creds)
def downloadFile(id, name):
    """Download the Drive file *id* and save it as *name* in the cwd."""
    service = get_gdrive_service()
    request = service.files().get_media(fileId=id)
    buffer = io.BytesIO()
    downloader = MediaIoBaseDownload(buffer, request)
    # Pull chunks until the downloader reports completion.
    done = False
    while not done:
        status, done = downloader.next_chunk()
    buffer.seek(0)
    with io.open('.' + '/' + name, 'wb') as out:
        out.write(buffer.read())
<|reserved_special_token_0|>
def check_duplicate_image(items):
    """Collect JPEG names/links from *items*, download each image locally,
    and return the list of duplicate image names."""
    image_names = []
    drive_meta = {}  # file id -> [name, webViewLink] (collected, not returned)
    for item in items:
        if item['mimeType'] == 'image/jpeg':
            image_names.append(item['name'])
            drive_meta[item['id']] = [item['name'], item['webViewLink']]
            # csv_map is the module-level name -> URL map used by the report.
            csv_map[item['name']] = item['webViewLink']
            downloadFile(item['id'], item['name'])
    return duplicate_image_list(image_names)
def renameFile(service, items, newName):
    """Rename each image in *items* to newName<N> with the right extension.

    N advances for every item (not only images), matching the original
    numbering behavior.
    """
    index = 1
    for item in items:
        meta = service.files().get(fileId=item['id']).execute()
        # The update payload must not carry the immutable id field.
        del meta['id']
        mime = item['mimeType']
        if 'jpeg' in mime:
            meta['name'] = newName + str(index) + '.jpg'
        if 'png' in mime:
            meta['name'] = newName + str(index) + '.png'
        service.files().update(fileId=item['id'], body=meta).execute()
        index = index + 1
<|reserved_special_token_0|>
def list_files(items, service):
    """Build the Drive summary reports from a full file listing.

    Counts folders and images, gathers per-folder image lists for the
    children of the 'Test Techm' folder, then writes the duplicate-image
    and overview HTML reports.
    """
    folder_count = 0
    image_count = 0
    imglist = []   # NOTE(review): only ever receives `count` (always 0); appears unused
    count = 0
    testtechm_id = ''
    nm_name = []
    img_count = []
    list_all_folder_name = []
    rows = []
    overview_map = {}  # folder name -> image-name list from count_image
    img_nm = 0         # NOTE(review): never used after initialization
    # First pass: locate the parent id of the 'Test Techm' folder.
    for item in items:
        name = item['name']
        mime_type = item['mimeType']
        if name == 'Test Techm':
            testtechm_id = item['parents'][0]
    # Second pass: tally counts and gather data for folders under Test Techm.
    for item in items:
        id = item['id']
        name = item['name']
        mime_type = item['mimeType']
        if mime_type == 'application/vnd.google-apps.folder':
            folder_count = folder_count + 1
        if mime_type == 'image/jpeg':
            image_count = image_count + 1
        if mime_type == 'application/vnd.google-apps.folder' and item['parents'
            ][0] == testtechm_id:
            list_all_folder_name.append(item['name'])
            # count_image is defined elsewhere in this file; presumably
            # returns the image names inside folder `id` — confirm.
            name1 = count_image(id)
            nm_name.append(name1)
            img_count.append(len(name1))
            overview_map[item['name']] = name1
            rows.append((id, name, mime_type, folder_count))
            imglist.append(count)
        rows.append((id, name, mime_type, folder_count))
    lt, duplicate_ct = check_duplicate_image_new(items)
    duplicateImagehtml(folder_count, image_count, duplicate_ct, items)
    draw_chart_create_report(list_all_folder_name, image_count,
        duplicate_ct, overview_map, folder_count)
def createDeviceCSV():
    """Write the collected duplicate-image map (module-level ``csv_map``,
    image name -> web link) to DuplicateImage.csv with a header row."""
    fileName = 'DuplicateImage.csv'
    # newline='' is required by the csv module; without it the writer emits
    # blank rows between records on Windows.
    with open(fileName, 'w', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(['Image Name', 'Image Url'])
        for name, url in csv_map.items():
            writer.writerow([name, url])
    # The redundant close() after the with-block and the unused row counter
    # from the original were removed; the context manager closes the file.
def duplicateImagehtml(folder_count, image_count, duplicate_ct, items):
uri = []
map1, count = check_duplicate_image_new(items)
for k, v in map1.items():
name_url = []
name_url.append(k)
name_url.append(str(len(v)))
name_url.append(str(v))
uri.append(name_url)
fb = open('duplicateData.html', 'w')
message = """ <html> <head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['table']});
google.charts.setOnLoadCallback(drawTable);
function drawTable() {
var data3 = new google.visualization.DataTable();
data3.addColumn('string', 'Name');
data3.addColumn('string', 'Count');
data3.addRows([
['Total Folders', '""" + str(folder_count) + """'],
['Total Images', '""" + str(image_count) + """'],
['Duplicate Images', '""" + str(duplicate_ct) + """']]);
var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
var data = new google.visualization.DataTable();
data.addColumn('string', 'Image Name');
data.addColumn('string', 'Image Count');
data.addColumn('string', 'Image Url');
data.addRows(""" + str(uri) + """);
var table = new google.visualization.Table(document.getElementById('table_div'));
table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});
}
</script>
</head>
<body><h2 style="text-align: center">Google Drive Summary Table</h2>
<div id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div>
<h2 style="text-align: center" >List of Duplicate Image</h2>
<div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div>
</body></html>"""
fb.write(message)
fb.close()
print('Duplicate image data preparing.. ')
def draw_chart_create_report(folder_count, image_count, duplicate_ct, map,
folder_count_real):
fb = open('gDriveOverview.html', 'w')
values = list(map.values())
newlist = []
folder_name = list(map.keys())
total_image_count = []
duplicate_image_count_in_folder = []
for v in values:
newlist.append(duplicate_image_list(v))
total_image_count.append(len(v))
for n in newlist:
duplicate_image_count_in_folder.append(len(n))
m1 = """<html>
<head>
<h1 style ="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['bar','corechart','table']});
google.charts.setOnLoadCallback(drawChart);
function drawChart() {
var paiData = google.visualization.arrayToDataTable([
['Drive', 'Drive Data'],
['Total Images', """ + str(image_count) + """],
['Total duplicate Images', """ + str(duplicate_ct) + """],
['Total Folder', """ + str(folder_count_real) + """]
]);
var paiOptions = {
title: 'Google Drive Overview'
};
var chart = new google.visualization.PieChart(document.getElementById('piechart'));
chart.draw(paiData, paiOptions);
var barData = google.visualization.arrayToDataTable("""
fb.write(m1)
barchart_data = []
barchart_data.append(['Folders', 'Total no of Images',
'Total no of duplicate Images'])
for i in range(len(values)):
item_list = []
item_list.append(folder_count[i])
item_list.append(total_image_count[i])
item_list.append(duplicate_image_count_in_folder[i])
barchart_data.append(item_list)
fb.write(m1)
m3 = str(barchart_data) + """);
var barOptions = {
chart: { title: 'Google Drive Folderwise Overview',
subtitle: 'This report is created on '+new Date(),
}};
var chart = new google.charts.Bar(document.getElementById('bar_chart'));
chart.draw(barData, google.charts.Bar.convertOptions(barOptions));
}
</script>
</head>
<body>
<div style="width:100%; margin:0px auto;">
<div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div>
<div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div>
</div>
<div>
<h2>
<p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p>
</h2></div></body></html>
"""
fb.write(m3)
fb.close()
print('Bar and Pie chart creating.... ')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def install(package):
if hasattr(pip, 'main'):
pip.main(['install', package])
else:
pip._internal.main(['install', package])
<|reserved_special_token_0|>
def get_gdrive_service():
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file('credentials.json'
, SCOPES)
creds = flow.run_local_server(port=0)
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return build('drive', 'v3', credentials=creds)
def downloadFile(id, name):
service = get_gdrive_service()
request = service.files().get_media(fileId=id)
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
with io.open('.' + '/' + name, 'wb') as f:
fh.seek(0)
f.write(fh.read())
<|reserved_special_token_0|>
def check_duplicate_image_new(items):
print('Images is loading to memory..')
map = {}
list = []
message = set()
duplicate_image = []
final_result = {}
if not items:
print('No files found.')
else:
for item in items:
if item['mimeType'] == 'image/jpeg':
list.append(item['name'])
value = []
value.append(item['name'])
value.append(item['webViewLink'])
if item['name'] in map:
val = set()
val.add(item['webViewLink'])
map[item['name']] = item['webViewLink']
else:
map[item['name']] = item['webViewLink']
downloadFile(item['id'], item['name'])
match = []
flag = False
for i in range(len(list) - 1):
temp = []
dp_count = 0
flag = False
if list[i] not in match:
flag = True
for j in range(i + 1, len(list)):
istrue = is_duplicate(list[i], list[j])
if istrue == True:
dp_count = dp_count + 1
temp.append(list[j])
if list[j] not in match:
match.append(list[j])
if list[i] not in match:
match.append(list[i])
if len(match) == 0:
match.append(list[i])
match.append(list[j])
if flag == True and dp_count != 0:
final_result[list[i]] = temp
m = {}
tdct = 0
for x, y in final_result.items():
res = y
tdct = tdct + len(res)
s = set()
for i in res:
for item in items:
if item['mimeType'] == 'image/jpeg':
if item['name'] == i:
s.add(item['webViewLink'])
m[x] = s
return m, tdct
def duplicate_image_list(imagelist):
dup_list = []
if len(imagelist) >= 1:
for i in range(len(imagelist) - 1):
count = 0
l = []
for j in range(i + 1, len(imagelist)):
image1 = cv2.imread(imagelist[i])
image2 = cv2.imread(imagelist[j])
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference)
if result is True:
l.append(imagelist[j])
count = count + 1
dup_list.append(imagelist[i])
except:
i = 0
return dup_list
<|reserved_special_token_0|>
def check_duplicate_image(items):
map = {}
image_name_list = []
duplicate_image = []
for item in items:
file_type = item['mimeType']
if file_type == 'image/jpeg':
image_name_list.append(item['name'])
value = []
value.append(item['name'])
value.append(item['webViewLink'])
map[item['id']] = value
csv_map[item['name']] = item['webViewLink']
downloadFile(item['id'], item['name'])
duplicate_image = duplicate_image_list(image_name_list)
return duplicate_image
def renameFile(service, items, newName):
count = 1
for item in items:
id = item['id']
name = item['name']
mime_type = item['mimeType']
file = service.files().get(fileId=id).execute()
del file['id']
if 'jpeg' in mime_type:
file['name'] = newName + str(count) + '.jpg'
if 'png' in mime_type:
file['name'] = newName + str(count) + '.png'
updated_file = service.files().update(fileId=id, body=file).execute()
count = count + 1
<|reserved_special_token_0|>
def list_files(items, service):
folder_count = 0
image_count = 0
imglist = []
count = 0
testtechm_id = ''
nm_name = []
img_count = []
list_all_folder_name = []
rows = []
overview_map = {}
img_nm = 0
for item in items:
name = item['name']
mime_type = item['mimeType']
if name == 'Test Techm':
testtechm_id = item['parents'][0]
for item in items:
id = item['id']
name = item['name']
mime_type = item['mimeType']
if mime_type == 'application/vnd.google-apps.folder':
folder_count = folder_count + 1
if mime_type == 'image/jpeg':
image_count = image_count + 1
if mime_type == 'application/vnd.google-apps.folder' and item['parents'
][0] == testtechm_id:
list_all_folder_name.append(item['name'])
name1 = count_image(id)
nm_name.append(name1)
img_count.append(len(name1))
overview_map[item['name']] = name1
rows.append((id, name, mime_type, folder_count))
imglist.append(count)
rows.append((id, name, mime_type, folder_count))
lt, duplicate_ct = check_duplicate_image_new(items)
duplicateImagehtml(folder_count, image_count, duplicate_ct, items)
draw_chart_create_report(list_all_folder_name, image_count,
duplicate_ct, overview_map, folder_count)
def createDeviceCSV():
fileName = 'DuplicateImage.csv'
with open(fileName, 'w') as csvFile:
writer = csv.writer(csvFile)
row = ['Image Name', 'Image Url']
writer.writerow(row)
count = 0
for k, v in csv_map.items():
row = [k, v]
writer.writerow(row)
count = count + 1
csvFile.close()
def duplicateImagehtml(folder_count, image_count, duplicate_ct, items):
uri = []
map1, count = check_duplicate_image_new(items)
for k, v in map1.items():
name_url = []
name_url.append(k)
name_url.append(str(len(v)))
name_url.append(str(v))
uri.append(name_url)
fb = open('duplicateData.html', 'w')
message = """ <html> <head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['table']});
google.charts.setOnLoadCallback(drawTable);
function drawTable() {
var data3 = new google.visualization.DataTable();
data3.addColumn('string', 'Name');
data3.addColumn('string', 'Count');
data3.addRows([
['Total Folders', '""" + str(folder_count) + """'],
['Total Images', '""" + str(image_count) + """'],
['Duplicate Images', '""" + str(duplicate_ct) + """']]);
var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
var data = new google.visualization.DataTable();
data.addColumn('string', 'Image Name');
data.addColumn('string', 'Image Count');
data.addColumn('string', 'Image Url');
data.addRows(""" + str(uri) + """);
var table = new google.visualization.Table(document.getElementById('table_div'));
table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});
}
</script>
</head>
<body><h2 style="text-align: center">Google Drive Summary Table</h2>
<div id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div>
<h2 style="text-align: center" >List of Duplicate Image</h2>
<div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div>
</body></html>"""
fb.write(message)
fb.close()
print('Duplicate image data preparing.. ')
def draw_chart_create_report(folder_count, image_count, duplicate_ct, map,
folder_count_real):
fb = open('gDriveOverview.html', 'w')
values = list(map.values())
newlist = []
folder_name = list(map.keys())
total_image_count = []
duplicate_image_count_in_folder = []
for v in values:
newlist.append(duplicate_image_list(v))
total_image_count.append(len(v))
for n in newlist:
duplicate_image_count_in_folder.append(len(n))
m1 = """<html>
<head>
<h1 style ="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['bar','corechart','table']});
google.charts.setOnLoadCallback(drawChart);
function drawChart() {
var paiData = google.visualization.arrayToDataTable([
['Drive', 'Drive Data'],
['Total Images', """ + str(image_count) + """],
['Total duplicate Images', """ + str(duplicate_ct) + """],
['Total Folder', """ + str(folder_count_real) + """]
]);
var paiOptions = {
title: 'Google Drive Overview'
};
var chart = new google.visualization.PieChart(document.getElementById('piechart'));
chart.draw(paiData, paiOptions);
var barData = google.visualization.arrayToDataTable("""
fb.write(m1)
barchart_data = []
barchart_data.append(['Folders', 'Total no of Images',
'Total no of duplicate Images'])
for i in range(len(values)):
item_list = []
item_list.append(folder_count[i])
item_list.append(total_image_count[i])
item_list.append(duplicate_image_count_in_folder[i])
barchart_data.append(item_list)
fb.write(m1)
m3 = str(barchart_data) + """);
var barOptions = {
chart: { title: 'Google Drive Folderwise Overview',
subtitle: 'This report is created on '+new Date(),
}};
var chart = new google.charts.Bar(document.getElementById('bar_chart'));
chart.draw(barData, google.charts.Bar.convertOptions(barOptions));
}
</script>
</head>
<body>
<div style="width:100%; margin:0px auto;">
<div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div>
<div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div>
</div>
<div>
<h2>
<p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p>
</h2></div></body></html>
"""
fb.write(m3)
fb.close()
print('Bar and Pie chart creating.... ')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def install(package):
if hasattr(pip, 'main'):
pip.main(['install', package])
else:
pip._internal.main(['install', package])
<|reserved_special_token_0|>
def get_gdrive_service():
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file('credentials.json'
, SCOPES)
creds = flow.run_local_server(port=0)
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
return build('drive', 'v3', credentials=creds)
def downloadFile(id, name):
service = get_gdrive_service()
request = service.files().get_media(fileId=id)
fh = io.BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
with io.open('.' + '/' + name, 'wb') as f:
fh.seek(0)
f.write(fh.read())
<|reserved_special_token_0|>
def check_duplicate_image_new(items):
print('Images is loading to memory..')
map = {}
list = []
message = set()
duplicate_image = []
final_result = {}
if not items:
print('No files found.')
else:
for item in items:
if item['mimeType'] == 'image/jpeg':
list.append(item['name'])
value = []
value.append(item['name'])
value.append(item['webViewLink'])
if item['name'] in map:
val = set()
val.add(item['webViewLink'])
map[item['name']] = item['webViewLink']
else:
map[item['name']] = item['webViewLink']
downloadFile(item['id'], item['name'])
match = []
flag = False
for i in range(len(list) - 1):
temp = []
dp_count = 0
flag = False
if list[i] not in match:
flag = True
for j in range(i + 1, len(list)):
istrue = is_duplicate(list[i], list[j])
if istrue == True:
dp_count = dp_count + 1
temp.append(list[j])
if list[j] not in match:
match.append(list[j])
if list[i] not in match:
match.append(list[i])
if len(match) == 0:
match.append(list[i])
match.append(list[j])
if flag == True and dp_count != 0:
final_result[list[i]] = temp
m = {}
tdct = 0
for x, y in final_result.items():
res = y
tdct = tdct + len(res)
s = set()
for i in res:
for item in items:
if item['mimeType'] == 'image/jpeg':
if item['name'] == i:
s.add(item['webViewLink'])
m[x] = s
return m, tdct
def duplicate_image_list(imagelist):
dup_list = []
if len(imagelist) >= 1:
for i in range(len(imagelist) - 1):
count = 0
l = []
for j in range(i + 1, len(imagelist)):
image1 = cv2.imread(imagelist[i])
image2 = cv2.imread(imagelist[j])
try:
difference = cv2.subtract(image1, image2)
result = not np.any(difference)
if result is True:
l.append(imagelist[j])
count = count + 1
dup_list.append(imagelist[i])
except:
i = 0
return dup_list
<|reserved_special_token_0|>
def check_duplicate_image(items):
map = {}
image_name_list = []
duplicate_image = []
for item in items:
file_type = item['mimeType']
if file_type == 'image/jpeg':
image_name_list.append(item['name'])
value = []
value.append(item['name'])
value.append(item['webViewLink'])
map[item['id']] = value
csv_map[item['name']] = item['webViewLink']
downloadFile(item['id'], item['name'])
duplicate_image = duplicate_image_list(image_name_list)
return duplicate_image
def renameFile(service, items, newName):
count = 1
for item in items:
id = item['id']
name = item['name']
mime_type = item['mimeType']
file = service.files().get(fileId=id).execute()
del file['id']
if 'jpeg' in mime_type:
file['name'] = newName + str(count) + '.jpg'
if 'png' in mime_type:
file['name'] = newName + str(count) + '.png'
updated_file = service.files().update(fileId=id, body=file).execute()
count = count + 1
def count_image(id):
imageList = []
service = get_gdrive_service()
results = service.files().list(pageSize=1000, q="'{}' in parents".
format(id)).execute()
items = results.get('files', [])
for item in items:
mime_Type = item['mimeType']
if mime_Type == 'image/jpeg':
imageList.append(item['name'])
if mime_Type == 'application/vnd.google-apps.folder':
imageList.extend(count_image(item['id']))
return imageList
def list_files(items, service):
folder_count = 0
image_count = 0
imglist = []
count = 0
testtechm_id = ''
nm_name = []
img_count = []
list_all_folder_name = []
rows = []
overview_map = {}
img_nm = 0
for item in items:
name = item['name']
mime_type = item['mimeType']
if name == 'Test Techm':
testtechm_id = item['parents'][0]
for item in items:
id = item['id']
name = item['name']
mime_type = item['mimeType']
if mime_type == 'application/vnd.google-apps.folder':
folder_count = folder_count + 1
if mime_type == 'image/jpeg':
image_count = image_count + 1
if mime_type == 'application/vnd.google-apps.folder' and item['parents'
][0] == testtechm_id:
list_all_folder_name.append(item['name'])
name1 = count_image(id)
nm_name.append(name1)
img_count.append(len(name1))
overview_map[item['name']] = name1
rows.append((id, name, mime_type, folder_count))
imglist.append(count)
rows.append((id, name, mime_type, folder_count))
lt, duplicate_ct = check_duplicate_image_new(items)
duplicateImagehtml(folder_count, image_count, duplicate_ct, items)
draw_chart_create_report(list_all_folder_name, image_count,
duplicate_ct, overview_map, folder_count)
def createDeviceCSV():
fileName = 'DuplicateImage.csv'
with open(fileName, 'w') as csvFile:
writer = csv.writer(csvFile)
row = ['Image Name', 'Image Url']
writer.writerow(row)
count = 0
for k, v in csv_map.items():
row = [k, v]
writer.writerow(row)
count = count + 1
csvFile.close()
def duplicateImagehtml(folder_count, image_count, duplicate_ct, items):
uri = []
map1, count = check_duplicate_image_new(items)
for k, v in map1.items():
name_url = []
name_url.append(k)
name_url.append(str(len(v)))
name_url.append(str(v))
uri.append(name_url)
fb = open('duplicateData.html', 'w')
message = """ <html> <head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['table']});
google.charts.setOnLoadCallback(drawTable);
function drawTable() {
var data3 = new google.visualization.DataTable();
data3.addColumn('string', 'Name');
data3.addColumn('string', 'Count');
data3.addRows([
['Total Folders', '""" + str(folder_count) + """'],
['Total Images', '""" + str(image_count) + """'],
['Duplicate Images', '""" + str(duplicate_ct) + """']]);
var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
var data = new google.visualization.DataTable();
data.addColumn('string', 'Image Name');
data.addColumn('string', 'Image Count');
data.addColumn('string', 'Image Url');
data.addRows(""" + str(uri) + """);
var table = new google.visualization.Table(document.getElementById('table_div'));
table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});
}
</script>
</head>
<body><h2 style="text-align: center">Google Drive Summary Table</h2>
<div id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div>
<h2 style="text-align: center" >List of Duplicate Image</h2>
<div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div>
</body></html>"""
fb.write(message)
fb.close()
print('Duplicate image data preparing.. ')
def draw_chart_create_report(folder_count, image_count, duplicate_ct, map,
folder_count_real):
fb = open('gDriveOverview.html', 'w')
values = list(map.values())
newlist = []
folder_name = list(map.keys())
total_image_count = []
duplicate_image_count_in_folder = []
for v in values:
newlist.append(duplicate_image_list(v))
total_image_count.append(len(v))
for n in newlist:
duplicate_image_count_in_folder.append(len(n))
m1 = """<html>
<head>
<h1 style ="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['bar','corechart','table']});
google.charts.setOnLoadCallback(drawChart);
function drawChart() {
var paiData = google.visualization.arrayToDataTable([
['Drive', 'Drive Data'],
['Total Images', """ + str(image_count) + """],
['Total duplicate Images', """ + str(duplicate_ct) + """],
['Total Folder', """ + str(folder_count_real) + """]
]);
var paiOptions = {
title: 'Google Drive Overview'
};
var chart = new google.visualization.PieChart(document.getElementById('piechart'));
chart.draw(paiData, paiOptions);
var barData = google.visualization.arrayToDataTable("""
fb.write(m1)
barchart_data = []
barchart_data.append(['Folders', 'Total no of Images',
'Total no of duplicate Images'])
for i in range(len(values)):
item_list = []
item_list.append(folder_count[i])
item_list.append(total_image_count[i])
item_list.append(duplicate_image_count_in_folder[i])
barchart_data.append(item_list)
fb.write(m1)
m3 = str(barchart_data) + """);
var barOptions = {
chart: { title: 'Google Drive Folderwise Overview',
subtitle: 'This report is created on '+new Date(),
}};
var chart = new google.charts.Bar(document.getElementById('bar_chart'));
chart.draw(barData, google.charts.Bar.convertOptions(barOptions));
}
</script>
</head>
<body>
<div style="width:100%; margin:0px auto;">
<div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div>
<div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div>
</div>
<div>
<h2>
<p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p>
</h2></div></body></html>
"""
fb.write(m3)
fb.close()
print('Bar and Pie chart creating.... ')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def install(package):
    """Install *package* with pip, supporting both old and new pip APIs."""
    # pip >= 10 moved main() into pip._internal; pick whichever exists.
    installer = pip.main if hasattr(pip, 'main') else pip._internal.main
    installer(['install', package])
<|reserved_special_token_0|>
def get_gdrive_service():
    """Build and return an authorized Google Drive v3 API client.

    Credentials are cached in ``token.pickle``.  When the cache is missing
    or invalid, the credentials are refreshed silently if possible, or the
    user is sent through the interactive InstalledApp OAuth flow; the
    resulting credentials are pickled back to disk for the next run.
    """
    creds = None
    # Reuse previously saved credentials if the cache file exists.
    if os.path.exists('token.pickle'):
        with open('token.pickle', 'rb') as token:
            creds = pickle.load(token)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            # Expired but refreshable: no user interaction needed.
            creds.refresh(Request())
        else:
            # No usable credentials: run the local-server OAuth flow.
            flow = InstalledAppFlow.from_client_secrets_file('credentials.json'
                , SCOPES)
            creds = flow.run_local_server(port=0)
        # Persist the new/refreshed credentials for subsequent runs.
        with open('token.pickle', 'wb') as token:
            pickle.dump(creds, token)
    return build('drive', 'v3', credentials=creds)
def downloadFile(id, name):
    """Download the Drive file *id* into ./*name* in the current directory.

    The media content is streamed chunk-by-chunk into an in-memory buffer
    and then flushed to disk in one write.
    """
    service = get_gdrive_service()
    request = service.files().get_media(fileId=id)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while done is False:
        # next_chunk() returns (progress_status, done); loop until complete.
        status, done = downloader.next_chunk()
    with io.open('.' + '/' + name, 'wb') as f:
        fh.seek(0)
        f.write(fh.read())
def is_duplicate(img1, img2):
    """Return True when the images at paths *img1* and *img2* are pixel-identical.

    Equality is decided by ``cv2.subtract``: an all-zero difference means
    duplicate.  ``cv2.subtract`` raises when the images differ in shape or
    could not be read (``cv2.imread`` returns None); such pairs are treated
    as "not a duplicate".
    """
    image1 = cv2.imread(img1)
    image2 = cv2.imread(img2)
    try:
        difference = cv2.subtract(image1, image2)
        # np.any() is True when any pixel differs; invert for equality.
        return not np.any(difference)
    except Exception:
        # Was a bare `except:` with a dead `i = 0`; keep the best-effort
        # semantics but stop swallowing KeyboardInterrupt/SystemExit.
        return False
def check_duplicate_image_new(items):
    """Find duplicate JPEG images among Drive *items*.

    Downloads every JPEG locally, compares every pair pixel-by-pixel via
    is_duplicate(), and returns ``(m, tdct)`` where ``m`` maps a
    representative image name to the set of webViewLinks of its duplicates
    and ``tdct`` is the total number of duplicates found.
    """
    print('Images is loading to memory..')
    # NOTE(review): `map` and `list` shadow the builtins of the same name.
    map = {}
    list = []
    message = set()  # unused
    duplicate_image = []  # unused
    final_result = {}  # representative name -> list of duplicate names
    if not items:
        print('No files found.')
    else:
        for item in items:
            if item['mimeType'] == 'image/jpeg':
                list.append(item['name'])
                value = []  # built but never read
                value.append(item['name'])
                value.append(item['webViewLink'])
                # Both branches store the same value; `val` is never used.
                if item['name'] in map:
                    val = set()
                    val.add(item['webViewLink'])
                    map[item['name']] = item['webViewLink']
                else:
                    map[item['name']] = item['webViewLink']
                # Fetch the image so is_duplicate() can read it from disk.
                downloadFile(item['id'], item['name'])
        match = []  # names already assigned to some duplicate group
        flag = False
        for i in range(len(list) - 1):
            temp = []
            dp_count = 0
            flag = False
            # Only open a new group when list[i] is not already grouped.
            if list[i] not in match:
                flag = True
            for j in range(i + 1, len(list)):
                istrue = is_duplicate(list[i], list[j])
                if istrue == True:
                    dp_count = dp_count + 1
                    temp.append(list[j])
                    if list[j] not in match:
                        match.append(list[j])
                    if list[i] not in match:
                        match.append(list[i])
                    if len(match) == 0:
                        # Unreachable: match was appended to just above.
                        match.append(list[i])
                        match.append(list[j])
            if flag == True and dp_count != 0:
                final_result[list[i]] = temp
    m = {}
    tdct = 0
    # Resolve each duplicate name back to its Drive webViewLink(s).
    for x, y in final_result.items():
        res = y
        tdct = tdct + len(res)
        s = set()
        for i in res:
            for item in items:
                if item['mimeType'] == 'image/jpeg':
                    if item['name'] == i:
                        s.add(item['webViewLink'])
        m[x] = s
    return m, tdct
def duplicate_image_list(imagelist):
    """Return the images in *imagelist* (paths) that have a later pixel-identical twin.

    Note: the representative image is appended once per matching partner,
    so ``len(result)`` equals the number of duplicate *pairs* found — the
    per-folder reports rely on this count.
    """
    dup_list = []
    if len(imagelist) >= 1:
        for i in range(len(imagelist) - 1):
            # Read the reference image once per outer iteration instead of
            # once per comparison (it was previously re-read for every j).
            image1 = cv2.imread(imagelist[i])
            for j in range(i + 1, len(imagelist)):
                image2 = cv2.imread(imagelist[j])
                try:
                    difference = cv2.subtract(image1, image2)
                    if not np.any(difference):
                        dup_list.append(imagelist[i])
                except Exception:
                    # Mismatched shapes / unreadable files: not duplicates.
                    pass
    return dup_list
<|reserved_special_token_0|>
def check_duplicate_image(items):
    """Download every JPEG in *items* and return the list of duplicates.

    Side effects: records each JPEG's webViewLink in the module-level
    ``csv_map`` (keyed by image name) and downloads each image to the
    current directory so it can be compared on disk.
    """
    id_map = {}
    jpeg_names = []
    for entry in items:
        if entry['mimeType'] != 'image/jpeg':
            continue
        jpeg_names.append(entry['name'])
        id_map[entry['id']] = [entry['name'], entry['webViewLink']]
        csv_map[entry['name']] = entry['webViewLink']
        downloadFile(entry['id'], entry['name'])
    return duplicate_image_list(jpeg_names)
def renameFile(service, items, newName):
    """Rename every Drive item in *items* to ``newName<N>.<ext>``.

    JPEGs get a ``.jpg`` suffix and PNGs a ``.png`` suffix; the counter N
    increases for every item processed, regardless of its type.
    """
    for index, entry in enumerate(items, start=1):
        file_id = entry['id']
        mime = entry['mimeType']
        metadata = service.files().get(fileId=file_id).execute()
        # The update call rejects bodies containing the immutable id field.
        del metadata['id']
        if 'jpeg' in mime:
            metadata['name'] = newName + str(index) + '.jpg'
        if 'png' in mime:
            metadata['name'] = newName + str(index) + '.png'
        service.files().update(fileId=file_id, body=metadata).execute()
def count_image(id):
    """Recursively collect the names of all JPEG files under folder *id*."""
    service = get_gdrive_service()
    query = "'{}' in parents".format(id)
    response = service.files().list(pageSize=1000, q=query).execute()
    names = []
    for child in response.get('files', []):
        mime = child['mimeType']
        if mime == 'image/jpeg':
            names.append(child['name'])
        if mime == 'application/vnd.google-apps.folder':
            # Descend into sub-folders and merge their image names.
            names.extend(count_image(child['id']))
    return names
def list_files(items, service):
    """Summarize Drive *items* and generate both HTML reports.

    Counts folders and JPEGs, gathers per-folder image lists for every
    folder directly under 'Test Techm', then writes the duplicate-image
    table (duplicateData.html) and the overview charts
    (gDriveOverview.html).  *service* is accepted for interface
    compatibility but not used directly here.
    """
    folder_count = 0
    image_count = 0
    testtechm_id = ''
    list_all_folder_name = []
    rows = []
    overview_map = {}  # folder name -> list of image names inside it
    # First pass: locate 'Test Techm' so its child folders can be found.
    # NOTE(review): this records the item's *parent* id, not its own id —
    # confirm that is intended.
    for item in items:
        if item['name'] == 'Test Techm':
            testtechm_id = item['parents'][0]
    # Second pass: tally folders/images and collect per-folder image names.
    for item in items:
        id = item['id']
        name = item['name']
        mime_type = item['mimeType']
        if mime_type == 'application/vnd.google-apps.folder':
            folder_count = folder_count + 1
        if mime_type == 'image/jpeg':
            image_count = image_count + 1
        if mime_type == 'application/vnd.google-apps.folder' and item['parents'
            ][0] == testtechm_id:
            list_all_folder_name.append(item['name'])
            overview_map[item['name']] = count_image(id)
        # One summary row per item (previously this row was appended twice
        # for folders under 'Test Techm').
        rows.append((id, name, mime_type, folder_count))
    lt, duplicate_ct = check_duplicate_image_new(items)
    duplicateImagehtml(folder_count, image_count, duplicate_ct, items)
    draw_chart_create_report(list_all_folder_name, image_count,
        duplicate_ct, overview_map, folder_count)
def createDeviceCSV():
    """Write the collected name -> URL map (``csv_map``) to DuplicateImage.csv."""
    fileName = 'DuplicateImage.csv'
    # newline='' is required by the csv module to avoid blank rows on
    # Windows; the with-statement closes the file, so the previous explicit
    # close() inside the block was redundant.
    with open(fileName, 'w', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(['Image Name', 'Image Url'])
        for k, v in csv_map.items():
            writer.writerow([k, v])
def duplicateImagehtml(folder_count, image_count, duplicate_ct, items,
    precomputed=None):
    """Write duplicateData.html: a drive summary table plus a duplicate table.

    Parameters
    ----------
    folder_count, image_count, duplicate_ct : totals shown in the summary.
    items : Drive file listing; scanned for duplicates only when
        *precomputed* is None.
    precomputed : optional name -> set-of-links map, as returned (first
        element) by check_duplicate_image_new().  Passing it avoids
        re-downloading and re-comparing every image a second time.
    """
    if precomputed is None:
        # Backward-compatible fallback: full (expensive) re-scan.
        map1, _ = check_duplicate_image_new(items)
    else:
        map1 = precomputed
    uri = []
    for k, v in map1.items():
        # Each row: [image name, duplicate count, stringified link set].
        uri.append([k, str(len(v)), str(v)])
    message = """ <html> <head>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['table']});
google.charts.setOnLoadCallback(drawTable);
function drawTable() {
var data3 = new google.visualization.DataTable();
data3.addColumn('string', 'Name');
data3.addColumn('string', 'Count');
data3.addRows([
['Total Folders', '""" + str(folder_count) + """'],
['Total Images', '""" + str(image_count) + """'],
['Duplicate Images', '""" + str(duplicate_ct) + """']]);
var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
var data = new google.visualization.DataTable();
data.addColumn('string', 'Image Name');
data.addColumn('string', 'Image Count');
data.addColumn('string', 'Image Url');
data.addRows(""" + str(uri) + """);
var table = new google.visualization.Table(document.getElementById('table_div'));
table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});
}
</script>
</head>
<body><h2 style="text-align: center">Google Drive Summary Table</h2>
<div id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div>
<h2 style="text-align: center" >List of Duplicate Image</h2>
<div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div>
</body></html>"""
    with open('duplicateData.html', 'w') as fb:
        fb.write(message)
    print('Duplicate image data preparing.. ')
def draw_chart_create_report(folder_count, image_count, duplicate_ct, map,
    folder_count_real):
    """Write gDriveOverview.html with a drive pie chart and a per-folder bar chart.

    NOTE(review): despite its name, *folder_count* is the list of folder
    names (one bar label each); *folder_count_real* is the numeric folder
    total.  *map* maps folder name -> list of image names in that folder.
    """
    values = list(map.values())
    per_folder_duplicates = []
    total_image_count = []
    for v in values:
        # Per-folder duplicate detection on the locally downloaded images.
        per_folder_duplicates.append(len(duplicate_image_list(v)))
        total_image_count.append(len(v))
    m1 = """<html>
<head>
<h1 style ="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
google.charts.load('current', {'packages':['bar','corechart','table']});
google.charts.setOnLoadCallback(drawChart);
function drawChart() {
var paiData = google.visualization.arrayToDataTable([
['Drive', 'Drive Data'],
['Total Images', """ + str(image_count) + """],
['Total duplicate Images', """ + str(duplicate_ct) + """],
['Total Folder', """ + str(folder_count_real) + """]
]);
var paiOptions = {
title: 'Google Drive Overview'
};
var chart = new google.visualization.PieChart(document.getElementById('piechart'));
chart.draw(paiData, paiOptions);
var barData = google.visualization.arrayToDataTable("""
    barchart_data = [['Folders', 'Total no of Images',
        'Total no of duplicate Images']]
    for i in range(len(values)):
        barchart_data.append([folder_count[i], total_image_count[i],
            per_folder_duplicates[i]])
    m3 = str(barchart_data) + """);
var barOptions = {
chart: { title: 'Google Drive Folderwise Overview',
subtitle: 'This report is created on '+new Date(),
}};
var chart = new google.charts.Bar(document.getElementById('bar_chart'));
chart.draw(barData, google.charts.Bar.convertOptions(barOptions));
}
</script>
</head>
<body>
<div style="width:100%; margin:0px auto;">
<div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div>
<div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div>
</div>
<div>
<h2>
<p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p>
</h2></div></body></html>
"""
    # BUGFIX: the header section m1 was previously written twice
    # (fb.write(m1) appeared both before and after the bar-chart loop),
    # producing malformed HTML with a duplicated <html>/pie-chart block.
    with open('gDriveOverview.html', 'w') as fb:
        fb.write(m1)
        fb.write(m3)
    print('Bar and Pie chart creating.... ')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import csv
import io
import pickle
import os
import pip
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
import cv2
import numpy as np
# OAuth scopes requested at authorization time. The full 'drive' scope is
# needed because this script downloads, renames, and creates files/folders.
SCOPES = ['https://www.googleapis.com/auth/drive.metadata',
    'https://www.googleapis.com/auth/drive.file',
    'https://www.googleapis.com/auth/drive']
def install(package):
    """Install *package* with pip, supporting both old and new pip APIs."""
    # pip >= 10 moved main() under pip._internal; older releases expose it
    # directly on the top-level module.
    installer = pip.main if hasattr(pip, 'main') else pip._internal.main
    installer(['install', package])
def create_folder(service):
    """Create a 'Test Techm' folder in Drive and print its id."""
    metadata = {'name': 'Test Techm',
                'mimeType': 'application/vnd.google-apps.folder'}
    created = service.files().create(body=metadata, fields='id').execute()
    print('Folder ID: %s' % created.get('id'))
def get_gdrive_service():
    """Return an authorized Drive v3 service, caching credentials in token.pickle."""
    token_path = 'token.pickle'
    creds = None
    # Reuse credentials cached by a previous successful authorization.
    if os.path.exists(token_path):
        with open(token_path, 'rb') as fh:
            creds = pickle.load(fh)
    if not creds or not creds.valid:
        # Prefer a silent token refresh; otherwise run the interactive
        # OAuth flow against credentials.json.
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            creds = InstalledAppFlow.from_client_secrets_file(
                'credentials.json', SCOPES).run_local_server(port=0)
        # Cache whatever we ended up with for the next invocation.
        with open(token_path, 'wb') as fh:
            pickle.dump(creds, fh)
    return build('drive', 'v3', credentials=creds)
def downloadFile(id, name):
    """Download the Drive file with the given id into ./<name>."""
    media_request = get_gdrive_service().files().get_media(fileId=id)
    buffer = io.BytesIO()
    downloader = MediaIoBaseDownload(buffer, media_request)
    done = False
    while not done:
        _, done = downloader.next_chunk()
    # Rewind the in-memory buffer and flush it to disk.
    buffer.seek(0)
    with io.open("." + "/" + name, 'wb') as f:
        f.write(buffer.read())
def is_duplicate(img1, img2):
    """Return True if the image files *img1* and *img2* are pixel-identical.

    Returns False when either file cannot be read or the images differ in
    shape. Previously a bare ``except:`` swallowed every error (including
    KeyboardInterrupt); the failure modes are now handled explicitly.
    """
    image1 = cv2.imread(img1)
    image2 = cv2.imread(img2)
    # cv2.imread returns None for missing/unreadable files.
    if image1 is None or image2 is None:
        return False
    # cv2.subtract raises on mismatched shapes; differing shapes can never
    # be duplicates anyway, so short-circuit instead.
    if image1.shape != image2.shape:
        return False
    difference = cv2.subtract(image1, image2)
    # All-zero difference means the two images are identical.
    return not np.any(difference)
def check_duplicate_image_new(items):
    """Find pixel-identical jpegs among *items* (Drive API file resources).

    Downloads every jpeg into the working directory, then compares each
    pair with is_duplicate(). Returns (m, tdct) where m maps an image name
    to the set of webViewLinks of its duplicates and tdct is the total
    number of duplicates found.
    """
    print("Images is loading to memory..")
    #"""given items returned by Google Drive API, prints them in a tabular way"""
    # NOTE(review): local names 'map' and 'list' shadow the builtins.
    map= {}
    list=[]
    message= set()  # unused
    duplicate_image=[]  # unused
    final_result={}  # image name -> list of names of its duplicates
    if not items:
        print('No files found.')
    else:
        for item in items:
            if item["mimeType"] == "image/jpeg":
                list.append(item["name"])
                # Record name -> webViewLink (both branches store the same
                # value; 'val' is dead).
                value=[]
                value.append(item["name"])
                value.append(item["webViewLink"])
                if item["name"] in map:
                    val=set()
                    val.add(item["webViewLink"])
                    map[item["name"]]=item["webViewLink"]
                else:
                    map[item["name"]]=item["webViewLink"]
                # Download the image so it can be compared pixel-by-pixel.
                downloadFile(item["id"],item["name"])
    # Pairwise comparison; 'match' holds names already assigned to some
    # duplicate group so they are not reported a second time.
    match=[]
    flag=False
    for i in range(len(list)-1):
        temp=[]
        dp_count=0
        flag=False
        if list[i] not in match :
            flag=True
            for j in range(i+1,len(list)):
                istrue=is_duplicate(list[i],list[j])
                if istrue==True:
                    dp_count=dp_count+1
                    temp.append(list[j])
                    if list[j] not in match:
                        match.append(list[j])
                    if list[i] not in match:
                        match.append(list[i])
                    if len(match)==0:
                        match.append(list[i])
                        match.append(list[j])
        if flag==True and dp_count !=0:
            #print(list[i]," - ",dp_count)
            final_result[list[i]]=temp
    # Resolve every duplicate name back to its webViewLink(s).
    m={}
    tdct=0
    for x, y in final_result.items():
        res=y
        tdct=tdct+len(res)
        s=set()
        for i in res:
            #s=set()
            for item in items:
                if item["mimeType"] == "image/jpeg":
                    if item["name"]==i:
                        s.add(item["webViewLink"])
        m[x]=s
    return m,tdct
def duplicate_image_list(imagelist):
    """Return the names of images in *imagelist* that have at least one
    pixel-identical twin later in the list.

    The source name is appended once per matching twin, so the returned
    list can contain repeats; callers use len() of it as a duplicate count.
    Images that cannot be read or differ in shape are skipped (the old
    bare ``except:`` handled this implicitly; it is now explicit). The
    reference image is read from disk once per outer iteration instead of
    once per pair.
    """
    dup_list = []
    for i in range(len(imagelist) - 1):
        # Hoisted out of the inner loop: previously this file was re-read
        # from disk for every (i, j) pair.
        image1 = cv2.imread(imagelist[i])
        if image1 is None:
            continue
        for j in range(i + 1, len(imagelist)):
            image2 = cv2.imread(imagelist[j])
            if image2 is None or image2.shape != image1.shape:
                continue
            # All-zero difference means the pair is identical.
            if not np.any(cv2.subtract(image1, image2)):
                dup_list.append(imagelist[i])
    return dup_list
# Module-level accumulator: image name -> webViewLink. Filled by
# check_duplicate_image() and consumed by createDeviceCSV().
csv_map = {}
def check_duplicate_image(items):
    """Download every jpeg in *items* and return the list of duplicates.

    Side effects: records name -> webViewLink in the module-level csv_map
    and downloads each jpeg into the working directory.

    Cleanup: the old local dict named 'map' (shadowing the builtin) and the
    'value' list were built but never read; both are removed, along with
    the dead initial assignment to duplicate_image.
    """
    image_name_list = []
    for item in items:
        if item["mimeType"] == "image/jpeg":
            image_name_list.append(item["name"])
            csv_map[item["name"]] = item["webViewLink"]
            # Fetch the image locally so it can be compared pixel-by-pixel.
            downloadFile(item["id"], item["name"])
    return duplicate_image_list(image_name_list)
def renameFile(service, items, newName):
    """Sequentially rename every jpeg/png in *items* to <newName><n>.<ext>."""
    count = 1
    for item in items:
        file_id = item["id"]
        mime_type = item["mimeType"]
        body = service.files().get(fileId=file_id).execute()
        # The update call rejects a body that still carries the file id.
        del body['id']
        if "jpeg" in mime_type:
            body['name'] = newName + str(count) + ".jpg"
        if "png" in mime_type:
            body['name'] = newName + str(count) + ".png"
        service.files().update(fileId=file_id, body=body).execute()
        # The counter advances for every item, image or not (original
        # behavior preserved).
        count = count + 1
def count_image(id):
    """Recursively collect the names of all jpeg images under folder *id*."""
    service = get_gdrive_service()
    listing = service.files().list(
        pageSize=1000, q="'{}' in parents".format(id)).execute()
    names = []
    for entry in listing.get('files', []):
        kind = entry["mimeType"]
        if kind == "image/jpeg":
            names.append(entry["name"])
        elif kind == "application/vnd.google-apps.folder":
            # Descend into sub-folders and merge their image names.
            names.extend(count_image(entry["id"]))
    return names
def list_files(items, service):
    """Walk the Drive listing, gather per-folder image stats, and emit both
    HTML reports (duplicateData.html and gDriveOverview.html).

    items   -- file resources from the Drive API (with parents and
               webViewLink fields populated)
    service -- authorized Drive service (not used directly in this body)
    """
    folder_count = 0
    image_count = 0
    imglist = []  # unused beyond appends
    count = 0
    testtechm_id = ''
    nm_name = []
    img_count = []
    list_all_folder_name=[]
    rows = []  # collected but never consumed
    overview_map = {}  # folder name -> list of image names it contains
    img_nm=0  # unused
    # First pass: locate the parent id of the 'Test Techm' folder so that
    # only its direct sub-folders are reported on.
    for item in items:
        name = item["name"]
        mime_type = item["mimeType"]
        if name == 'Test Techm':
            testtechm_id = item['parents'][0]
    # Second pass: count folders/images and collect per-folder image lists.
    for item in items:
        id = item["id"]
        name = item["name"]
        mime_type = item["mimeType"]
        if mime_type == "application/vnd.google-apps.folder":
            folder_count = folder_count + 1
        if mime_type == "image/jpeg":
            # renameFile(item["id"],"rajj_img"+str(image_count))
            image_count = image_count + 1
        if mime_type == "application/vnd.google-apps.folder" and item["parents"][0] == testtechm_id:
            list_all_folder_name.append(item["name"])
            name1 = count_image(id)
            nm_name.append(name1)
            img_count.append(len(name1))
            overview_map[item["name"]] = name1
            # NOTE(review): this tuple is appended again unconditionally
            # below, so sub-folders of 'Test Techm' appear twice in rows.
            rows.append((id, name, mime_type, folder_count))
            imglist.append(count)
        rows.append((id, name, mime_type, folder_count))
    #duplicate_count = len(check_duplicate_image(items))
    lt,duplicate_ct=check_duplicate_image_new(items)
    duplicateImagehtml(folder_count, image_count, duplicate_ct,items)
    # overview chart report page
    draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count)
def createDeviceCSV():
    """Dump the module-level csv_map (image name -> webViewLink) to
    DuplicateImage.csv, one row per image plus a header row.

    Fixes: open the file with newline='' as required by the csv module
    (otherwise every row is followed by a blank line on Windows), and drop
    the redundant close() call after the with-block.
    """
    fileName = 'DuplicateImage.csv'
    with open(fileName, 'w', newline='') as csvFile:
        writer = csv.writer(csvFile)
        writer.writerow(["Image Name", 'Image Url'])
        for k, v in csv_map.items():
            writer.writerow([k, v])
def duplicateImagehtml(folder_count, image_count, duplicate_ct,items):
    """Write duplicateData.html: a summary table plus one row per image
    that has duplicates (name, duplicate count, webViewLinks).

    NOTE(review): this re-runs check_duplicate_image_new (downloads and
    pairwise comparison included) even though the caller already did.
    """
    uri = []
    map1,count=check_duplicate_image_new(items)
    for k, v in map1.items():
        # v is a set of webViewLinks; str(v) is embedded verbatim into the
        # generated JavaScript below.
        name_url = []
        name_url.append(k)
        name_url.append(str(len(v)))
        name_url.append(str(v))
        uri.append(name_url)
    fb = open('duplicateData.html', 'w')
    message = """ <html> <head>
    <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
    <script type="text/javascript">
    google.charts.load('current', {'packages':['table']});
    google.charts.setOnLoadCallback(drawTable);
    function drawTable() {
    var data3 = new google.visualization.DataTable();
    data3.addColumn('string', 'Name');
    data3.addColumn('string', 'Count');
    data3.addRows([
    ['Total Folders', '""" + str(folder_count) + """'],
    ['Total Images', '""" + str(image_count) + """'],
    ['Duplicate Images', '""" + str(duplicate_ct) + """']]);
    var table2 = new google.visualization.Table(document.getElementById('table_div_base'));
    table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});
    var data = new google.visualization.DataTable();
    data.addColumn('string', 'Image Name');
    data.addColumn('string', 'Image Count');
    data.addColumn('string', 'Image Url');
    data.addRows(""" + str(uri) + """);
    var table = new google.visualization.Table(document.getElementById('table_div'));
    table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});
    }
    </script>
    </head>
    <body><h2 style="text-align: center">Google Drive Summary Table</h2>
    <div id="table_div_base" style="width: 100%; height: 200px; display:inline-block;border-style: solid"></div>
    <h2 style="text-align: center" >List of Duplicate Image</h2>
    <div id="table_div" style="width: 100%; height: 500px; display:inline-block;border-style: solid"></div>
    </body></html>"""
    fb.write(message)
    fb.close()
    print("Duplicate image data preparing.. ")
    # webbrowser.open_new_tab('helloworld.html')
# webbrowser.open_new_tab('helloworld.html')
def draw_chart_create_report(folder_count, image_count, duplicate_ct, map,
                             folder_count_real):
    """Write gDriveOverview.html: a pie chart of drive totals plus a bar
    chart of per-folder image/duplicate counts.

    folder_count      -- folder names directly under 'Test Techm'
                         (parallel to map's values)
    image_count       -- total number of images found in the drive
    duplicate_ct      -- total number of duplicate images
    map               -- folder name -> list of image names in that folder;
                         the parameter name shadows the builtin but is kept
                         for backward compatibility with existing callers
    folder_count_real -- total number of folders in the drive
    """
    values = list(map.values())
    total_image_count = []
    per_folder_duplicates = []
    for v in values:
        # duplicate_image_list compares the image files that were
        # downloaded into the working directory earlier in the run.
        per_folder_duplicates.append(len(duplicate_image_list(v)))
        total_image_count.append(len(v))
    m1 = """<html>
    <head>
    <h1 style ="color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;">Google Drive Data Overview</h1>
    <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
    <script type="text/javascript">
    google.charts.load('current', {'packages':['bar','corechart','table']});
    google.charts.setOnLoadCallback(drawChart);
    function drawChart() {
    var paiData = google.visualization.arrayToDataTable([
    ['Drive', 'Drive Data'],
    ['Total Images', """ + str(image_count) + """],
    ['Total duplicate Images', """ + str(duplicate_ct) + """],
    ['Total Folder', """ + str(folder_count_real) + """]
    ]);
    var paiOptions = {
    title: 'Google Drive Overview'
    };
    var chart = new google.visualization.PieChart(document.getElementById('piechart'));
    chart.draw(paiData, paiOptions);
    var barData = google.visualization.arrayToDataTable("""
    barchart_data = [['Folders', 'Total no of Images',
                      'Total no of duplicate Images']]
    for i in range(len(values)):
        barchart_data.append([folder_count[i], total_image_count[i],
                              per_folder_duplicates[i]])
    m3 = str(barchart_data) + """);
    var barOptions = {
    chart: { title: 'Google Drive Folderwise Overview',
    subtitle: 'This report is created on '+new Date(),
    }};
    var chart = new google.charts.Bar(document.getElementById('bar_chart'));
    chart.draw(barData, google.charts.Bar.convertOptions(barOptions));
    }
    </script>
    </head>
    <body>
    <div style="width:100%; margin:0px auto;">
    <div id="piechart" style="width: 900px; height: 500px; display:inline-block;"></div>
    <div id="bar_chart" style="width: 900px; height: 500px; display:inline-block;"></div>
    </div>
    <div>
    <h2>
    <p style="float:right;color:red;">** <a href="duplicateData.html" target="_blank">Click here to know more about duplicate image data</a></p>
    </h2></div></body></html>
    """
    # Bug fix: m1 was previously written twice (before and after building
    # the bar-chart rows), duplicating the entire <head> block and
    # corrupting the generated page. It is now written exactly once, and
    # the file is closed via a context manager even on error.
    with open('gDriveOverview.html', 'w') as fb:
        fb.write(m1)
        fb.write(m3)
    print("Bar and Pie chart creating.... ")
def main():
    """Entry point: rename Drive images sequentially, then generate the
    duplicate-image and overview HTML reports."""
    service = get_gdrive_service()
    print("Wait a moment script is running ..!!!")
    response = service.files().list(
        pageSize=1000,
        fields="nextPageToken,files(id, name,mimeType,parents,webViewLink)"
    ).execute()
    items = response.get('files', [])
    if not items:
        # Empty drive: nothing to rename or report on.
        print('No files found.')
        return
    print("-----_")
    renameFile(service, items, "g_image_")
    print("==============================")
    list_files(items, service)
if __name__ == '__main__':
    # Run the Drive scan only when executed as a script, not when imported.
    main()
    print("Script is done ..!!!")
|
flexible
|
{
"blob_id": "f32b9dc36b2452fea8c8f284fbf800f22608c3ae",
"index": 8541,
"step-1": "<mask token>\n\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', package])\n else:\n pip._internal.main(['install', package])\n\n\n<mask token>\n\n\ndef get_gdrive_service():\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=0)\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return build('drive', 'v3', credentials=creds)\n\n\ndef downloadFile(id, name):\n service = get_gdrive_service()\n request = service.files().get_media(fileId=id)\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n with io.open('.' + '/' + name, 'wb') as f:\n fh.seek(0)\n f.write(fh.read())\n\n\n<mask token>\n\n\ndef check_duplicate_image(items):\n map = {}\n image_name_list = []\n duplicate_image = []\n for item in items:\n file_type = item['mimeType']\n if file_type == 'image/jpeg':\n image_name_list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n map[item['id']] = value\n csv_map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n duplicate_image = duplicate_image_list(image_name_list)\n return duplicate_image\n\n\ndef renameFile(service, items, newName):\n count = 1\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n file = service.files().get(fileId=id).execute()\n del file['id']\n if 'jpeg' in mime_type:\n file['name'] = newName + str(count) + '.jpg'\n if 'png' in mime_type:\n file['name'] = newName + str(count) + '.png'\n updated_file = service.files().update(fileId=id, body=file).execute()\n count = 
count + 1\n\n\n<mask token>\n\n\ndef list_files(items, service):\n folder_count = 0\n image_count = 0\n imglist = []\n count = 0\n testtechm_id = ''\n nm_name = []\n img_count = []\n list_all_folder_name = []\n rows = []\n overview_map = {}\n img_nm = 0\n for item in items:\n name = item['name']\n mime_type = item['mimeType']\n if name == 'Test Techm':\n testtechm_id = item['parents'][0]\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n if mime_type == 'application/vnd.google-apps.folder':\n folder_count = folder_count + 1\n if mime_type == 'image/jpeg':\n image_count = image_count + 1\n if mime_type == 'application/vnd.google-apps.folder' and item['parents'\n ][0] == testtechm_id:\n list_all_folder_name.append(item['name'])\n name1 = count_image(id)\n nm_name.append(name1)\n img_count.append(len(name1))\n overview_map[item['name']] = name1\n rows.append((id, name, mime_type, folder_count))\n imglist.append(count)\n rows.append((id, name, mime_type, folder_count))\n lt, duplicate_ct = check_duplicate_image_new(items)\n duplicateImagehtml(folder_count, image_count, duplicate_ct, items)\n draw_chart_create_report(list_all_folder_name, image_count,\n duplicate_ct, overview_map, folder_count)\n\n\ndef createDeviceCSV():\n fileName = 'DuplicateImage.csv'\n with open(fileName, 'w') as csvFile:\n writer = csv.writer(csvFile)\n row = ['Image Name', 'Image Url']\n writer.writerow(row)\n count = 0\n for k, v in csv_map.items():\n row = [k, v]\n writer.writerow(row)\n count = count + 1\n csvFile.close()\n\n\ndef duplicateImagehtml(folder_count, image_count, duplicate_ct, items):\n uri = []\n map1, count = check_duplicate_image_new(items)\n for k, v in map1.items():\n name_url = []\n name_url.append(k)\n name_url.append(str(len(v)))\n name_url.append(str(v))\n uri.append(name_url)\n fb = open('duplicateData.html', 'w')\n message = \"\"\" <html> <head>\n <script type=\"text/javascript\" 
src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['table']});\n google.charts.setOnLoadCallback(drawTable);\n function drawTable() {\n var data3 = new google.visualization.DataTable();\n data3.addColumn('string', 'Name');\n data3.addColumn('string', 'Count');\n data3.addRows([\n ['Total Folders', '\"\"\" + str(folder_count) + \"\"\"'],\n ['Total Images', '\"\"\" + str(image_count) + \"\"\"'],\n ['Duplicate Images', '\"\"\" + str(duplicate_ct) + \"\"\"']]);\n\n var table2 = new google.visualization.Table(document.getElementById('table_div_base'));\n\n table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});\n var data = new google.visualization.DataTable();\n data.addColumn('string', 'Image Name');\n data.addColumn('string', 'Image Count');\n data.addColumn('string', 'Image Url');\n data.addRows(\"\"\" + str(uri) + \"\"\");\n var table = new google.visualization.Table(document.getElementById('table_div'));\n table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});\n }\n </script>\n </head>\n <body><h2 style=\"text-align: center\">Google Drive Summary Table</h2>\n <div id=\"table_div_base\" style=\"width: 100%; height: 200px; display:inline-block;border-style: solid\"></div>\n <h2 style=\"text-align: center\" >List of Duplicate Image</h2>\n <div id=\"table_div\" style=\"width: 100%; height: 500px; display:inline-block;border-style: solid\"></div>\n </body></html>\"\"\"\n fb.write(message)\n fb.close()\n print('Duplicate image data preparing.. 
')\n\n\ndef draw_chart_create_report(folder_count, image_count, duplicate_ct, map,\n folder_count_real):\n fb = open('gDriveOverview.html', 'w')\n values = list(map.values())\n newlist = []\n folder_name = list(map.keys())\n total_image_count = []\n duplicate_image_count_in_folder = []\n for v in values:\n newlist.append(duplicate_image_list(v))\n total_image_count.append(len(v))\n for n in newlist:\n duplicate_image_count_in_folder.append(len(n))\n m1 = \"\"\"<html>\n <head>\n <h1 style =\"color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;\">Google Drive Data Overview</h1>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['bar','corechart','table']});\n google.charts.setOnLoadCallback(drawChart);\n function drawChart() {\n var paiData = google.visualization.arrayToDataTable([\n ['Drive', 'Drive Data'],\n ['Total Images', \"\"\" + str(image_count) + \"\"\"],\n ['Total duplicate Images', \"\"\" + str(duplicate_ct) + \"\"\"],\n ['Total Folder', \"\"\" + str(folder_count_real) + \"\"\"]\n ]);\n var paiOptions = {\n title: 'Google Drive Overview'\n };\n var chart = new google.visualization.PieChart(document.getElementById('piechart'));\n chart.draw(paiData, paiOptions);\n var barData = google.visualization.arrayToDataTable(\"\"\"\n fb.write(m1)\n barchart_data = []\n barchart_data.append(['Folders', 'Total no of Images',\n 'Total no of duplicate Images'])\n for i in range(len(values)):\n item_list = []\n item_list.append(folder_count[i])\n item_list.append(total_image_count[i])\n item_list.append(duplicate_image_count_in_folder[i])\n barchart_data.append(item_list)\n fb.write(m1)\n m3 = str(barchart_data) + \"\"\");\n \n var barOptions = {\n chart: { title: 'Google Drive Folderwise Overview',\n subtitle: 'This report is created on '+new Date(),\n }};\n \n var chart = new 
google.charts.Bar(document.getElementById('bar_chart'));\n \n chart.draw(barData, google.charts.Bar.convertOptions(barOptions));\n }\n </script>\n </head>\n <body>\n <div style=\"width:100%; margin:0px auto;\">\n <div id=\"piechart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n <div id=\"bar_chart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n </div>\n <div>\n <h2>\n <p style=\"float:right;color:red;\">** <a href=\"duplicateData.html\" target=\"_blank\">Click here to know more about duplicate image data</a></p>\n </h2></div></body></html>\n \"\"\"\n fb.write(m3)\n fb.close()\n print('Bar and Pie chart creating.... ')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', package])\n else:\n pip._internal.main(['install', package])\n\n\n<mask token>\n\n\ndef get_gdrive_service():\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=0)\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return build('drive', 'v3', credentials=creds)\n\n\ndef downloadFile(id, name):\n service = get_gdrive_service()\n request = service.files().get_media(fileId=id)\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n with io.open('.' + '/' + name, 'wb') as f:\n fh.seek(0)\n f.write(fh.read())\n\n\n<mask token>\n\n\ndef check_duplicate_image_new(items):\n print('Images is loading to memory..')\n map = {}\n list = []\n message = set()\n duplicate_image = []\n final_result = {}\n if not items:\n print('No files found.')\n else:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n if item['name'] in map:\n val = set()\n val.add(item['webViewLink'])\n map[item['name']] = item['webViewLink']\n else:\n map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n match = []\n flag = False\n for i in range(len(list) - 1):\n temp = []\n dp_count = 0\n flag = False\n if list[i] not in match:\n flag = True\n for j in range(i + 1, len(list)):\n istrue = is_duplicate(list[i], list[j])\n if istrue == True:\n dp_count = dp_count + 1\n temp.append(list[j])\n if list[j] not in match:\n match.append(list[j])\n if 
list[i] not in match:\n match.append(list[i])\n if len(match) == 0:\n match.append(list[i])\n match.append(list[j])\n if flag == True and dp_count != 0:\n final_result[list[i]] = temp\n m = {}\n tdct = 0\n for x, y in final_result.items():\n res = y\n tdct = tdct + len(res)\n s = set()\n for i in res:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n if item['name'] == i:\n s.add(item['webViewLink'])\n m[x] = s\n return m, tdct\n\n\ndef duplicate_image_list(imagelist):\n dup_list = []\n if len(imagelist) >= 1:\n for i in range(len(imagelist) - 1):\n count = 0\n l = []\n for j in range(i + 1, len(imagelist)):\n image1 = cv2.imread(imagelist[i])\n image2 = cv2.imread(imagelist[j])\n try:\n difference = cv2.subtract(image1, image2)\n result = not np.any(difference)\n if result is True:\n l.append(imagelist[j])\n count = count + 1\n dup_list.append(imagelist[i])\n except:\n i = 0\n return dup_list\n\n\n<mask token>\n\n\ndef check_duplicate_image(items):\n map = {}\n image_name_list = []\n duplicate_image = []\n for item in items:\n file_type = item['mimeType']\n if file_type == 'image/jpeg':\n image_name_list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n map[item['id']] = value\n csv_map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n duplicate_image = duplicate_image_list(image_name_list)\n return duplicate_image\n\n\ndef renameFile(service, items, newName):\n count = 1\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n file = service.files().get(fileId=id).execute()\n del file['id']\n if 'jpeg' in mime_type:\n file['name'] = newName + str(count) + '.jpg'\n if 'png' in mime_type:\n file['name'] = newName + str(count) + '.png'\n updated_file = service.files().update(fileId=id, body=file).execute()\n count = count + 1\n\n\n<mask token>\n\n\ndef list_files(items, service):\n folder_count = 0\n image_count = 0\n imglist = []\n count 
= 0\n testtechm_id = ''\n nm_name = []\n img_count = []\n list_all_folder_name = []\n rows = []\n overview_map = {}\n img_nm = 0\n for item in items:\n name = item['name']\n mime_type = item['mimeType']\n if name == 'Test Techm':\n testtechm_id = item['parents'][0]\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n if mime_type == 'application/vnd.google-apps.folder':\n folder_count = folder_count + 1\n if mime_type == 'image/jpeg':\n image_count = image_count + 1\n if mime_type == 'application/vnd.google-apps.folder' and item['parents'\n ][0] == testtechm_id:\n list_all_folder_name.append(item['name'])\n name1 = count_image(id)\n nm_name.append(name1)\n img_count.append(len(name1))\n overview_map[item['name']] = name1\n rows.append((id, name, mime_type, folder_count))\n imglist.append(count)\n rows.append((id, name, mime_type, folder_count))\n lt, duplicate_ct = check_duplicate_image_new(items)\n duplicateImagehtml(folder_count, image_count, duplicate_ct, items)\n draw_chart_create_report(list_all_folder_name, image_count,\n duplicate_ct, overview_map, folder_count)\n\n\ndef createDeviceCSV():\n fileName = 'DuplicateImage.csv'\n with open(fileName, 'w') as csvFile:\n writer = csv.writer(csvFile)\n row = ['Image Name', 'Image Url']\n writer.writerow(row)\n count = 0\n for k, v in csv_map.items():\n row = [k, v]\n writer.writerow(row)\n count = count + 1\n csvFile.close()\n\n\ndef duplicateImagehtml(folder_count, image_count, duplicate_ct, items):\n uri = []\n map1, count = check_duplicate_image_new(items)\n for k, v in map1.items():\n name_url = []\n name_url.append(k)\n name_url.append(str(len(v)))\n name_url.append(str(v))\n uri.append(name_url)\n fb = open('duplicateData.html', 'w')\n message = \"\"\" <html> <head>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['table']});\n 
google.charts.setOnLoadCallback(drawTable);\n function drawTable() {\n var data3 = new google.visualization.DataTable();\n data3.addColumn('string', 'Name');\n data3.addColumn('string', 'Count');\n data3.addRows([\n ['Total Folders', '\"\"\" + str(folder_count) + \"\"\"'],\n ['Total Images', '\"\"\" + str(image_count) + \"\"\"'],\n ['Duplicate Images', '\"\"\" + str(duplicate_ct) + \"\"\"']]);\n\n var table2 = new google.visualization.Table(document.getElementById('table_div_base'));\n\n table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});\n var data = new google.visualization.DataTable();\n data.addColumn('string', 'Image Name');\n data.addColumn('string', 'Image Count');\n data.addColumn('string', 'Image Url');\n data.addRows(\"\"\" + str(uri) + \"\"\");\n var table = new google.visualization.Table(document.getElementById('table_div'));\n table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});\n }\n </script>\n </head>\n <body><h2 style=\"text-align: center\">Google Drive Summary Table</h2>\n <div id=\"table_div_base\" style=\"width: 100%; height: 200px; display:inline-block;border-style: solid\"></div>\n <h2 style=\"text-align: center\" >List of Duplicate Image</h2>\n <div id=\"table_div\" style=\"width: 100%; height: 500px; display:inline-block;border-style: solid\"></div>\n </body></html>\"\"\"\n fb.write(message)\n fb.close()\n print('Duplicate image data preparing.. 
')\n\n\ndef draw_chart_create_report(folder_count, image_count, duplicate_ct, map,\n folder_count_real):\n fb = open('gDriveOverview.html', 'w')\n values = list(map.values())\n newlist = []\n folder_name = list(map.keys())\n total_image_count = []\n duplicate_image_count_in_folder = []\n for v in values:\n newlist.append(duplicate_image_list(v))\n total_image_count.append(len(v))\n for n in newlist:\n duplicate_image_count_in_folder.append(len(n))\n m1 = \"\"\"<html>\n <head>\n <h1 style =\"color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;\">Google Drive Data Overview</h1>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['bar','corechart','table']});\n google.charts.setOnLoadCallback(drawChart);\n function drawChart() {\n var paiData = google.visualization.arrayToDataTable([\n ['Drive', 'Drive Data'],\n ['Total Images', \"\"\" + str(image_count) + \"\"\"],\n ['Total duplicate Images', \"\"\" + str(duplicate_ct) + \"\"\"],\n ['Total Folder', \"\"\" + str(folder_count_real) + \"\"\"]\n ]);\n var paiOptions = {\n title: 'Google Drive Overview'\n };\n var chart = new google.visualization.PieChart(document.getElementById('piechart'));\n chart.draw(paiData, paiOptions);\n var barData = google.visualization.arrayToDataTable(\"\"\"\n fb.write(m1)\n barchart_data = []\n barchart_data.append(['Folders', 'Total no of Images',\n 'Total no of duplicate Images'])\n for i in range(len(values)):\n item_list = []\n item_list.append(folder_count[i])\n item_list.append(total_image_count[i])\n item_list.append(duplicate_image_count_in_folder[i])\n barchart_data.append(item_list)\n fb.write(m1)\n m3 = str(barchart_data) + \"\"\");\n \n var barOptions = {\n chart: { title: 'Google Drive Folderwise Overview',\n subtitle: 'This report is created on '+new Date(),\n }};\n \n var chart = new 
google.charts.Bar(document.getElementById('bar_chart'));\n \n chart.draw(barData, google.charts.Bar.convertOptions(barOptions));\n }\n </script>\n </head>\n <body>\n <div style=\"width:100%; margin:0px auto;\">\n <div id=\"piechart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n <div id=\"bar_chart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n </div>\n <div>\n <h2>\n <p style=\"float:right;color:red;\">** <a href=\"duplicateData.html\" target=\"_blank\">Click here to know more about duplicate image data</a></p>\n </h2></div></body></html>\n \"\"\"\n fb.write(m3)\n fb.close()\n print('Bar and Pie chart creating.... ')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', package])\n else:\n pip._internal.main(['install', package])\n\n\n<mask token>\n\n\ndef get_gdrive_service():\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=0)\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return build('drive', 'v3', credentials=creds)\n\n\ndef downloadFile(id, name):\n service = get_gdrive_service()\n request = service.files().get_media(fileId=id)\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n with io.open('.' + '/' + name, 'wb') as f:\n fh.seek(0)\n f.write(fh.read())\n\n\n<mask token>\n\n\ndef check_duplicate_image_new(items):\n print('Images is loading to memory..')\n map = {}\n list = []\n message = set()\n duplicate_image = []\n final_result = {}\n if not items:\n print('No files found.')\n else:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n if item['name'] in map:\n val = set()\n val.add(item['webViewLink'])\n map[item['name']] = item['webViewLink']\n else:\n map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n match = []\n flag = False\n for i in range(len(list) - 1):\n temp = []\n dp_count = 0\n flag = False\n if list[i] not in match:\n flag = True\n for j in range(i + 1, len(list)):\n istrue = is_duplicate(list[i], list[j])\n if istrue == True:\n dp_count = dp_count + 1\n temp.append(list[j])\n if list[j] not in match:\n match.append(list[j])\n if 
list[i] not in match:\n match.append(list[i])\n if len(match) == 0:\n match.append(list[i])\n match.append(list[j])\n if flag == True and dp_count != 0:\n final_result[list[i]] = temp\n m = {}\n tdct = 0\n for x, y in final_result.items():\n res = y\n tdct = tdct + len(res)\n s = set()\n for i in res:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n if item['name'] == i:\n s.add(item['webViewLink'])\n m[x] = s\n return m, tdct\n\n\ndef duplicate_image_list(imagelist):\n dup_list = []\n if len(imagelist) >= 1:\n for i in range(len(imagelist) - 1):\n count = 0\n l = []\n for j in range(i + 1, len(imagelist)):\n image1 = cv2.imread(imagelist[i])\n image2 = cv2.imread(imagelist[j])\n try:\n difference = cv2.subtract(image1, image2)\n result = not np.any(difference)\n if result is True:\n l.append(imagelist[j])\n count = count + 1\n dup_list.append(imagelist[i])\n except:\n i = 0\n return dup_list\n\n\n<mask token>\n\n\ndef check_duplicate_image(items):\n map = {}\n image_name_list = []\n duplicate_image = []\n for item in items:\n file_type = item['mimeType']\n if file_type == 'image/jpeg':\n image_name_list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n map[item['id']] = value\n csv_map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n duplicate_image = duplicate_image_list(image_name_list)\n return duplicate_image\n\n\ndef renameFile(service, items, newName):\n count = 1\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n file = service.files().get(fileId=id).execute()\n del file['id']\n if 'jpeg' in mime_type:\n file['name'] = newName + str(count) + '.jpg'\n if 'png' in mime_type:\n file['name'] = newName + str(count) + '.png'\n updated_file = service.files().update(fileId=id, body=file).execute()\n count = count + 1\n\n\ndef count_image(id):\n imageList = []\n service = get_gdrive_service()\n results = 
service.files().list(pageSize=1000, q=\"'{}' in parents\".\n format(id)).execute()\n items = results.get('files', [])\n for item in items:\n mime_Type = item['mimeType']\n if mime_Type == 'image/jpeg':\n imageList.append(item['name'])\n if mime_Type == 'application/vnd.google-apps.folder':\n imageList.extend(count_image(item['id']))\n return imageList\n\n\ndef list_files(items, service):\n folder_count = 0\n image_count = 0\n imglist = []\n count = 0\n testtechm_id = ''\n nm_name = []\n img_count = []\n list_all_folder_name = []\n rows = []\n overview_map = {}\n img_nm = 0\n for item in items:\n name = item['name']\n mime_type = item['mimeType']\n if name == 'Test Techm':\n testtechm_id = item['parents'][0]\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n if mime_type == 'application/vnd.google-apps.folder':\n folder_count = folder_count + 1\n if mime_type == 'image/jpeg':\n image_count = image_count + 1\n if mime_type == 'application/vnd.google-apps.folder' and item['parents'\n ][0] == testtechm_id:\n list_all_folder_name.append(item['name'])\n name1 = count_image(id)\n nm_name.append(name1)\n img_count.append(len(name1))\n overview_map[item['name']] = name1\n rows.append((id, name, mime_type, folder_count))\n imglist.append(count)\n rows.append((id, name, mime_type, folder_count))\n lt, duplicate_ct = check_duplicate_image_new(items)\n duplicateImagehtml(folder_count, image_count, duplicate_ct, items)\n draw_chart_create_report(list_all_folder_name, image_count,\n duplicate_ct, overview_map, folder_count)\n\n\ndef createDeviceCSV():\n fileName = 'DuplicateImage.csv'\n with open(fileName, 'w') as csvFile:\n writer = csv.writer(csvFile)\n row = ['Image Name', 'Image Url']\n writer.writerow(row)\n count = 0\n for k, v in csv_map.items():\n row = [k, v]\n writer.writerow(row)\n count = count + 1\n csvFile.close()\n\n\ndef duplicateImagehtml(folder_count, image_count, duplicate_ct, items):\n uri = []\n map1, count = 
check_duplicate_image_new(items)\n for k, v in map1.items():\n name_url = []\n name_url.append(k)\n name_url.append(str(len(v)))\n name_url.append(str(v))\n uri.append(name_url)\n fb = open('duplicateData.html', 'w')\n message = \"\"\" <html> <head>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['table']});\n google.charts.setOnLoadCallback(drawTable);\n function drawTable() {\n var data3 = new google.visualization.DataTable();\n data3.addColumn('string', 'Name');\n data3.addColumn('string', 'Count');\n data3.addRows([\n ['Total Folders', '\"\"\" + str(folder_count) + \"\"\"'],\n ['Total Images', '\"\"\" + str(image_count) + \"\"\"'],\n ['Duplicate Images', '\"\"\" + str(duplicate_ct) + \"\"\"']]);\n\n var table2 = new google.visualization.Table(document.getElementById('table_div_base'));\n\n table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});\n var data = new google.visualization.DataTable();\n data.addColumn('string', 'Image Name');\n data.addColumn('string', 'Image Count');\n data.addColumn('string', 'Image Url');\n data.addRows(\"\"\" + str(uri) + \"\"\");\n var table = new google.visualization.Table(document.getElementById('table_div'));\n table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});\n }\n </script>\n </head>\n <body><h2 style=\"text-align: center\">Google Drive Summary Table</h2>\n <div id=\"table_div_base\" style=\"width: 100%; height: 200px; display:inline-block;border-style: solid\"></div>\n <h2 style=\"text-align: center\" >List of Duplicate Image</h2>\n <div id=\"table_div\" style=\"width: 100%; height: 500px; display:inline-block;border-style: solid\"></div>\n </body></html>\"\"\"\n fb.write(message)\n fb.close()\n print('Duplicate image data preparing.. 
')\n\n\ndef draw_chart_create_report(folder_count, image_count, duplicate_ct, map,\n folder_count_real):\n fb = open('gDriveOverview.html', 'w')\n values = list(map.values())\n newlist = []\n folder_name = list(map.keys())\n total_image_count = []\n duplicate_image_count_in_folder = []\n for v in values:\n newlist.append(duplicate_image_list(v))\n total_image_count.append(len(v))\n for n in newlist:\n duplicate_image_count_in_folder.append(len(n))\n m1 = \"\"\"<html>\n <head>\n <h1 style =\"color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;\">Google Drive Data Overview</h1>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['bar','corechart','table']});\n google.charts.setOnLoadCallback(drawChart);\n function drawChart() {\n var paiData = google.visualization.arrayToDataTable([\n ['Drive', 'Drive Data'],\n ['Total Images', \"\"\" + str(image_count) + \"\"\"],\n ['Total duplicate Images', \"\"\" + str(duplicate_ct) + \"\"\"],\n ['Total Folder', \"\"\" + str(folder_count_real) + \"\"\"]\n ]);\n var paiOptions = {\n title: 'Google Drive Overview'\n };\n var chart = new google.visualization.PieChart(document.getElementById('piechart'));\n chart.draw(paiData, paiOptions);\n var barData = google.visualization.arrayToDataTable(\"\"\"\n fb.write(m1)\n barchart_data = []\n barchart_data.append(['Folders', 'Total no of Images',\n 'Total no of duplicate Images'])\n for i in range(len(values)):\n item_list = []\n item_list.append(folder_count[i])\n item_list.append(total_image_count[i])\n item_list.append(duplicate_image_count_in_folder[i])\n barchart_data.append(item_list)\n fb.write(m1)\n m3 = str(barchart_data) + \"\"\");\n \n var barOptions = {\n chart: { title: 'Google Drive Folderwise Overview',\n subtitle: 'This report is created on '+new Date(),\n }};\n \n var chart = new 
google.charts.Bar(document.getElementById('bar_chart'));\n \n chart.draw(barData, google.charts.Bar.convertOptions(barOptions));\n }\n </script>\n </head>\n <body>\n <div style=\"width:100%; margin:0px auto;\">\n <div id=\"piechart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n <div id=\"bar_chart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n </div>\n <div>\n <h2>\n <p style=\"float:right;color:red;\">** <a href=\"duplicateData.html\" target=\"_blank\">Click here to know more about duplicate image data</a></p>\n </h2></div></body></html>\n \"\"\"\n fb.write(m3)\n fb.close()\n print('Bar and Pie chart creating.... ')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef install(package):\n if hasattr(pip, 'main'):\n pip.main(['install', package])\n else:\n pip._internal.main(['install', package])\n\n\n<mask token>\n\n\ndef get_gdrive_service():\n creds = None\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file('credentials.json'\n , SCOPES)\n creds = flow.run_local_server(port=0)\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n return build('drive', 'v3', credentials=creds)\n\n\ndef downloadFile(id, name):\n service = get_gdrive_service()\n request = service.files().get_media(fileId=id)\n fh = io.BytesIO()\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n with io.open('.' + '/' + name, 'wb') as f:\n fh.seek(0)\n f.write(fh.read())\n\n\ndef is_duplicate(img1, img2):\n response = False\n image1 = cv2.imread(img1)\n image2 = cv2.imread(img2)\n try:\n difference = cv2.subtract(image1, image2)\n result = not np.any(difference)\n if result is True:\n response = True\n except:\n i = 0\n return response\n\n\ndef check_duplicate_image_new(items):\n print('Images is loading to memory..')\n map = {}\n list = []\n message = set()\n duplicate_image = []\n final_result = {}\n if not items:\n print('No files found.')\n else:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n if item['name'] in map:\n val = set()\n val.add(item['webViewLink'])\n map[item['name']] = item['webViewLink']\n else:\n map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n match = []\n flag = False\n for i in range(len(list) - 1):\n temp = []\n dp_count = 0\n flag = 
False\n if list[i] not in match:\n flag = True\n for j in range(i + 1, len(list)):\n istrue = is_duplicate(list[i], list[j])\n if istrue == True:\n dp_count = dp_count + 1\n temp.append(list[j])\n if list[j] not in match:\n match.append(list[j])\n if list[i] not in match:\n match.append(list[i])\n if len(match) == 0:\n match.append(list[i])\n match.append(list[j])\n if flag == True and dp_count != 0:\n final_result[list[i]] = temp\n m = {}\n tdct = 0\n for x, y in final_result.items():\n res = y\n tdct = tdct + len(res)\n s = set()\n for i in res:\n for item in items:\n if item['mimeType'] == 'image/jpeg':\n if item['name'] == i:\n s.add(item['webViewLink'])\n m[x] = s\n return m, tdct\n\n\ndef duplicate_image_list(imagelist):\n dup_list = []\n if len(imagelist) >= 1:\n for i in range(len(imagelist) - 1):\n count = 0\n l = []\n for j in range(i + 1, len(imagelist)):\n image1 = cv2.imread(imagelist[i])\n image2 = cv2.imread(imagelist[j])\n try:\n difference = cv2.subtract(image1, image2)\n result = not np.any(difference)\n if result is True:\n l.append(imagelist[j])\n count = count + 1\n dup_list.append(imagelist[i])\n except:\n i = 0\n return dup_list\n\n\n<mask token>\n\n\ndef check_duplicate_image(items):\n map = {}\n image_name_list = []\n duplicate_image = []\n for item in items:\n file_type = item['mimeType']\n if file_type == 'image/jpeg':\n image_name_list.append(item['name'])\n value = []\n value.append(item['name'])\n value.append(item['webViewLink'])\n map[item['id']] = value\n csv_map[item['name']] = item['webViewLink']\n downloadFile(item['id'], item['name'])\n duplicate_image = duplicate_image_list(image_name_list)\n return duplicate_image\n\n\ndef renameFile(service, items, newName):\n count = 1\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n file = service.files().get(fileId=id).execute()\n del file['id']\n if 'jpeg' in mime_type:\n file['name'] = newName + str(count) + '.jpg'\n if 'png' in mime_type:\n 
file['name'] = newName + str(count) + '.png'\n updated_file = service.files().update(fileId=id, body=file).execute()\n count = count + 1\n\n\ndef count_image(id):\n imageList = []\n service = get_gdrive_service()\n results = service.files().list(pageSize=1000, q=\"'{}' in parents\".\n format(id)).execute()\n items = results.get('files', [])\n for item in items:\n mime_Type = item['mimeType']\n if mime_Type == 'image/jpeg':\n imageList.append(item['name'])\n if mime_Type == 'application/vnd.google-apps.folder':\n imageList.extend(count_image(item['id']))\n return imageList\n\n\ndef list_files(items, service):\n folder_count = 0\n image_count = 0\n imglist = []\n count = 0\n testtechm_id = ''\n nm_name = []\n img_count = []\n list_all_folder_name = []\n rows = []\n overview_map = {}\n img_nm = 0\n for item in items:\n name = item['name']\n mime_type = item['mimeType']\n if name == 'Test Techm':\n testtechm_id = item['parents'][0]\n for item in items:\n id = item['id']\n name = item['name']\n mime_type = item['mimeType']\n if mime_type == 'application/vnd.google-apps.folder':\n folder_count = folder_count + 1\n if mime_type == 'image/jpeg':\n image_count = image_count + 1\n if mime_type == 'application/vnd.google-apps.folder' and item['parents'\n ][0] == testtechm_id:\n list_all_folder_name.append(item['name'])\n name1 = count_image(id)\n nm_name.append(name1)\n img_count.append(len(name1))\n overview_map[item['name']] = name1\n rows.append((id, name, mime_type, folder_count))\n imglist.append(count)\n rows.append((id, name, mime_type, folder_count))\n lt, duplicate_ct = check_duplicate_image_new(items)\n duplicateImagehtml(folder_count, image_count, duplicate_ct, items)\n draw_chart_create_report(list_all_folder_name, image_count,\n duplicate_ct, overview_map, folder_count)\n\n\ndef createDeviceCSV():\n fileName = 'DuplicateImage.csv'\n with open(fileName, 'w') as csvFile:\n writer = csv.writer(csvFile)\n row = ['Image Name', 'Image Url']\n writer.writerow(row)\n 
count = 0\n for k, v in csv_map.items():\n row = [k, v]\n writer.writerow(row)\n count = count + 1\n csvFile.close()\n\n\ndef duplicateImagehtml(folder_count, image_count, duplicate_ct, items):\n uri = []\n map1, count = check_duplicate_image_new(items)\n for k, v in map1.items():\n name_url = []\n name_url.append(k)\n name_url.append(str(len(v)))\n name_url.append(str(v))\n uri.append(name_url)\n fb = open('duplicateData.html', 'w')\n message = \"\"\" <html> <head>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['table']});\n google.charts.setOnLoadCallback(drawTable);\n function drawTable() {\n var data3 = new google.visualization.DataTable();\n data3.addColumn('string', 'Name');\n data3.addColumn('string', 'Count');\n data3.addRows([\n ['Total Folders', '\"\"\" + str(folder_count) + \"\"\"'],\n ['Total Images', '\"\"\" + str(image_count) + \"\"\"'],\n ['Duplicate Images', '\"\"\" + str(duplicate_ct) + \"\"\"']]);\n\n var table2 = new google.visualization.Table(document.getElementById('table_div_base'));\n\n table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});\n var data = new google.visualization.DataTable();\n data.addColumn('string', 'Image Name');\n data.addColumn('string', 'Image Count');\n data.addColumn('string', 'Image Url');\n data.addRows(\"\"\" + str(uri) + \"\"\");\n var table = new google.visualization.Table(document.getElementById('table_div'));\n table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});\n }\n </script>\n </head>\n <body><h2 style=\"text-align: center\">Google Drive Summary Table</h2>\n <div id=\"table_div_base\" style=\"width: 100%; height: 200px; display:inline-block;border-style: solid\"></div>\n <h2 style=\"text-align: center\" >List of Duplicate Image</h2>\n <div id=\"table_div\" style=\"width: 100%; height: 500px; display:inline-block;border-style: solid\"></div>\n 
</body></html>\"\"\"\n fb.write(message)\n fb.close()\n print('Duplicate image data preparing.. ')\n\n\ndef draw_chart_create_report(folder_count, image_count, duplicate_ct, map,\n folder_count_real):\n fb = open('gDriveOverview.html', 'w')\n values = list(map.values())\n newlist = []\n folder_name = list(map.keys())\n total_image_count = []\n duplicate_image_count_in_folder = []\n for v in values:\n newlist.append(duplicate_image_list(v))\n total_image_count.append(len(v))\n for n in newlist:\n duplicate_image_count_in_folder.append(len(n))\n m1 = \"\"\"<html>\n <head>\n <h1 style =\"color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;\">Google Drive Data Overview</h1>\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\n <script type=\"text/javascript\">\n google.charts.load('current', {'packages':['bar','corechart','table']});\n google.charts.setOnLoadCallback(drawChart);\n function drawChart() {\n var paiData = google.visualization.arrayToDataTable([\n ['Drive', 'Drive Data'],\n ['Total Images', \"\"\" + str(image_count) + \"\"\"],\n ['Total duplicate Images', \"\"\" + str(duplicate_ct) + \"\"\"],\n ['Total Folder', \"\"\" + str(folder_count_real) + \"\"\"]\n ]);\n var paiOptions = {\n title: 'Google Drive Overview'\n };\n var chart = new google.visualization.PieChart(document.getElementById('piechart'));\n chart.draw(paiData, paiOptions);\n var barData = google.visualization.arrayToDataTable(\"\"\"\n fb.write(m1)\n barchart_data = []\n barchart_data.append(['Folders', 'Total no of Images',\n 'Total no of duplicate Images'])\n for i in range(len(values)):\n item_list = []\n item_list.append(folder_count[i])\n item_list.append(total_image_count[i])\n item_list.append(duplicate_image_count_in_folder[i])\n barchart_data.append(item_list)\n fb.write(m1)\n m3 = str(barchart_data) + \"\"\");\n \n var barOptions = {\n chart: { title: 'Google Drive Folderwise Overview',\n 
subtitle: 'This report is created on '+new Date(),\n }};\n \n var chart = new google.charts.Bar(document.getElementById('bar_chart'));\n \n chart.draw(barData, google.charts.Bar.convertOptions(barOptions));\n }\n </script>\n </head>\n <body>\n <div style=\"width:100%; margin:0px auto;\">\n <div id=\"piechart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n <div id=\"bar_chart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\n </div>\n <div>\n <h2>\n <p style=\"float:right;color:red;\">** <a href=\"duplicateData.html\" target=\"_blank\">Click here to know more about duplicate image data</a></p>\n </h2></div></body></html>\n \"\"\"\n fb.write(m3)\n fb.close()\n print('Bar and Pie chart creating.... ')\n\n\n<mask token>\n",
"step-5": "import csv\r\nimport io\r\nimport pickle\r\nimport os\r\nimport pip\r\nfrom googleapiclient.discovery import build\r\nfrom google_auth_oauthlib.flow import InstalledAppFlow\r\nfrom google.auth.transport.requests import Request\r\nfrom googleapiclient.http import MediaIoBaseDownload\r\nimport cv2\r\nimport numpy as np\r\n\r\nSCOPES = ['https://www.googleapis.com/auth/drive.metadata',\r\n 'https://www.googleapis.com/auth/drive.file',\r\n 'https://www.googleapis.com/auth/drive']\r\n\r\n\r\ndef install(package):\r\n if hasattr(pip, 'main'):\r\n pip.main(['install', package])\r\n else:\r\n pip._internal.main(['install', package])\r\n\r\n\r\ndef create_folder(service):\r\n file_metadata = {\r\n 'name': 'Test Techm',\r\n 'mimeType': 'application/vnd.google-apps.folder'\r\n }\r\n file = service.files().create(body=file_metadata,\r\n fields='id').execute()\r\n print('Folder ID: %s' % file.get('id'))\r\n\r\n\r\ndef get_gdrive_service():\r\n creds = None\r\n # The file token.pickle stores the user's access and refresh tokens, and is\r\n # created automatically when the authorization flow completes for the first\r\n # time.\r\n if os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n creds = pickle.load(token)\r\n # If there are no (valid) credentials available, let the user log in.\r\n if not creds or not creds.valid:\r\n if creds and creds.expired and creds.refresh_token:\r\n creds.refresh(Request())\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(\r\n 'credentials.json', SCOPES)\r\n creds = flow.run_local_server(port=0)\r\n # Save the credentials for the next run\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(creds, token)\r\n # return Google Drive API service\r\n return build('drive', 'v3', credentials=creds)\r\n\r\n\r\ndef downloadFile(id, name):\r\n service = get_gdrive_service()\r\n request = service.files().get_media(fileId=id)\r\n fh = io.BytesIO()\r\n downloader = MediaIoBaseDownload(fh, 
request)\r\n done = False\r\n while done is False:\r\n status, done = downloader.next_chunk()\r\n with io.open(\".\" + \"/\" + name, 'wb') as f:\r\n fh.seek(0)\r\n f.write(fh.read())\r\n\r\n\r\ndef is_duplicate(img1,img2):\r\n response=False\r\n image1 = cv2.imread(img1)\r\n image2 = cv2.imread(img2)\r\n try:\r\n difference = cv2.subtract(image1, image2)\r\n result = not np.any(difference) #if difference is all zeros it will return False\r\n if result is True:\r\n response=True\r\n #duplicate_image.append(list[i])\r\n #print(\"{} images is matching with {} Occurred {} times \".format(img1,img1,list.count(img1)))\r\n except:\r\n i=0\r\n\r\n return response\r\n\r\n\r\ndef check_duplicate_image_new(items):\r\n print(\"Images is loading to memory..\")\r\n #\"\"\"given items returned by Google Drive API, prints them in a tabular way\"\"\"\r\n map= {}\r\n list=[]\r\n message= set()\r\n duplicate_image=[]\r\n final_result={}\r\n if not items:\r\n print('No files found.')\r\n else:\r\n for item in items:\r\n if item[\"mimeType\"] == \"image/jpeg\":\r\n list.append(item[\"name\"])\r\n #Creating Map\r\n value=[]\r\n value.append(item[\"name\"])\r\n value.append(item[\"webViewLink\"])\r\n if item[\"name\"] in map:\r\n val=set()\r\n val.add(item[\"webViewLink\"])\r\n map[item[\"name\"]]=item[\"webViewLink\"]\r\n else:\r\n map[item[\"name\"]]=item[\"webViewLink\"]\r\n #Dowloading Image\r\n downloadFile(item[\"id\"],item[\"name\"])\r\n match=[]\r\n flag=False\r\n for i in range(len(list)-1):\r\n temp=[]\r\n dp_count=0\r\n flag=False\r\n if list[i] not in match :\r\n flag=True\r\n for j in range(i+1,len(list)):\r\n istrue=is_duplicate(list[i],list[j])\r\n if istrue==True:\r\n dp_count=dp_count+1\r\n temp.append(list[j])\r\n if list[j] not in match:\r\n match.append(list[j])\r\n if list[i] not in match:\r\n match.append(list[i])\r\n if len(match)==0:\r\n match.append(list[i])\r\n match.append(list[j])\r\n\r\n if flag==True and dp_count !=0:\r\n #print(list[i],\" - \",dp_count)\r\n 
final_result[list[i]]=temp\r\n\r\n\r\n m={}\r\n tdct=0\r\n for x, y in final_result.items():\r\n res=y\r\n tdct=tdct+len(res)\r\n s=set()\r\n for i in res:\r\n #s=set()\r\n for item in items:\r\n if item[\"mimeType\"] == \"image/jpeg\":\r\n if item[\"name\"]==i:\r\n s.add(item[\"webViewLink\"])\r\n m[x]=s\r\n return m,tdct\r\n\r\n\r\ndef duplicate_image_list(imagelist):\r\n #print(len(imagelist))\r\n dup_list = []\r\n if len(imagelist) >= 1:\r\n for i in range(len(imagelist) - 1):\r\n count=0\r\n l=[]\r\n for j in range(i + 1, len(imagelist)):\r\n image1 = cv2.imread(imagelist[i])\r\n image2 = cv2.imread(imagelist[j])\r\n try:\r\n difference = cv2.subtract(image1, image2)\r\n result = not np.any(difference) # if difference is all zeros it will return False\r\n if result is True:\r\n #print(imagelist[i],\"Matching with \",imagelist[j])\r\n l.append(imagelist[j])\r\n count=count+1\r\n dup_list.append(imagelist[i])\r\n \r\n except:\r\n i = 0\r\n return dup_list\r\n\r\n\r\ncsv_map = {}\r\n\r\n\r\ndef check_duplicate_image(items):\r\n # \"\"\"given items returned by Google Drive API, prints them in a tabular way\"\"\"\r\n map = {}\r\n image_name_list = []\r\n duplicate_image = []\r\n for item in items:\r\n file_type = item[\"mimeType\"]\r\n if file_type == \"image/jpeg\":\r\n image_name_list.append(item[\"name\"])\r\n #append url or \r\n # Creating Map\r\n value = []\r\n value.append(item[\"name\"])\r\n value.append(item[\"webViewLink\"])\r\n map[item[\"id\"]] = value\r\n csv_map[item[\"name\"]] = item[\"webViewLink\"]\r\n # Dowloading Image\r\n downloadFile(item[\"id\"], item[\"name\"])\r\n duplicate_image = duplicate_image_list(image_name_list)\r\n return duplicate_image\r\n\r\n\r\ndef renameFile(service,items, newName):\r\n count=1\r\n for item in items:\r\n id = item[\"id\"]\r\n name = item[\"name\"]\r\n mime_type = item[\"mimeType\"]\r\n file = service.files().get(fileId=id).execute()\r\n del file['id']\r\n if \"jpeg\" in mime_type:\r\n file['name'] = 
newName+str(count)+ \".jpg\";\r\n if \"png\" in mime_type:\r\n file['name'] = newName+str(count)+ \".png\";\r\n updated_file = service.files().update(fileId=id, body=file).execute()\r\n count=count+1\r\n\r\n\r\n\r\ndef count_image(id):\r\n imageList = []\r\n service = get_gdrive_service()\r\n results = service.files().list(pageSize=1000, q=\"'{}' in parents\".format(id)).execute()\r\n items = results.get('files', [])\r\n for item in items:\r\n mime_Type = item[\"mimeType\"]\r\n if mime_Type == \"image/jpeg\":\r\n imageList.append(item[\"name\"])\r\n if mime_Type == \"application/vnd.google-apps.folder\":\r\n imageList.extend(count_image(item[\"id\"]))\r\n\r\n return imageList\r\n\r\n\r\ndef list_files(items, service):\r\n folder_count = 0\r\n image_count = 0\r\n imglist = []\r\n count = 0\r\n testtechm_id = ''\r\n nm_name = []\r\n img_count = []\r\n list_all_folder_name=[]\r\n rows = []\r\n overview_map = {}\r\n img_nm=0\r\n for item in items:\r\n name = item[\"name\"]\r\n mime_type = item[\"mimeType\"]\r\n if name == 'Test Techm':\r\n testtechm_id = item['parents'][0]\r\n for item in items:\r\n id = item[\"id\"]\r\n name = item[\"name\"]\r\n mime_type = item[\"mimeType\"]\r\n if mime_type == \"application/vnd.google-apps.folder\":\r\n folder_count = folder_count + 1\r\n if mime_type == \"image/jpeg\":\r\n # renameFile(item[\"id\"],\"rajj_img\"+str(image_count))\r\n image_count = image_count + 1\r\n if mime_type == \"application/vnd.google-apps.folder\" and item[\"parents\"][0] == testtechm_id:\r\n list_all_folder_name.append(item[\"name\"])\r\n name1 = count_image(id)\r\n nm_name.append(name1)\r\n img_count.append(len(name1))\r\n overview_map[item[\"name\"]] = name1\r\n\r\n rows.append((id, name, mime_type, folder_count))\r\n imglist.append(count)\r\n rows.append((id, name, mime_type, folder_count))\r\n\r\n #duplicate_count = len(check_duplicate_image(items))\r\n\r\n lt,duplicate_ct=check_duplicate_image_new(items)\r\n duplicateImagehtml(folder_count, image_count, 
duplicate_ct,items)\r\n # overview chart report page\r\n draw_chart_create_report(list_all_folder_name, image_count, duplicate_ct, overview_map,folder_count)\r\n\r\n\r\ndef createDeviceCSV():\r\n fileName = 'DuplicateImage.csv'\r\n with open(fileName, 'w') as csvFile:\r\n writer = csv.writer(csvFile)\r\n row = [\"Image Name\", 'Image Url']\r\n writer.writerow(row)\r\n count = 0\r\n for k, v in csv_map.items():\r\n row = [k, v]\r\n writer.writerow(row)\r\n count = count + 1\r\n #print(\"Device's adding into csv: \" + str(count))\r\n csvFile.close()\r\n #print('Device CSV File creation is Done file name is ', fileName)\r\n\r\ndef duplicateImagehtml(folder_count, image_count, duplicate_ct,items):\r\n uri = []\r\n map1,count=check_duplicate_image_new(items)\r\n for k, v in map1.items():\r\n name_url = []\r\n name_url.append(k)\r\n name_url.append(str(len(v)))\r\n name_url.append(str(v))\r\n uri.append(name_url)\r\n fb = open('duplicateData.html', 'w')\r\n message = \"\"\" <html> <head>\r\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\r\n <script type=\"text/javascript\">\r\n google.charts.load('current', {'packages':['table']});\r\n google.charts.setOnLoadCallback(drawTable);\r\n function drawTable() {\r\n var data3 = new google.visualization.DataTable();\r\n data3.addColumn('string', 'Name');\r\n data3.addColumn('string', 'Count');\r\n data3.addRows([\r\n ['Total Folders', '\"\"\" + str(folder_count) + \"\"\"'],\r\n ['Total Images', '\"\"\" + str(image_count) + \"\"\"'],\r\n ['Duplicate Images', '\"\"\" + str(duplicate_ct) + \"\"\"']]);\r\n\r\n var table2 = new google.visualization.Table(document.getElementById('table_div_base'));\r\n\r\n table2.draw(data3, {showRowNumber: true, width: '100%', height: '100%'});\r\n var data = new google.visualization.DataTable();\r\n data.addColumn('string', 'Image Name');\r\n data.addColumn('string', 'Image Count');\r\n data.addColumn('string', 'Image Url');\r\n data.addRows(\"\"\" + 
str(uri) + \"\"\");\r\n var table = new google.visualization.Table(document.getElementById('table_div'));\r\n table.draw(data, {showRowNumber: true, width: '100%', height: '100%'});\r\n }\r\n </script>\r\n </head>\r\n <body><h2 style=\"text-align: center\">Google Drive Summary Table</h2>\r\n <div id=\"table_div_base\" style=\"width: 100%; height: 200px; display:inline-block;border-style: solid\"></div>\r\n <h2 style=\"text-align: center\" >List of Duplicate Image</h2>\r\n <div id=\"table_div\" style=\"width: 100%; height: 500px; display:inline-block;border-style: solid\"></div>\r\n </body></html>\"\"\"\r\n\r\n fb.write(message)\r\n fb.close()\r\n print(\"Duplicate image data preparing.. \")\r\n # webbrowser.open_new_tab('helloworld.html')\r\n\r\n\r\ndef draw_chart_create_report(folder_count, image_count, duplicate_ct, map,folder_count_real):\r\n #folder_count=len(folder_count)\r\n fb = open('gDriveOverview.html', 'w')\r\n values = list(map.values())\r\n newlist = []\r\n folder_name = list(map.keys())\r\n total_image_count = []\r\n duplicate_image_count_in_folder = []\r\n for v in values:\r\n newlist.append(duplicate_image_list(v))\r\n total_image_count.append(len(v))\r\n for n in newlist:\r\n duplicate_image_count_in_folder.append(len(n))\r\n # create plot\r\n #print(total_image_count, duplicate_image_count_in_folder, map.keys())\r\n m1 = \"\"\"<html>\r\n <head>\r\n <h1 style =\"color:black;text-align: center;font-size:25px;margin-left:-6px;margin-bottom:25px;width:1300px;float:left;\">Google Drive Data Overview</h1>\r\n <script type=\"text/javascript\" src=\"https://www.gstatic.com/charts/loader.js\"></script>\r\n <script type=\"text/javascript\">\r\n google.charts.load('current', {'packages':['bar','corechart','table']});\r\n google.charts.setOnLoadCallback(drawChart);\r\n function drawChart() {\r\n var paiData = google.visualization.arrayToDataTable([\r\n ['Drive', 'Drive Data'],\r\n ['Total Images', \"\"\" + str(image_count) + \"\"\"],\r\n ['Total duplicate 
Images', \"\"\" + str(duplicate_ct) + \"\"\"],\r\n ['Total Folder', \"\"\" + str(folder_count_real) + \"\"\"]\r\n ]);\r\n var paiOptions = {\r\n title: 'Google Drive Overview'\r\n };\r\n var chart = new google.visualization.PieChart(document.getElementById('piechart'));\r\n chart.draw(paiData, paiOptions);\r\n var barData = google.visualization.arrayToDataTable(\"\"\"\r\n fb.write(m1)\r\n barchart_data = []\r\n barchart_data.append(['Folders', 'Total no of Images', 'Total no of duplicate Images'])\r\n for i in range(len(values)):\r\n item_list = []\r\n item_list.append(folder_count[i])\r\n item_list.append(total_image_count[i])\r\n item_list.append(duplicate_image_count_in_folder[i])\r\n barchart_data.append(item_list)\r\n\r\n fb.write(m1)\r\n m3 = str(barchart_data) + \"\"\");\r\n \r\n var barOptions = {\r\n chart: { title: 'Google Drive Folderwise Overview',\r\n subtitle: 'This report is created on '+new Date(),\r\n }};\r\n \r\n var chart = new google.charts.Bar(document.getElementById('bar_chart'));\r\n \r\n chart.draw(barData, google.charts.Bar.convertOptions(barOptions));\r\n }\r\n </script>\r\n </head>\r\n <body>\r\n <div style=\"width:100%; margin:0px auto;\">\r\n <div id=\"piechart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\r\n <div id=\"bar_chart\" style=\"width: 900px; height: 500px; display:inline-block;\"></div>\r\n </div>\r\n <div>\r\n <h2>\r\n <p style=\"float:right;color:red;\">** <a href=\"duplicateData.html\" target=\"_blank\">Click here to know more about duplicate image data</a></p>\r\n </h2></div></body></html>\r\n \"\"\"\r\n fb.write(m3)\r\n fb.close()\r\n print(\"Bar and Pie chart creating.... 
\")\r\n\r\n\r\ndef main():\r\n service = get_gdrive_service()\r\n print(\"Wait a moment script is running ..!!!\")\r\n results = service.files().list(pageSize=1000,\r\n fields=\"nextPageToken,files(id, name,mimeType,parents,webViewLink)\").execute()\r\n items = results.get('files', [])\r\n if not items:\r\n # empty drive\r\n print('No files found.')\r\n else:\r\n # create_folder(service)\r\n print(\"-----_\")\r\n name=\"g_image_\"\r\n renameFile(service,items,name)\r\n print(\"==============================\")\r\n #check_duplicate_image(items)\r\n # createDeviceCSV()\r\n list_files(items, service)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n print(\"Script is done ..!!!\")\r\n",
"step-ids": [
9,
11,
12,
13,
19
]
}
|
[
9,
11,
12,
13,
19
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# sphinx_gallery_thumbnail_number = 3
"""Plot the baryon-number susceptibilities chi2 and chi4 versus temperature.

Reads the precomputed curves from ``Lam0/buffer/chi{2,4}.dat`` (one value per
temperature point, so the array index is used as the x coordinate) and writes
a two-panel figure to ``chi.pdf``.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import NullFormatter  # useful for `logit` scale
import matplotlib.ticker as ticker
import matplotlib as mpl

mpl.style.use('classic')

# Data for plotting: chi2/chi4 are 1-D arrays sampled over T in [0, 300] MeV.
chi2 = np.loadtxt(r'Lam0/buffer/chi2.dat')
chi4 = np.loadtxt(r'Lam0/buffer/chi4.dat')

# Create figure: two side-by-side panels sharing the same T range.
fig = plt.figure(figsize=(9, 3.5))

# Left panel: second-order susceptibility chi2.
ax1 = fig.add_subplot(121)
ax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5,
         label=r'$\chi^B_2$')
ax1.axis([0, 300, -0.05, 0.2])
# Raw strings so the LaTeX escapes (\, and \mathrm) are not treated as
# (invalid) Python escape sequences.
ax1.set_xlabel(r'$T\,[\mathrm{MeV}]$', fontsize=15, color='black')
ax1.set_ylabel(r'$\chi_2$', fontsize=15, color='black')
# NOTE(review): ax1 sets a plot label but never calls legend(); confirm
# whether the left panel should also show a legend like ax2 does.
for label in ax1.xaxis.get_ticklabels():
    label.set_fontsize(10)
for label in ax1.yaxis.get_ticklabels():
    label.set_fontsize(10)

# Right panel: fourth-order susceptibility chi4.
ax2 = fig.add_subplot(122)
ax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5,
         label=r'$\chi^B_4$')
ax2.axis([0, 300, -0.15, 0.2])
ax2.set_xlabel(r'$T\,[\mathrm{MeV}]$', fontsize=15, color='black')
ax2.set_ylabel(r'$\chi_4$', fontsize=15, color='black')
ax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=3.,
           borderpad=0.5, borderaxespad=1)
for label in ax2.xaxis.get_ticklabels():
    label.set_fontsize(10)
for label in ax2.yaxis.get_ticklabels():
    label.set_fontsize(10)

fig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,
                    wspace=0.2)

fig.savefig("chi.pdf")

# plt.show()  # uncomment for interactive viewing instead of file-only output
|
normal
|
{
"blob_id": "66904cbe3e57d9cc1ee385cd8a4c1ba3767626bd",
"index": 923,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmpl.style.use('classic')\n<mask token>\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\n<mask token>\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"step-3": "<mask token>\nmpl.style.use('classic')\nchi2 = np.loadtxt('Lam0/buffer/chi2.dat')\nchi4 = np.loadtxt('Lam0/buffer/chi4.dat')\nfig = plt.figure(figsize=(9, 3.5))\nax1 = fig.add_subplot(121)\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\nax2 = fig.add_subplot(122)\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import NullFormatter\nimport matplotlib.ticker as ticker\nimport matplotlib as mpl\nmpl.style.use('classic')\nchi2 = np.loadtxt('Lam0/buffer/chi2.dat')\nchi4 = np.loadtxt('Lam0/buffer/chi4.dat')\nfig = plt.figure(figsize=(9, 3.5))\nax1 = fig.add_subplot(121)\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\nax2 = fig.add_subplot(122)\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# sphinx_gallery_thumbnail_number = 3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import NullFormatter # useful for `logit` scale\nimport matplotlib.ticker as ticker\nimport matplotlib as mpl\n\nmpl.style.use('classic')\n\n\n# Data for plotting\n\n\nchi2=np.loadtxt(r'Lam0/buffer/chi2.dat')\nchi4=np.loadtxt(r'Lam0/buffer/chi4.dat')\n\n\n# Create figure\nfig=plt.figure(figsize=(9, 3.5))\nax1=fig.add_subplot(121)\n\nax1.plot(chi2,color='r',linestyle='--',linewidth=2,markersize=5,label=r'$\\chi^B_2$')\n\n\nax1.axis([0,300,-0.05,0.2])\n\nax1.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel(r'$\\chi_2$', fontsize=15, color='black')\n\n\n\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\n\n\n# Plot two\nax2=fig.add_subplot(122)\n\nax2.plot(chi4,color='k',linestyle='-',linewidth=2,markersize=5,label=r'$\\chi^B_4$')\n\nax2.axis([0,300,-0.15,0.2])\n\nax2.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel(r'$\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0,fontsize=7.3,frameon=False,shadow=True,handlelength=3.,borderpad=0.5,borderaxespad=1)\n\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\n\n\n\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\n \n\nfig.savefig(\"chi.pdf\")\n\n#plt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
    """Initial schema for the ``preaches`` app.

    Creates the Author, Preach, Social_media and Tags models, then wires up
    the many-to-many relations Preach<->Tags and Author<->Social_media.
    Auto-generated by Django; the recorded defaults must stay as-is so the
    migration history remains consistent.
    """

    # First migration of the app: nothing to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('name', models.CharField(max_length=60)),
                ('email', models.EmailField(max_length=100)),
                ('telephone', models.CharField(max_length=12)),
                ('cellphone', models.CharField(max_length=12)),
                ('img', models.ImageField(upload_to='')),
                # NOTE(review): default=5 is not among the declared choices
                # (0-3) -- confirm the intended default role; fix belongs in
                # the model + a new migration, not here.
                ('role', models.IntegerField(default=5, choices=[(0, 'Pastor titular'), (1, 'Pastor/a'), (2, 'Diacono/a'), (3, 'Editor/a')])),
            ],
        ),
        migrations.CreateModel(
            name='Preach',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('title', models.CharField(max_length=60)),
                ('summary', models.CharField(blank=True, max_length=500)),
                ('date', models.DateField()),
                # NOTE(review): frozen timestamp captured when the migration
                # was generated; presumably the model meant a dynamic default
                # (e.g. timezone.now). Do not edit in a historical migration.
                ('published_date', models.DateField(default=datetime.datetime(2017, 5, 7, 2, 3, 52, 71419))),
                ('url', models.URLField()),
                ('img', models.ImageField(verbose_name='Imagen', upload_to='images')),
                ('author', models.ForeignKey(to='preaches.Author')),
            ],
        ),
        migrations.CreateModel(
            name='Social_media',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('name', models.IntegerField(default=0, verbose_name='Nombre de la red social', choices=[(0, 'Facebook'), (1, 'Instagram'), (2, 'Twitter')])),
                ('url', models.URLField()),
            ],
        ),
        migrations.CreateModel(
            name='Tags',
            fields=[
                ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),
                ('name', models.CharField(verbose_name='Categoria', max_length=80)),
            ],
        ),
        # M2M fields are added after both endpoint models exist.
        migrations.AddField(
            model_name='preach',
            name='tags',
            field=models.ManyToManyField(to='preaches.Tags'),
        ),
        migrations.AddField(
            model_name='author',
            name='social_media',
            field=models.ManyToManyField(to='preaches.Social_media'),
        ),
    ]
|
normal
|
{
"blob_id": "4a118f9081a8b3baf0b074c8dc14eaeef4559c08",
"index": 6684,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Author', fields=[('id',\n models.AutoField(auto_created=True, serialize=False, primary_key=\n True, verbose_name='ID')), ('name', models.CharField(max_length=60)\n ), ('email', models.EmailField(max_length=100)), ('telephone',\n models.CharField(max_length=12)), ('cellphone', models.CharField(\n max_length=12)), ('img', models.ImageField(upload_to='')), ('role',\n models.IntegerField(default=5, choices=[(0, 'Pastor titular'), (1,\n 'Pastor/a'), (2, 'Diacono/a'), (3, 'Editor/a')]))]), migrations.\n CreateModel(name='Preach', fields=[('id', models.AutoField(\n auto_created=True, serialize=False, primary_key=True, verbose_name=\n 'ID')), ('title', models.CharField(max_length=60)), ('summary',\n models.CharField(blank=True, max_length=500)), ('date', models.\n DateField()), ('published_date', models.DateField(default=datetime.\n datetime(2017, 5, 7, 2, 3, 52, 71419))), ('url', models.URLField()),\n ('img', models.ImageField(verbose_name='Imagen', upload_to='images'\n )), ('author', models.ForeignKey(to='preaches.Author'))]),\n migrations.CreateModel(name='Social_media', fields=[('id', models.\n AutoField(auto_created=True, serialize=False, primary_key=True,\n verbose_name='ID')), ('name', models.IntegerField(default=0,\n verbose_name='Nombre de la red social', choices=[(0, 'Facebook'), (\n 1, 'Instagram'), (2, 'Twitter')])), ('url', models.URLField())]),\n migrations.CreateModel(name='Tags', fields=[('id', models.AutoField\n (auto_created=True, serialize=False, primary_key=True, verbose_name\n ='ID')), ('name', models.CharField(verbose_name='Categoria',\n max_length=80))]), migrations.AddField(model_name='preach', name=\n 'tags', field=models.ManyToManyField(to='preaches.Tags')),\n migrations.AddField(model_name='author', name='social_media', field\n =models.ManyToManyField(to='preaches.Social_media'))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport datetime\n\n\nclass Migration(migrations.Migration):\n dependencies = []\n operations = [migrations.CreateModel(name='Author', fields=[('id',\n models.AutoField(auto_created=True, serialize=False, primary_key=\n True, verbose_name='ID')), ('name', models.CharField(max_length=60)\n ), ('email', models.EmailField(max_length=100)), ('telephone',\n models.CharField(max_length=12)), ('cellphone', models.CharField(\n max_length=12)), ('img', models.ImageField(upload_to='')), ('role',\n models.IntegerField(default=5, choices=[(0, 'Pastor titular'), (1,\n 'Pastor/a'), (2, 'Diacono/a'), (3, 'Editor/a')]))]), migrations.\n CreateModel(name='Preach', fields=[('id', models.AutoField(\n auto_created=True, serialize=False, primary_key=True, verbose_name=\n 'ID')), ('title', models.CharField(max_length=60)), ('summary',\n models.CharField(blank=True, max_length=500)), ('date', models.\n DateField()), ('published_date', models.DateField(default=datetime.\n datetime(2017, 5, 7, 2, 3, 52, 71419))), ('url', models.URLField()),\n ('img', models.ImageField(verbose_name='Imagen', upload_to='images'\n )), ('author', models.ForeignKey(to='preaches.Author'))]),\n migrations.CreateModel(name='Social_media', fields=[('id', models.\n AutoField(auto_created=True, serialize=False, primary_key=True,\n verbose_name='ID')), ('name', models.IntegerField(default=0,\n verbose_name='Nombre de la red social', choices=[(0, 'Facebook'), (\n 1, 'Instagram'), (2, 'Twitter')])), ('url', models.URLField())]),\n migrations.CreateModel(name='Tags', fields=[('id', models.AutoField\n (auto_created=True, serialize=False, primary_key=True, verbose_name\n ='ID')), ('name', models.CharField(verbose_name='Categoria',\n max_length=80))]), migrations.AddField(model_name='preach', name=\n 'tags', field=models.ManyToManyField(to='preaches.Tags')),\n migrations.AddField(model_name='author', name='social_media', field\n 
=models.ManyToManyField(to='preaches.Social_media'))]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Author',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('name', models.CharField(max_length=60)),\n ('email', models.EmailField(max_length=100)),\n ('telephone', models.CharField(max_length=12)),\n ('cellphone', models.CharField(max_length=12)),\n ('img', models.ImageField(upload_to='')),\n ('role', models.IntegerField(default=5, choices=[(0, 'Pastor titular'), (1, 'Pastor/a'), (2, 'Diacono/a'), (3, 'Editor/a')])),\n ],\n ),\n migrations.CreateModel(\n name='Preach',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('title', models.CharField(max_length=60)),\n ('summary', models.CharField(blank=True, max_length=500)),\n ('date', models.DateField()),\n ('published_date', models.DateField(default=datetime.datetime(2017, 5, 7, 2, 3, 52, 71419))),\n ('url', models.URLField()),\n ('img', models.ImageField(verbose_name='Imagen', upload_to='images')),\n ('author', models.ForeignKey(to='preaches.Author')),\n ],\n ),\n migrations.CreateModel(\n name='Social_media',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('name', models.IntegerField(default=0, verbose_name='Nombre de la red social', choices=[(0, 'Facebook'), (1, 'Instagram'), (2, 'Twitter')])),\n ('url', models.URLField()),\n ],\n ),\n migrations.CreateModel(\n name='Tags',\n fields=[\n ('id', models.AutoField(auto_created=True, serialize=False, primary_key=True, verbose_name='ID')),\n ('name', models.CharField(verbose_name='Categoria', max_length=80)),\n ],\n ),\n migrations.AddField(\n model_name='preach',\n name='tags',\n 
field=models.ManyToManyField(to='preaches.Tags'),\n ),\n migrations.AddField(\n model_name='author',\n name='social_media',\n field=models.ManyToManyField(to='preaches.Social_media'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 1 19:16:16 2019
@author: pc
"""
from socket import *
import threading
import time
import cv2
import struct
import pickle
import zlib
import cartoon_edit
import face_capture_edit
import pencil_edit
class Video_Server(threading.Thread):
    """Receive a compressed video stream over TCP and display it.

    Accepts a single client connection, unpacks length-prefixed,
    zlib-compressed, pickled frames, optionally shows a face-capture
    overlay and applies a visual effect (cartoon / pencil sketch), and
    displays frames until the user presses 'q'.
    """

    def __init__(self, port, version, face_cap, view_version,
                 face_shape_predictor, break_audio_aip, break_audio):
        threading.Thread.__init__(self)
        # Daemon thread: exits automatically with the main thread, so the
        # program cannot hang on shutdown.
        self.setDaemon(True)
        self.ADDR = ('', port)  # listen on all interfaces at this port
        self.face_cap = face_cap
        self.view_version = view_version
        self.face_shape_predictor = face_shape_predictor
        self.break_audio = break_audio
        self.break_audio_aip = break_audio_aip
        if version == 4:  # IPv4 or IPv6
            self.sock = socket(AF_INET, SOCK_STREAM)
        else:
            self.sock = socket(AF_INET6, SOCK_STREAM)

    def __del__(self):
        self.sock.close()
        try:
            # Bug fix: was cv2.destoryALLWindows(), a misspelling that always
            # raised AttributeError and was silently swallowed by the bare
            # except below, leaving windows open.
            cv2.destroyAllWindows()
        except:
            pass
        print("video close")

    def run(self):
        detector, predictor = face_capture_edit.face_init(self.face_shape_predictor)
        print("face_capture_init is ready")
        print("VIDEO server starts ...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VIDEO client success connected ...")
        data = "".encode("utf-8")  # rolling byte buffer of received data
        # Each frame is prefixed with a native unsigned long holding the
        # length of the compressed payload that follows.
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)
        while True:
            # Accumulate bytes until at least one length header is buffered;
            # any excess stays in `data` and is consumed by the next frame.
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            # Accumulate bytes until the whole compressed frame is buffered.
            while len(data) < msg_size:
                # NOTE(review): 89120 looks like a typo of 81920 — harmless
                # (just a different recv buffer size), but confirm intent.
                data += conn.recv(89120)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            if self.face_cap == 1:
                frame_face = face_capture_edit.face_capture_e(frame.copy(), detector, predictor)
                cv2.imshow("Face_capture", frame_face)
            if self.view_version == 0:  # unmodified view
                frame = frame
            elif self.view_version == 1:  # cartoon effect
                frame = cartoon_edit.cartoon_e(frame)
            elif self.view_version == 2:  # pencil-sketch effect
                frame = pencil_edit.rgb_to_sketch(frame)
            cv2.namedWindow("Remote", 0)
            cv2.resizeWindow("Remote", 640, 480)
            cv2.imshow("Remote", frame)
            if cv2.waitKey(1) & 0xff == ord('q'):
                # Create the break-marker files — presumably signals the
                # audio threads to stop as well; confirm against callers.
                file_aip = open(self.break_audio_aip, 'w')
                file_audio = open(self.break_audio, 'w')
                break
|
normal
|
{
"blob_id": "6b138dabf57166ec971052fff7df89ae0346e083",
"index": 1582,
"step-1": "<mask token>\n\n\nclass Video_Server(threading.Thread):\n <mask token>\n <mask token>\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-2": "<mask token>\n\n\nclass Video_Server(threading.Thread):\n <mask token>\n\n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print('video close')\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-3": "<mask token>\n\n\nclass Video_Server(threading.Thread):\n\n def __init__(self, port, version, face_cap, view_version,\n face_shape_predictor, break_audio_aip, break_audio):\n threading.Thread.__init__(self)\n self.setDaemon(True)\n self.ADDR = '', port\n self.face_cap = face_cap\n self.view_version = view_version\n self.face_shape_predictor = face_shape_predictor\n self.break_audio = break_audio\n self.break_audio_aip = break_audio_aip\n if version == 4:\n self.sock = socket(AF_INET, SOCK_STREAM)\n else:\n self.sock = socket(AF_INET6, SOCK_STREAM)\n\n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print('video close')\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = 
open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-4": "<mask token>\nfrom socket import *\nimport threading\nimport time\nimport cv2\nimport struct\nimport pickle\nimport zlib\nimport cartoon_edit\nimport face_capture_edit\nimport pencil_edit\n\n\nclass Video_Server(threading.Thread):\n\n def __init__(self, port, version, face_cap, view_version,\n face_shape_predictor, break_audio_aip, break_audio):\n threading.Thread.__init__(self)\n self.setDaemon(True)\n self.ADDR = '', port\n self.face_cap = face_cap\n self.view_version = view_version\n self.face_shape_predictor = face_shape_predictor\n self.break_audio = break_audio\n self.break_audio_aip = break_audio_aip\n if version == 4:\n self.sock = socket(AF_INET, SOCK_STREAM)\n else:\n self.sock = socket(AF_INET6, SOCK_STREAM)\n\n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print('video close')\n\n def run(self):\n detector, predictor = face_capture_edit.face_init(self.\n face_shape_predictor)\n print('face_capture_init is ready')\n print('VIDEO server starts ...')\n self.sock.bind(self.ADDR)\n self.sock.listen(1)\n conn, addr = self.sock.accept()\n print('remote VIDEO client success connected ...')\n data = ''.encode('utf-8')\n payload_size = struct.calcsize('L')\n cv2.namedWindow('Remote', cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:\n data += conn.recv(81920)\n packed_size = data[:payload_size]\n data = data[payload_size:]\n msg_size = struct.unpack('L', packed_size)[0]\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = face_capture_edit.face_capture_e(frame.copy(),\n detector, predictor)\n cv2.imshow('Face_capture', frame_face)\n if self.view_version == 0:\n frame = frame\n elif self.view_version == 1:\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:\n frame = 
pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow('Remote', 0)\n cv2.resizeWindow('Remote', 640, 480)\n cv2.imshow('Remote', frame)\n if cv2.waitKey(1) & 255 == ord('q'):\n file_aip = open(self.break_audio_aip, 'w')\n file_audio = open(self.break_audio, 'w')\n break\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 1 19:16:16 2019\n\n@author: pc\n\"\"\"\n\nfrom socket import *\nimport threading\nimport time\nimport cv2\nimport struct\nimport pickle\nimport zlib\nimport cartoon_edit\nimport face_capture_edit\nimport pencil_edit\n\nclass Video_Server(threading.Thread):\n def __init__ (self, port, version, face_cap, view_version, face_shape_predictor, break_audio_aip, break_audio):\n threading.Thread.__init__(self)\n self.setDaemon(True)#使每个线程在主线程结束后自动退出,保证程序不会崩溃且无法销毁的情况\n self.ADDR = ('',port)#指定套接字端口号\n self.face_cap = face_cap\n self.view_version = view_version\n self.face_shape_predictor = face_shape_predictor\n self.break_audio = break_audio\n self.break_audio_aip = break_audio_aip\n if version == 4:#IPV4 or IPV6\n self.sock = socket(AF_INET, SOCK_STREAM)\n else:\n self.sock = socket(AF_INET6,SOCK_STREAM)\n \n def __del__(self):\n self.sock.close()\n try:\n cv2.destoryALLWindows()\n except:\n pass\n print(\"video close\")\n \n def run(self):\n detector, predictor = face_capture_edit.face_init(self.face_shape_predictor) \n print(\"face_capture_init is ready\")\n print(\"VIDEO server starts ...\")\n self.sock.bind(self.ADDR)#关联特定的端口号\n self.sock.listen(1)#监听\n conn, addr = self.sock.accept()#服务器端创建新的套接字,与用户端连接\n print(\"remote VIDEO client success connected ...\")\n data = \"\".encode(\"utf-8\")#接收数据\n payload_size = struct.calcsize(\"L\")#记录当前缓冲区的数据长度,准确提取每一帧\n cv2.namedWindow('Remote',cv2.WINDOW_NORMAL)\n while True:\n while len(data) < payload_size:#超过数据流的部分被截取掉,和下一次合并整合,不足时将合并下一帧到该帧\n data +=conn.recv(81920)\n packed_size = data[:payload_size]#从最初剪到指定位置,剪切操作,剪切到一个完整的一帧\n data = data[payload_size:]#从指定位置剪切到末尾\n msg_size = struct.unpack(\"L\",packed_size)[0]#解压前面的头\n while len(data) < msg_size:\n data += conn.recv(89120)\n zframe_data = data[:msg_size]\n data = data[msg_size:]\n frame_data = zlib.decompress(zframe_data)\n frame = pickle.loads(frame_data)\n if self.face_cap == 1:\n frame_face = 
face_capture_edit.face_capture_e(frame.copy(),detector, predictor)\n cv2.imshow(\"Face_capture\", frame_face)\n if self.view_version == 0:#不变样式\n frame = frame\n elif self.view_version == 1:#漫画\n frame = cartoon_edit.cartoon_e(frame)\n elif self.view_version == 2:#铅笔画\n frame = pencil_edit.rgb_to_sketch(frame)\n cv2.namedWindow(\"Remote\",0);\n cv2.resizeWindow(\"Remote\", 640, 480);\n cv2.imshow(\"Remote\", frame)\n if cv2.waitKey(1) & 0xff == ord('q'):\n file_aip = open(self.break_audio_aip,'w')\n file_audio = open(self.break_audio,'w')\n break\n ",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
if input is not None:
element = S(input)
if newChild is not None:
newChild = S(newChild)
element.replaceChild(existingChild, newChild)
|
normal
|
{
"blob_id": "fdbb64159b72bf902efc3aa2eaa534e199dccf84",
"index": 8442,
"step-1": "<mask token>\n",
"step-2": "if input is not None:\n element = S(input)\nif newChild is not None:\n newChild = S(newChild)\nelement.replaceChild(existingChild, newChild)\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Antirollback clock user space support.
This daemon serves several purposes:
1. Maintain a file containing the minimum time, and periodically
update its value.
2. At startup, write the minimum time to /proc/ar_clock.
The kernel will not allow the time to be set substantially
earlier than this value (there is a small amount of wiggle
room).
"""
__author__ = 'dgentry@google.com (Denton Gentry)'
import os
import pwd
import sys
import tempfile
import time
import options
# Command-line option spec consumed by options.Options (parsed in main()).
optspec = """
antirollback [options...]
--
i,interval= seconds between updates [28800]
p,persist= path to persistent file [/fiber/config/ar_clock]
u,user= setuid to this user to run
"""
# Unit tests can override these.
BIRTHDAY = 1349064000.0  # 10/1/2012: hard floor for the antirollback time.
BUILD_FILENAME = '/etc/softwaredate'  # file holding the software build date
PROC_AR = '/proc/ar_clock'  # kernel antirollback clock interface
PROC_UPTIME = '/proc/uptime'  # monotonic uptime source
SLEEP = time.sleep  # indirection so tests can stub out sleeping
TIMENOW = time.time  # indirection so tests can stub out wall-clock time
def GetPersistTime(ar_filename):
  """Return the time stored in ar_filename, or 0.0 if missing/invalid."""
  try:
    with open(ar_filename) as stored:
      contents = stored.read()
    return float(contents)
  except (IOError, ValueError):
    return 0.0
def GetBuildDate(build_filename):
  """Return the build date in seconds since the epoch, or 0.0 on failure."""
  try:
    with open(build_filename) as f:
      first_line = f.readline()
    return float(first_line)
  except (IOError, ValueError):
    return 0.0
def GetMonotime():
  """Return a monotonically increasing count of seconds (system uptime).

  Reads the first whitespace-separated field of PROC_UPTIME.
  """
  # Use a context manager so the file handle is closed deterministically
  # (the original left closing to the garbage collector).
  with open(PROC_UPTIME) as f:
    return float(f.read().split()[0])
def GetAntirollbackTime(ar_filename):
  """Return the antirollback time to use at startup.

  The result is the latest of: the current wall clock, the persisted
  value, the software build date, and the BIRTHDAY floor.
  """
  return max(TIMENOW(),
             GetPersistTime(ar_filename),
             GetBuildDate(BUILD_FILENAME),
             BIRTHDAY)
def StoreAntirollback(now, ar_filename, kern_f):
  """Write the antirollback time to the kernel and the persistent file.

  Args:
    now: floating point seconds since the epoch to store.
    ar_filename: path of the persistent file.
    kern_f: open, writable file object for the kernel interface.
  """
  # print() form works identically under Python 2 and 3 for a single
  # argument; the original Python-2-only print statement did not.
  print('antirollback time now ' + str(now))
  sys.stdout.flush()
  kern_f.write(str(now))
  kern_f.flush()
  # Write to a temp file in the same directory and rename over the target,
  # so the persistent file is replaced atomically and never seen
  # half-written even if we lose power mid-update.
  tmpdir = os.path.dirname(ar_filename)
  with tempfile.NamedTemporaryFile(mode='w', dir=tmpdir, delete=False) as f:
    f.write(str(now) + '\n')
    f.flush()
    os.fsync(f.fileno())
  # Rename after the file is closed: same atomicity on POSIX, and portable
  # to platforms that cannot rename an open file.
  os.rename(f.name, ar_filename)
def LoopIterate(uptime, now, sleeptime, ar_filename, kern_f):
  """Sleep, advance the clock by elapsed uptime, persist, and return state.

  Returns a (new_uptime, new_now) tuple for the next iteration.
  """
  SLEEP(sleeptime)
  new_uptime = GetMonotime()
  elapsed = new_uptime - uptime
  # Never let the stored time fall behind the current wall clock.
  advanced = max(now + elapsed, TIMENOW())
  StoreAntirollback(now=advanced, ar_filename=ar_filename, kern_f=kern_f)
  return (new_uptime, advanced)
def main():
  """Entry point: parse flags, drop privileges, update the clock forever."""
  parser = options.Options(optspec)
  opt, _, _ = parser.parse(sys.argv[1:])

  # Open the kernel interface before dropping root privileges.
  kern_f = open(PROC_AR, 'w')

  # Drop privileges.
  if opt.user:
    pwent = pwd.getpwnam(opt.user)
    os.setuid(pwent.pw_uid)

  uptime = GetMonotime()
  now = GetAntirollbackTime(opt.persist)
  StoreAntirollback(now=now, ar_filename=opt.persist, kern_f=kern_f)

  while True:
    uptime, now = LoopIterate(uptime=uptime, now=now,
                              sleeptime=opt.interval,
                              ar_filename=opt.persist,
                              kern_f=kern_f)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "92e7a7825b3f49424ec69196b69aee00bc84da68",
"index": 8879,
"step-1": "#!/usr/bin/python\n# Copyright 2012 Google Inc. All Rights Reserved.\n\n\"\"\"Antirollback clock user space support.\n\nThis daemon serves several purposes:\n 1. Maintain a file containing the minimum time, and periodically\n update its value.\n 2. At startup, write the minimum time to /proc/ar_clock.\n The kernel will not allow the time to be set substantially\n earlier than this value (there is a small amount of wiggle\n room).\n\"\"\"\n\n__author__ = 'dgentry@google.com (Denton Gentry)'\n\nimport os\nimport pwd\nimport sys\nimport tempfile\nimport time\nimport options\n\n\noptspec = \"\"\"\nantirollback [options...]\n--\ni,interval= seconds between updates [28800]\np,persist= path to persistent file [/fiber/config/ar_clock]\nu,user= setuid to this user to run\n\"\"\"\n\n\n# Unit tests can override these.\nBIRTHDAY = 1349064000.0 # 10/1/2012\nBUILD_FILENAME = '/etc/softwaredate'\nPROC_AR = '/proc/ar_clock'\nPROC_UPTIME = '/proc/uptime'\nSLEEP = time.sleep\nTIMENOW = time.time\n\n\ndef GetPersistTime(ar_filename):\n \"\"\"Return time stored in ar_filename, or 0.0 if it does not exist.\"\"\"\n try:\n with open(ar_filename) as f:\n return float(f.read())\n except (IOError, ValueError):\n return 0.0\n\n\ndef GetBuildDate(build_filename):\n \"\"\"Return build_date in floating point seconds since epoch.\"\"\"\n try:\n with open(build_filename) as f:\n return float(f.readline())\n except (IOError, ValueError):\n return 0.0\n\n\ndef GetMonotime():\n \"\"\"Return a monotonically increasing count of seconds.\"\"\"\n return float(open(PROC_UPTIME).read().split()[0])\n\n\ndef GetAntirollbackTime(ar_filename):\n \"\"\"Return the appropriate antirollback time to use at startup.\"\"\"\n now = max(TIMENOW(), GetPersistTime(ar_filename),\n GetBuildDate(BUILD_FILENAME), BIRTHDAY)\n return now\n\n\ndef StoreAntirollback(now, ar_filename, kern_f):\n \"\"\"Write time to /proc/ar_clock and the persistent file.\"\"\"\n print 'antirollback time now ' + str(now)\n 
sys.stdout.flush()\n kern_f.write(str(now))\n kern_f.flush()\n tmpdir = os.path.dirname(ar_filename)\n with tempfile.NamedTemporaryFile(mode='w', dir=tmpdir, delete=False) as f:\n f.write(str(now) + '\\n')\n f.flush()\n os.fsync(f.fileno())\n os.rename(f.name, ar_filename)\n\n\ndef LoopIterate(uptime, now, sleeptime, ar_filename, kern_f):\n SLEEP(sleeptime)\n new_uptime = GetMonotime()\n now += (new_uptime - uptime)\n uptime = new_uptime\n now = max(now, TIMENOW())\n StoreAntirollback(now=now, ar_filename=ar_filename, kern_f=kern_f)\n return (uptime, now)\n\n\ndef main():\n o = options.Options(optspec)\n (opt, _, _) = o.parse(sys.argv[1:])\n\n kern_f = open(PROC_AR, 'w')\n\n # Drop privileges\n if opt.user:\n pd = pwd.getpwnam(opt.user)\n os.setuid(pd.pw_uid)\n\n uptime = GetMonotime()\n now = GetAntirollbackTime(opt.persist)\n\n StoreAntirollback(now=now, ar_filename=opt.persist, kern_f=kern_f)\n\n while True:\n (uptime, now) = LoopIterate(uptime=uptime, now=now,\n sleeptime=opt.interval,\n ar_filename=opt.persist,\n kern_f=kern_f)\n\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from decimal import Decimal
from django.conf import settings
from blood.models import Bank, Blood
class Cart(object):
    """Session-backed shopping cart for blood units."""

    def __init__(self, request):
        """Attach to the request's session, creating an empty cart if absent."""
        self.session = request.session
        stored = self.session.get(settings.CART_SESSION_ID)
        if not stored:
            stored = self.session[settings.CART_SESSION_ID] = {}
        self.cart = stored

    def add(self, blood, quantity=1, update_quantity=False):
        """Add (or set) a quantity of blood, never exceeding available stock."""
        key = str(blood.id)
        stock = Blood.objects.get(id=blood.id).stock
        if key not in self.cart:
            self.cart[key] = {'quantity': 0, 'price': str(blood.price)}
        entry = self.cart[key]
        if update_quantity and entry['quantity'] <= stock:
            entry['quantity'] = quantity
        elif int(entry['quantity'] + quantity) <= stock:
            entry['quantity'] += quantity
        self.save()

    def save(self):
        """Persist the cart and mark the session as modified."""
        self.session[settings.CART_SESSION_ID] = self.cart
        self.session.modified = True

    def remove(self, blood):
        """Drop the given blood from the cart, if present."""
        key = str(blood.id)
        if key in self.cart:
            del self.cart[key]
            self.save()

    def __iter__(self):
        """Yield cart items, attaching model instances and per-item totals."""
        for blood in Blood.objects.filter(id__in=self.cart.keys()):
            self.cart[str(blood.id)]['blood'] = blood
        for item in self.cart.values():
            item['price'] = Decimal(item['price'])
            item['total_price'] = item['price'] * item['quantity']
            yield item

    def __len__(self):
        """Return the total quantity of all units in the cart."""
        return sum(entry['quantity'] for entry in self.cart.values())

    def get_total_price(self):
        """Return the grand total across all items."""
        return sum(Decimal(entry['price']) * entry['quantity']
                   for entry in self.cart.values())

    def clear(self):
        """Remove the cart from the session entirely."""
        del self.session[settings.CART_SESSION_ID]
        self.session.modified = True
|
normal
|
{
"blob_id": "a638504737d0069d4fa40b0fc5026203904563e8",
"index": 5537,
"step-1": "<mask token>\n\n\nclass Cart(object):\n <mask token>\n <mask token>\n\n def save(self):\n self.session[settings.CART_SESSION_ID] = self.cart\n self.session.modified = True\n <mask token>\n\n def __iter__(self):\n blood_ids = self.cart.keys()\n bloods = Blood.objects.filter(id__in=blood_ids)\n for blood in bloods:\n self.cart[str(blood.id)]['blood'] = blood\n for item in self.cart.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['quantity']\n yield item\n\n def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())\n\n def get_total_price(self):\n return sum(Decimal(item['price']) * item['quantity'] for item in\n self.cart.values())\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Cart(object):\n <mask token>\n <mask token>\n\n def save(self):\n self.session[settings.CART_SESSION_ID] = self.cart\n self.session.modified = True\n <mask token>\n\n def __iter__(self):\n blood_ids = self.cart.keys()\n bloods = Blood.objects.filter(id__in=blood_ids)\n for blood in bloods:\n self.cart[str(blood.id)]['blood'] = blood\n for item in self.cart.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['quantity']\n yield item\n\n def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())\n\n def get_total_price(self):\n return sum(Decimal(item['price']) * item['quantity'] for item in\n self.cart.values())\n\n def clear(self):\n del self.session[settings.CART_SESSION_ID]\n self.session.modified = True\n",
"step-3": "<mask token>\n\n\nclass Cart(object):\n <mask token>\n\n def add(self, blood, quantity=1, update_quantity=False):\n blood_id = str(blood.id)\n max_quantity = Blood.objects.get(id=blood.id).stock\n if blood_id not in self.cart:\n self.cart[blood_id] = {'quantity': 0, 'price': str(blood.price)}\n if update_quantity and self.cart[blood_id]['quantity'] <= max_quantity:\n self.cart[blood_id]['quantity'] = quantity\n elif int(self.cart[blood_id]['quantity'] + quantity) <= max_quantity:\n self.cart[blood_id]['quantity'] += quantity\n self.save()\n\n def save(self):\n self.session[settings.CART_SESSION_ID] = self.cart\n self.session.modified = True\n\n def remove(self, blood):\n blood_id = str(blood.id)\n if blood_id in self.cart:\n del self.cart[blood_id]\n self.save()\n\n def __iter__(self):\n blood_ids = self.cart.keys()\n bloods = Blood.objects.filter(id__in=blood_ids)\n for blood in bloods:\n self.cart[str(blood.id)]['blood'] = blood\n for item in self.cart.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['quantity']\n yield item\n\n def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())\n\n def get_total_price(self):\n return sum(Decimal(item['price']) * item['quantity'] for item in\n self.cart.values())\n\n def clear(self):\n del self.session[settings.CART_SESSION_ID]\n self.session.modified = True\n",
"step-4": "<mask token>\n\n\nclass Cart(object):\n\n def __init__(self, request):\n self.session = request.session\n cart = self.session.get(settings.CART_SESSION_ID)\n if not cart:\n cart = self.session[settings.CART_SESSION_ID] = {}\n self.cart = cart\n\n def add(self, blood, quantity=1, update_quantity=False):\n blood_id = str(blood.id)\n max_quantity = Blood.objects.get(id=blood.id).stock\n if blood_id not in self.cart:\n self.cart[blood_id] = {'quantity': 0, 'price': str(blood.price)}\n if update_quantity and self.cart[blood_id]['quantity'] <= max_quantity:\n self.cart[blood_id]['quantity'] = quantity\n elif int(self.cart[blood_id]['quantity'] + quantity) <= max_quantity:\n self.cart[blood_id]['quantity'] += quantity\n self.save()\n\n def save(self):\n self.session[settings.CART_SESSION_ID] = self.cart\n self.session.modified = True\n\n def remove(self, blood):\n blood_id = str(blood.id)\n if blood_id in self.cart:\n del self.cart[blood_id]\n self.save()\n\n def __iter__(self):\n blood_ids = self.cart.keys()\n bloods = Blood.objects.filter(id__in=blood_ids)\n for blood in bloods:\n self.cart[str(blood.id)]['blood'] = blood\n for item in self.cart.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['quantity']\n yield item\n\n def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())\n\n def get_total_price(self):\n return sum(Decimal(item['price']) * item['quantity'] for item in\n self.cart.values())\n\n def clear(self):\n del self.session[settings.CART_SESSION_ID]\n self.session.modified = True\n",
"step-5": "from decimal import Decimal\nfrom django.conf import settings\nfrom blood.models import Bank, Blood\n\n\nclass Cart(object):\n def __init__(self, request):\n self.session = request.session\n cart = self.session.get(settings.CART_SESSION_ID)\n if not cart:\n cart = self.session[settings.CART_SESSION_ID] = {}\n self.cart = cart\n\n def add(self, blood, quantity=1, update_quantity=False):\n blood_id = str(blood.id)\n max_quantity = Blood.objects.get(id=blood.id).stock\n if blood_id not in self.cart:\n self.cart[blood_id] = {\n 'quantity': 0, 'price': str(blood.price)}\n if update_quantity and self.cart[blood_id]['quantity'] <= max_quantity:\n self.cart[blood_id]['quantity'] = quantity\n elif int(self.cart[blood_id]['quantity']+quantity) <= max_quantity:\n self.cart[blood_id]['quantity'] += quantity\n self.save()\n\n def save(self):\n self.session[settings.CART_SESSION_ID] = self.cart\n self.session.modified = True\n\n def remove(self, blood):\n blood_id = str(blood.id)\n if blood_id in self.cart:\n del self.cart[blood_id]\n self.save()\n\n def __iter__(self):\n blood_ids = self.cart.keys()\n bloods = Blood.objects.filter(id__in=blood_ids)\n for blood in bloods:\n self.cart[str(blood.id)]['blood'] = blood\n\n for item in self.cart.values():\n item['price'] = Decimal(item['price'])\n item['total_price'] = item['price'] * item['quantity']\n yield item\n\n def __len__(self):\n return sum(item['quantity'] for item in self.cart.values())\n\n def get_total_price(self):\n return sum(Decimal(item['price']) * item['quantity'] for item in self.cart.values())\n\n def clear(self):\n del self.session[settings.CART_SESSION_ID]\n self.session.modified = True\n",
"step-ids": [
5,
6,
8,
9,
11
]
}
|
[
5,
6,
8,
9,
11
] |
# #Create a function that takes a text file and returns the number of words
# ___ count_words filepath
# w.. o.. ? ? __ file # read
# strng = ?.r..
# strng_list = ?.s.. " "
# r.. l.. ?
#
# print ? "words1.txt"
|
normal
|
{
"blob_id": "b83310c18294def950cef6710c7644c7e8a3208f",
"index": 5219,
"step-1": "# #Create a function that takes a text file and returns the number of words\n# ___ count_words filepath\n# w.. o.. ? ? __ file # read\n# strng = ?.r..\n# strng_list = ?.s.. \" \"\n# r.. l.. ?\n#\n# print ? \"words1.txt\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
    # Background-subtraction motion detector: finds moving-object contours
    # in each video frame, records their centre points sorted by distance
    # from the origin, and shows the intermediate masks until ESC.
    cap = cv2.VideoCapture('dfd1.mp4')
    mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)
    count = 0
    while True:
        points = []  # renamed from `list`, which shadowed the builtin
        ret, frame = cap.read()
        ret1, frame1 = cap.read()
        if not ret or not ret1:
            # Robustness fix: stop cleanly at end of stream instead of
            # passing None frames to OpenCV.
            break
        fgmask = mog.apply(frame)
        mask = np.zeros_like(frame1)
        kernel = np.ones((5, 5), np.uint8)
        # Clean the foreground mask: open (remove specks), close (fill
        # holes), dilate, then edge-detect before contour extraction.
        opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
        closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
        dilation = cv2.dilate(closing, kernel, iterations=1)
        canny = cv2.Canny(dilation, 100, 200)
        cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE,
            cv2.CHAIN_APPROX_SIMPLE)
        cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)
        cv2.imshow('mask', fgmask)
        cv2.imshow('mask3', dilation)
        cv2.imshow('mask15', canny)
        cv2.imshow('mask4', frame)
        cv2.imshow('mask8', frame[100:160, 220:550])
        for i in range(len(contours)):
            cnt = contours[i]
            x, y, w, h = cv2.boundingRect(cnt)
            # Mark the contour's centre point on the second frame.
            cv2.rectangle(frame1, (int(x + w / 2), int(y + h / 2)),
                          (int(x + w / 2), int(y + h / 2)), (255, 0, 0), 3)
            X = int(x + w / 2)
            Y = int(y + h / 2)
            # Euclidean distance of the centre from the origin.
            # Bug fix: the original wrote `X ^ 2 + Y ^ 2`, which is bitwise
            # XOR, not exponentiation.
            distance = math.sqrt(X ** 2 + Y ** 2)
            mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]
            points.append([distance, X, Y])
            if count == 0:
                print('List has one List')
            elif (points[count][1] == points[count - 1][1] and
                  points[count][2] == points[count - 1][2]):
                # Drop a centre point identical to the previous one.
                points.pop()
                count = count - 1
            count = count + 1
        count = 0
        if not points:
            print('empty')
        else:
            points.sort()  # ascending by distance (first element)
            print(points)
        cv2.imshow('mask2', frame1)
        print(' 장면 전환')
        cv2.imshow('mask7', mask)
        k = cv2.waitKey(300) & 255
        if k == 27:  # ESC quits
            break
    cap.release()
    cv2.destroyAllWindows()
<|reserved_special_token_1|>
import math
import numpy as np
import cv2
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
if __name__ == '__main__':
cap = cv2.VideoCapture('dfd1.mp4')
mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)
count = 0
while True:
list = []
ret, frame = cap.read()
ret1, frame1 = cap.read()
fgmask = mog.apply(frame)
mask = np.zeros_like(frame1)
mask1 = np.zeros_like(frame1)
kernel = np.ones((5, 5), np.uint8)
opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
dilation = cv2.dilate(closing, kernel, iterations=1)
canny = cv2.Canny(dilation, 100, 200)
cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)
cv2.imshow('mask', fgmask)
cv2.imshow('mask3', dilation)
cv2.imshow('mask15', canny)
cv2.imshow('mask4', frame)
cv2.imshow('mask8', frame[100:160, 220:550])
for i in range(len(contours)):
point = []
cnt = contours[i]
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(frame1, (int(x + w / 2), int(y + h / 2)), (int(x +
w / 2), int(y + h / 2)), (255, 0, 0), 3)
X = int(x + w / 2)
Y = int(y + h / 2)
distance = math.sqrt(X ^ 2 + Y ^ 2)
mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]
point.append(distance)
point.append(X)
point.append(Y)
list.append(point)
if count == 0:
print('List has one List')
elif list[count][1] == list[count - 1][1] and list[count][2
] == list[count - 1][2]:
a = list.pop()
count = count - 1
count = count + 1
count = 0
if not list:
print('empty')
else:
list.sort()
print(list)
"""
for i in range(len(list)):
if count == 0:
print("list 내용 한개")
else:
#오름차순 정리된 점 거리 계산
distance1 = math.sqrt((list[count][1] - list[count-1][1]) ** 2 + (list[count][2] - list[count-1][2]) ** 2)
print(count)
print(list[count][1],list[count][2])
print(list[count-1][1],list[count-1][2])
print("거리 ",distance1)
count = count + 1
count = 0
"""
cv2.imshow('mask2', frame1)
print(
' 장면 전환'
)
cv2.imshow('mask7', mask)
k = cv2.waitKey(300) & 255
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
<|reserved_special_token_1|>
import math
import numpy as np
import cv2
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
from sklearn import metrics
from scipy.spatial.distance import cdist
if (__name__ == "__main__"):
cap = cv2.VideoCapture('dfd1.mp4')
mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)
count = 0
#list = ['video' + str(n) for n in range(100)]
while True:
list = []
ret, frame = cap.read()
ret1, frame1 = cap.read()
fgmask = mog.apply(frame)
mask = np.zeros_like(frame1)
mask1 = np.zeros_like(frame1)
kernel = np.ones((5, 5), np.uint8)
opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)
closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
dilation = cv2.dilate(closing, kernel, iterations=1)
canny = cv2.Canny(dilation, 100, 200)
cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)
cv2.imshow('mask', fgmask)
cv2.imshow('mask3', dilation)
cv2.imshow('mask15', canny)
cv2.imshow('mask4', frame)
cv2.imshow('mask8', frame[100:160, 220:550])
for i in range(len(contours)):
point = []
cnt = contours[i]
x, y, w, h = cv2.boundingRect(cnt)
cv2.rectangle(frame1, (int(x+w/2), int(y+h/2)), (int(x+w/2), int(y+h/2)), (255, 0, 0), 3)
X = int(x+w/2)
Y = int(y+h/2)
distance = math.sqrt(X^2+Y^2)
mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]
#(0,0)에서 좌표 거리 계산 후 리스트에 첨가
point.append(distance)
point.append(X)
point.append(Y)
list.append(point)
#같은 좌표 값 제거
if count == 0:
print("List has one List")
elif list[count][1] == list[count-1][1] and list[count][2] == list[count-1][2] :
a = list.pop()
count = count - 1
count = count + 1
count = 0
#(0,0)에서 부터의 거리 오름차순 정리
if not list:
print("empty")
else:
list.sort()
print(list)
'''
for i in range(len(list)):
if count == 0:
print("list 내용 한개")
else:
#오름차순 정리된 점 거리 계산
distance1 = math.sqrt((list[count][1] - list[count-1][1]) ** 2 + (list[count][2] - list[count-1][2]) ** 2)
print(count)
print(list[count][1],list[count][2])
print(list[count-1][1],list[count-1][2])
print("거리 ",distance1)
count = count + 1
count = 0
'''
cv2.imshow('mask2', frame1)
print(' 장면 전환')
cv2.imshow('mask7', mask)
k = cv2.waitKey(300) & 0xFF
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
|
flexible
|
{
"blob_id": "28a0ae0492fb676044c1f9ced7a5a4819e99a8d9",
"index": 8890,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n cap = cv2.VideoCapture('dfd1.mp4')\n mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)\n count = 0\n while True:\n list = []\n ret, frame = cap.read()\n ret1, frame1 = cap.read()\n fgmask = mog.apply(frame)\n mask = np.zeros_like(frame1)\n mask1 = np.zeros_like(frame1)\n kernel = np.ones((5, 5), np.uint8)\n opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n dilation = cv2.dilate(closing, kernel, iterations=1)\n canny = cv2.Canny(dilation, 100, 200)\n cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)\n cv2.imshow('mask', fgmask)\n cv2.imshow('mask3', dilation)\n cv2.imshow('mask15', canny)\n cv2.imshow('mask4', frame)\n cv2.imshow('mask8', frame[100:160, 220:550])\n for i in range(len(contours)):\n point = []\n cnt = contours[i]\n x, y, w, h = cv2.boundingRect(cnt)\n cv2.rectangle(frame1, (int(x + w / 2), int(y + h / 2)), (int(x +\n w / 2), int(y + h / 2)), (255, 0, 0), 3)\n X = int(x + w / 2)\n Y = int(y + h / 2)\n distance = math.sqrt(X ^ 2 + Y ^ 2)\n mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]\n point.append(distance)\n point.append(X)\n point.append(Y)\n list.append(point)\n if count == 0:\n print('List has one List')\n elif list[count][1] == list[count - 1][1] and list[count][2\n ] == list[count - 1][2]:\n a = list.pop()\n count = count - 1\n count = count + 1\n count = 0\n if not list:\n print('empty')\n else:\n list.sort()\n print(list)\n \"\"\"\n for i in range(len(list)):\n if count == 0:\n print(\"list 내용 한개\")\n else:\n #오름차순 정리된 점 거리 계산\n distance1 = math.sqrt((list[count][1] - list[count-1][1]) ** 2 + (list[count][2] - list[count-1][2]) ** 2)\n print(count)\n print(list[count][1],list[count][2])\n print(list[count-1][1],list[count-1][2])\n print(\"거리 \",distance1)\n count = count + 1\n count = 0\n \"\"\"\n 
cv2.imshow('mask2', frame1)\n print(\n ' 장면 전환'\n )\n cv2.imshow('mask7', mask)\n k = cv2.waitKey(300) & 255\n if k == 27:\n break\n cap.release()\n cv2.destroyAllWindows()\n",
"step-3": "import math\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom sklearn.cluster import KMeans\nfrom sklearn import metrics\nfrom scipy.spatial.distance import cdist\nif __name__ == '__main__':\n cap = cv2.VideoCapture('dfd1.mp4')\n mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)\n count = 0\n while True:\n list = []\n ret, frame = cap.read()\n ret1, frame1 = cap.read()\n fgmask = mog.apply(frame)\n mask = np.zeros_like(frame1)\n mask1 = np.zeros_like(frame1)\n kernel = np.ones((5, 5), np.uint8)\n opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n dilation = cv2.dilate(closing, kernel, iterations=1)\n canny = cv2.Canny(dilation, 100, 200)\n cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE,\n cv2.CHAIN_APPROX_SIMPLE)\n cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)\n cv2.imshow('mask', fgmask)\n cv2.imshow('mask3', dilation)\n cv2.imshow('mask15', canny)\n cv2.imshow('mask4', frame)\n cv2.imshow('mask8', frame[100:160, 220:550])\n for i in range(len(contours)):\n point = []\n cnt = contours[i]\n x, y, w, h = cv2.boundingRect(cnt)\n cv2.rectangle(frame1, (int(x + w / 2), int(y + h / 2)), (int(x +\n w / 2), int(y + h / 2)), (255, 0, 0), 3)\n X = int(x + w / 2)\n Y = int(y + h / 2)\n distance = math.sqrt(X ^ 2 + Y ^ 2)\n mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]\n point.append(distance)\n point.append(X)\n point.append(Y)\n list.append(point)\n if count == 0:\n print('List has one List')\n elif list[count][1] == list[count - 1][1] and list[count][2\n ] == list[count - 1][2]:\n a = list.pop()\n count = count - 1\n count = count + 1\n count = 0\n if not list:\n print('empty')\n else:\n list.sort()\n print(list)\n \"\"\"\n for i in range(len(list)):\n if count == 0:\n print(\"list 내용 한개\")\n else:\n #오름차순 정리된 점 거리 계산\n distance1 = math.sqrt((list[count][1] - list[count-1][1]) ** 2 + (list[count][2] - list[count-1][2]) 
** 2)\n print(count)\n print(list[count][1],list[count][2])\n print(list[count-1][1],list[count-1][2])\n print(\"거리 \",distance1)\n count = count + 1\n count = 0\n \"\"\"\n cv2.imshow('mask2', frame1)\n print(\n ' 장면 전환'\n )\n cv2.imshow('mask7', mask)\n k = cv2.waitKey(300) & 255\n if k == 27:\n break\n cap.release()\n cv2.destroyAllWindows()\n",
"step-4": "import math\r\nimport numpy as np\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import metrics\r\nfrom scipy.spatial.distance import cdist\r\n\r\n\r\nif (__name__ == \"__main__\"):\r\n cap = cv2.VideoCapture('dfd1.mp4')\r\n mog = cv2.createBackgroundSubtractorMOG2(detectShadows=0)\r\n count = 0\r\n\r\n #list = ['video' + str(n) for n in range(100)]\r\n while True:\r\n list = []\r\n ret, frame = cap.read()\r\n ret1, frame1 = cap.read()\r\n fgmask = mog.apply(frame)\r\n mask = np.zeros_like(frame1)\r\n mask1 = np.zeros_like(frame1)\r\n\r\n\r\n kernel = np.ones((5, 5), np.uint8)\r\n opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)\r\n closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\r\n dilation = cv2.dilate(closing, kernel, iterations=1)\r\n\r\n canny = cv2.Canny(dilation, 100, 200)\r\n cnts, contours, hierarchy = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n cv2.rectangle(frame, (220, 100), (550, 160), (0, 255, 0), 2)\r\n\r\n cv2.imshow('mask', fgmask)\r\n cv2.imshow('mask3', dilation)\r\n cv2.imshow('mask15', canny)\r\n cv2.imshow('mask4', frame)\r\n cv2.imshow('mask8', frame[100:160, 220:550])\r\n\r\n for i in range(len(contours)):\r\n point = []\r\n cnt = contours[i]\r\n x, y, w, h = cv2.boundingRect(cnt)\r\n cv2.rectangle(frame1, (int(x+w/2), int(y+h/2)), (int(x+w/2), int(y+h/2)), (255, 0, 0), 3)\r\n X = int(x+w/2)\r\n Y = int(y+h/2)\r\n distance = math.sqrt(X^2+Y^2)\r\n mask[y:y + h, x:x + w] = frame1[y:y + h, x:x + w]\r\n\r\n #(0,0)에서 좌표 거리 계산 후 리스트에 첨가\r\n point.append(distance)\r\n point.append(X)\r\n point.append(Y)\r\n list.append(point)\r\n\r\n #같은 좌표 값 제거\r\n if count == 0:\r\n print(\"List has one List\")\r\n elif list[count][1] == list[count-1][1] and list[count][2] == list[count-1][2] :\r\n a = list.pop()\r\n count = count - 1\r\n count = count + 1\r\n count = 0\r\n\r\n #(0,0)에서 부터의 거리 오름차순 정리\r\n if not list:\r\n 
print(\"empty\")\r\n else:\r\n list.sort()\r\n print(list)\r\n '''\r\n for i in range(len(list)):\r\n if count == 0:\r\n print(\"list 내용 한개\")\r\n else:\r\n #오름차순 정리된 점 거리 계산\r\n distance1 = math.sqrt((list[count][1] - list[count-1][1]) ** 2 + (list[count][2] - list[count-1][2]) ** 2)\r\n print(count)\r\n print(list[count][1],list[count][2])\r\n print(list[count-1][1],list[count-1][2])\r\n print(\"거리 \",distance1)\r\n count = count + 1\r\n count = 0\r\n '''\r\n cv2.imshow('mask2', frame1)\r\n\r\n\r\n print(' 장면 전환')\r\n cv2.imshow('mask7', mask)\r\n\r\n\r\n\r\n k = cv2.waitKey(300) & 0xFF\r\n if k == 27:\r\n break\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os.path
class State:
def __init__(self):
self.states=[]
self.actions=[]
class Candidate:
def __init__(self,height,lines,holes,bump,fit):
self.heightWeight = height
self.linesWeight = lines
self.holesWeight = holes
self.bumpinessWeight = bump
self.fitness = fit
def __str__(self):
return "%f , %f , %f , %f, %f " % (self.heightWeight, self.linesWeight, self.holesWeight, self.bumpinessWeight, self.fitness)
if __name__=="__main__":
s = Candidate(None,None,None,None,None)
file = open("gen4.txt", "a")
print naming_file(2)
|
normal
|
{
"blob_id": "94100d0253ee82513fe024b2826e6182f852db48",
"index": 2349,
"step-1": "import os.path\nclass State:\n\n\n def __init__(self):\n self.states=[]\n self.actions=[]\n\n\n\nclass Candidate:\n\n def __init__(self,height,lines,holes,bump,fit):\n\n self.heightWeight = height\n self.linesWeight = lines\n self.holesWeight = holes\n self.bumpinessWeight = bump\n self.fitness = fit\n\n def __str__(self):\n return \"%f , %f , %f , %f, %f \" % (self.heightWeight, self.linesWeight, self.holesWeight, self.bumpinessWeight, self.fitness)\n\nif __name__==\"__main__\":\n s = Candidate(None,None,None,None,None)\n file = open(\"gen4.txt\", \"a\")\n\n print naming_file(2)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AttendaceConfig(AppConfig):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class AttendaceConfig(AppConfig):
name = 'attendace'
<|reserved_special_token_1|>
from django.apps import AppConfig
class AttendaceConfig(AppConfig):
name = 'attendace'
|
flexible
|
{
"blob_id": "d5d61b23dc14ffdfe7fe6f983164916863928eaf",
"index": 3685,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass AttendaceConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AttendaceConfig(AppConfig):\n name = 'attendace'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass AttendaceConfig(AppConfig):\n name = 'attendace'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class CMU_Generator:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def read_data(self, phase):
all_data, even_data = [], {}
for action_idx, action in enumerate(self.actions):
action_path = '{}/{}/{}'.format(self.in_path, phase, action)
for sequence_idx, file in enumerate(os.listdir(action_path)):
sequence = []
with open('{}/{}'.format(action_path, file), 'r') as f:
for line in f.readlines():
line = line.strip().split(',')
if len(line) > 0:
sequence.append(np.array([np.float32(x) for x in
line]))
sequence = np.array(sequence)
all_data.append(sequence)
even_data[action_idx, sequence_idx] = sequence[range(0,
sequence.shape[0], 2), :]
return np.concatenate(all_data, axis=0), even_data
<|reserved_special_token_0|>
def normalize_data(self, data):
for key in data.keys():
data[key] = np.divide(data[key] - self.data_mean, self.data_std)
data[key] = data[key][:, self.dim_use]
return data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CMU_Generator:
def __init__(self, args, dataset_args):
self.in_path = dataset_args['cmu_data_path']
self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)
self.actions = ['walking', 'running', 'directing_traffic', 'soccer',
'basketball', 'washwindow', 'jumping', 'basketball_signal']
self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25,
26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,
90, 91, 92, 108, 109, 110, 114, 115, 116]
self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))
)
U.create_folder(self.out_path)
<|reserved_special_token_0|>
def read_data(self, phase):
all_data, even_data = [], {}
for action_idx, action in enumerate(self.actions):
action_path = '{}/{}/{}'.format(self.in_path, phase, action)
for sequence_idx, file in enumerate(os.listdir(action_path)):
sequence = []
with open('{}/{}'.format(action_path, file), 'r') as f:
for line in f.readlines():
line = line.strip().split(',')
if len(line) > 0:
sequence.append(np.array([np.float32(x) for x in
line]))
sequence = np.array(sequence)
all_data.append(sequence)
even_data[action_idx, sequence_idx] = sequence[range(0,
sequence.shape[0], 2), :]
return np.concatenate(all_data, axis=0), even_data
def normalize_state(self):
data_mean = np.mean(self.all_train_data, axis=0)
data_std = np.std(self.all_train_data, axis=0)
dim_zero = list(np.where(data_std < 0.0001)[0])
dim_nonzero = list(np.where(data_std >= 0.0001)[0])
data_std[dim_zero] = 1.0
return data_mean, data_std, dim_zero, dim_nonzero
def normalize_data(self, data):
for key in data.keys():
data[key] = np.divide(data[key] - self.data_mean, self.data_std)
data[key] = data[key][:, self.dim_use]
return data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CMU_Generator:
def __init__(self, args, dataset_args):
self.in_path = dataset_args['cmu_data_path']
self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)
self.actions = ['walking', 'running', 'directing_traffic', 'soccer',
'basketball', 'washwindow', 'jumping', 'basketball_signal']
self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25,
26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,
90, 91, 92, 108, 109, 110, 114, 115, 116]
self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))
)
U.create_folder(self.out_path)
def start(self):
logging.info('Reading data ...')
self.all_train_data, train_data = self.read_data('train')
_, eval_data = self.read_data('test')
logging.info('Normalizing data ...')
self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = (self
.normalize_state())
train_data = self.normalize_data(train_data)
eval_data = self.normalize_data(eval_data)
logging.info('Saving data ...')
with open('{}/data.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((train_data, eval_data, self.actions), f)
with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((self.data_mean, self.data_std, self.dim_zero, self
.dim_nonzero), f)
with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((self.dim_use, self.dim_ignore), f)
def read_data(self, phase):
all_data, even_data = [], {}
for action_idx, action in enumerate(self.actions):
action_path = '{}/{}/{}'.format(self.in_path, phase, action)
for sequence_idx, file in enumerate(os.listdir(action_path)):
sequence = []
with open('{}/{}'.format(action_path, file), 'r') as f:
for line in f.readlines():
line = line.strip().split(',')
if len(line) > 0:
sequence.append(np.array([np.float32(x) for x in
line]))
sequence = np.array(sequence)
all_data.append(sequence)
even_data[action_idx, sequence_idx] = sequence[range(0,
sequence.shape[0], 2), :]
return np.concatenate(all_data, axis=0), even_data
def normalize_state(self):
data_mean = np.mean(self.all_train_data, axis=0)
data_std = np.std(self.all_train_data, axis=0)
dim_zero = list(np.where(data_std < 0.0001)[0])
dim_nonzero = list(np.where(data_std >= 0.0001)[0])
data_std[dim_zero] = 1.0
return data_mean, data_std, dim_zero, dim_nonzero
def normalize_data(self, data):
for key in data.keys():
data[key] = np.divide(data[key] - self.data_mean, self.data_std)
data[key] = data[key][:, self.dim_use]
return data
<|reserved_special_token_1|>
import os, pickle, logging, numpy as np
from .. import utils as U
class CMU_Generator:
def __init__(self, args, dataset_args):
self.in_path = dataset_args['cmu_data_path']
self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)
self.actions = ['walking', 'running', 'directing_traffic', 'soccer',
'basketball', 'washwindow', 'jumping', 'basketball_signal']
self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25,
26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,
90, 91, 92, 108, 109, 110, 114, 115, 116]
self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))
)
U.create_folder(self.out_path)
def start(self):
logging.info('Reading data ...')
self.all_train_data, train_data = self.read_data('train')
_, eval_data = self.read_data('test')
logging.info('Normalizing data ...')
self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = (self
.normalize_state())
train_data = self.normalize_data(train_data)
eval_data = self.normalize_data(eval_data)
logging.info('Saving data ...')
with open('{}/data.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((train_data, eval_data, self.actions), f)
with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((self.data_mean, self.data_std, self.dim_zero, self
.dim_nonzero), f)
with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((self.dim_use, self.dim_ignore), f)
def read_data(self, phase):
all_data, even_data = [], {}
for action_idx, action in enumerate(self.actions):
action_path = '{}/{}/{}'.format(self.in_path, phase, action)
for sequence_idx, file in enumerate(os.listdir(action_path)):
sequence = []
with open('{}/{}'.format(action_path, file), 'r') as f:
for line in f.readlines():
line = line.strip().split(',')
if len(line) > 0:
sequence.append(np.array([np.float32(x) for x in
line]))
sequence = np.array(sequence)
all_data.append(sequence)
even_data[action_idx, sequence_idx] = sequence[range(0,
sequence.shape[0], 2), :]
return np.concatenate(all_data, axis=0), even_data
def normalize_state(self):
data_mean = np.mean(self.all_train_data, axis=0)
data_std = np.std(self.all_train_data, axis=0)
dim_zero = list(np.where(data_std < 0.0001)[0])
dim_nonzero = list(np.where(data_std >= 0.0001)[0])
data_std[dim_zero] = 1.0
return data_mean, data_std, dim_zero, dim_nonzero
def normalize_data(self, data):
for key in data.keys():
data[key] = np.divide(data[key] - self.data_mean, self.data_std)
data[key] = data[key][:, self.dim_use]
return data
<|reserved_special_token_1|>
import os, pickle, logging, numpy as np
from .. import utils as U
class CMU_Generator():
def __init__(self, args, dataset_args):
self.in_path = dataset_args['cmu_data_path']
self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)
self.actions = ['walking', 'running', 'directing_traffic', 'soccer',
'basketball', 'washwindow', 'jumping', 'basketball_signal']
self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, 26,
39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83,
87, 88, 89, 90, 91, 92, 108, 109, 110, 114, 115, 116]
self.dim_use = list(set(range(39*3)).difference(set(self.dim_ignore)))
U.create_folder(self.out_path)
def start(self):
logging.info('Reading data ...')
self.all_train_data, train_data = self.read_data('train')
_, eval_data = self.read_data('test')
logging.info('Normalizing data ...')
self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = self.normalize_state()
train_data = self.normalize_data(train_data)
eval_data = self.normalize_data(eval_data)
logging.info('Saving data ...')
with open('{}/data.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((train_data, eval_data, self.actions), f)
with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero), f)
with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:
pickle.dump((self.dim_use, self.dim_ignore), f)
def read_data(self, phase):
all_data, even_data = [], {}
for action_idx, action in enumerate(self.actions):
action_path = '{}/{}/{}'.format(self.in_path, phase, action)
for sequence_idx, file in enumerate(os.listdir(action_path)):
sequence = []
with open('{}/{}'.format(action_path, file), 'r') as f:
for line in f.readlines():
line = line.strip().split(',')
if len(line) > 0:
sequence.append(np.array([np.float32(x) for x in line]))
sequence = np.array(sequence)
all_data.append(sequence)
even_data[(action_idx, sequence_idx)] = sequence[range(0,sequence.shape[0],2),:]
return np.concatenate(all_data, axis=0), even_data
def normalize_state(self):
data_mean = np.mean(self.all_train_data, axis=0)
data_std = np.std(self.all_train_data, axis=0)
dim_zero = list(np.where(data_std < 0.0001)[0])
dim_nonzero = list(np.where(data_std >= 0.0001)[0])
data_std[dim_zero] = 1.0
return data_mean, data_std, dim_zero, dim_nonzero
def normalize_data(self, data):
for key in data.keys():
data[key] = np.divide((data[key] - self.data_mean), self.data_std)
data[key] = data[key][:, self.dim_use]
return data
|
flexible
|
{
"blob_id": "2c58a9e83f80d437160b87ec64c7631e7a35bf90",
"index": 6315,
"step-1": "<mask token>\n\n\nclass CMU_Generator:\n <mask token>\n <mask token>\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n <mask token>\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-2": "<mask token>\n\n\nclass CMU_Generator:\n\n def __init__(self, args, dataset_args):\n self.in_path = dataset_args['cmu_data_path']\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, \n 26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,\n 90, 91, 92, 108, 109, 110, 114, 115, 116]\n self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))\n )\n U.create_folder(self.out_path)\n <mask token>\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n\n def normalize_state(self):\n data_mean = np.mean(self.all_train_data, axis=0)\n data_std = np.std(self.all_train_data, axis=0)\n dim_zero = list(np.where(data_std < 0.0001)[0])\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\n data_std[dim_zero] = 1.0\n return data_mean, data_std, dim_zero, dim_nonzero\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-3": "<mask token>\n\n\nclass CMU_Generator:\n\n def __init__(self, args, dataset_args):\n self.in_path = dataset_args['cmu_data_path']\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, \n 26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,\n 90, 91, 92, 108, 109, 110, 114, 115, 116]\n self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))\n )\n U.create_folder(self.out_path)\n\n def start(self):\n logging.info('Reading data ...')\n self.all_train_data, train_data = self.read_data('train')\n _, eval_data = self.read_data('test')\n logging.info('Normalizing data ...')\n self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = (self\n .normalize_state())\n train_data = self.normalize_data(train_data)\n eval_data = self.normalize_data(eval_data)\n logging.info('Saving data ...')\n with open('{}/data.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((train_data, eval_data, self.actions), f)\n with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.data_mean, self.data_std, self.dim_zero, self\n .dim_nonzero), f)\n with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.dim_use, self.dim_ignore), f)\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n all_data.append(sequence)\n even_data[action_idx, sequence_idx] = 
sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n\n def normalize_state(self):\n data_mean = np.mean(self.all_train_data, axis=0)\n data_std = np.std(self.all_train_data, axis=0)\n dim_zero = list(np.where(data_std < 0.0001)[0])\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\n data_std[dim_zero] = 1.0\n return data_mean, data_std, dim_zero, dim_nonzero\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-4": "import os, pickle, logging, numpy as np\nfrom .. import utils as U\n\n\nclass CMU_Generator:\n\n def __init__(self, args, dataset_args):\n self.in_path = dataset_args['cmu_data_path']\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, \n 26, 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83, 87, 88, 89,\n 90, 91, 92, 108, 109, 110, 114, 115, 116]\n self.dim_use = list(set(range(39 * 3)).difference(set(self.dim_ignore))\n )\n U.create_folder(self.out_path)\n\n def start(self):\n logging.info('Reading data ...')\n self.all_train_data, train_data = self.read_data('train')\n _, eval_data = self.read_data('test')\n logging.info('Normalizing data ...')\n self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = (self\n .normalize_state())\n train_data = self.normalize_data(train_data)\n eval_data = self.normalize_data(eval_data)\n logging.info('Saving data ...')\n with open('{}/data.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((train_data, eval_data, self.actions), f)\n with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.data_mean, self.data_std, self.dim_zero, self\n .dim_nonzero), f)\n with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:\n pickle.dump((self.dim_use, self.dim_ignore), f)\n\n def read_data(self, phase):\n all_data, even_data = [], {}\n for action_idx, action in enumerate(self.actions):\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\n for sequence_idx, file in enumerate(os.listdir(action_path)):\n sequence = []\n with open('{}/{}'.format(action_path, file), 'r') as f:\n for line in f.readlines():\n line = line.strip().split(',')\n if len(line) > 0:\n sequence.append(np.array([np.float32(x) for x in\n line]))\n sequence = np.array(sequence)\n 
all_data.append(sequence)\n even_data[action_idx, sequence_idx] = sequence[range(0,\n sequence.shape[0], 2), :]\n return np.concatenate(all_data, axis=0), even_data\n\n def normalize_state(self):\n data_mean = np.mean(self.all_train_data, axis=0)\n data_std = np.std(self.all_train_data, axis=0)\n dim_zero = list(np.where(data_std < 0.0001)[0])\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\n data_std[dim_zero] = 1.0\n return data_mean, data_std, dim_zero, dim_nonzero\n\n def normalize_data(self, data):\n for key in data.keys():\n data[key] = np.divide(data[key] - self.data_mean, self.data_std)\n data[key] = data[key][:, self.dim_use]\n return data\n",
"step-5": "import os, pickle, logging, numpy as np\r\n\r\nfrom .. import utils as U\r\n\r\n\r\nclass CMU_Generator():\r\n def __init__(self, args, dataset_args):\r\n self.in_path = dataset_args['cmu_data_path']\r\n self.out_path = '{}/{}'.format(dataset_args['path'], args.dataset)\r\n self.actions = ['walking', 'running', 'directing_traffic', 'soccer',\r\n 'basketball', 'washwindow', 'jumping', 'basketball_signal']\r\n self.dim_ignore = [0, 1, 2, 3, 4, 5, 6, 7, 8, 21, 22, 23, 24, 25, 26,\r\n 39, 40, 41, 60, 61, 62, 63, 64, 65, 81, 82, 83,\r\n 87, 88, 89, 90, 91, 92, 108, 109, 110, 114, 115, 116]\r\n self.dim_use = list(set(range(39*3)).difference(set(self.dim_ignore)))\r\n U.create_folder(self.out_path)\r\n\r\n def start(self):\r\n logging.info('Reading data ...')\r\n self.all_train_data, train_data = self.read_data('train')\r\n _, eval_data = self.read_data('test')\r\n\r\n logging.info('Normalizing data ...')\r\n self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero = self.normalize_state()\r\n train_data = self.normalize_data(train_data)\r\n eval_data = self.normalize_data(eval_data)\r\n\r\n logging.info('Saving data ...')\r\n with open('{}/data.pkl'.format(self.out_path), 'wb') as f:\r\n pickle.dump((train_data, eval_data, self.actions), f)\r\n with open('{}/normalization.pkl'.format(self.out_path), 'wb') as f:\r\n pickle.dump((self.data_mean, self.data_std, self.dim_zero, self.dim_nonzero), f)\r\n with open('{}/ignore.pkl'.format(self.out_path), 'wb') as f:\r\n pickle.dump((self.dim_use, self.dim_ignore), f)\r\n\r\n def read_data(self, phase):\r\n all_data, even_data = [], {}\r\n for action_idx, action in enumerate(self.actions):\r\n action_path = '{}/{}/{}'.format(self.in_path, phase, action)\r\n for sequence_idx, file in enumerate(os.listdir(action_path)):\r\n sequence = []\r\n with open('{}/{}'.format(action_path, file), 'r') as f:\r\n for line in f.readlines():\r\n line = line.strip().split(',')\r\n if len(line) > 0:\r\n 
sequence.append(np.array([np.float32(x) for x in line]))\r\n sequence = np.array(sequence)\r\n all_data.append(sequence)\r\n even_data[(action_idx, sequence_idx)] = sequence[range(0,sequence.shape[0],2),:]\r\n return np.concatenate(all_data, axis=0), even_data\r\n\r\n def normalize_state(self):\r\n data_mean = np.mean(self.all_train_data, axis=0)\r\n data_std = np.std(self.all_train_data, axis=0)\r\n dim_zero = list(np.where(data_std < 0.0001)[0])\r\n dim_nonzero = list(np.where(data_std >= 0.0001)[0])\r\n data_std[dim_zero] = 1.0\r\n return data_mean, data_std, dim_zero, dim_nonzero\r\n\r\n def normalize_data(self, data):\r\n for key in data.keys():\r\n data[key] = np.divide((data[key] - self.data_mean), self.data_std)\r\n data[key] = data[key][:, self.dim_use]\r\n return data\r\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
from typing import List
# Sample inputs for the demo call at the bottom of the file:
# a 5x4 cake with one horizontal and one vertical cut at position 3.
h = 5
w = 4
horizontalCuts = [3]
verticalCuts = [3]
class Solution:
    def maxArea(self, h: int, w: int, horizontalCuts: List[int], verticalCuts: List[int]) -> int:
        """Return the area of the largest cake piece, modulo 10**9 + 7.

        ``h`` x ``w`` is the cake size; the cut lists give the positions of
        horizontal and vertical cuts.  The answer is the largest horizontal
        gap times the largest vertical gap.

        Fix over the original: the caller's lists are no longer mutated
        (the original sorted them and appended the cake edges in place);
        the parameter ``h`` is also no longer shadowed by a loop variable.
        """
        def largest_gap(cuts: List[int], limit: int) -> int:
            # Walk the cuts in order, treating 0 and `limit` as the edges,
            # and track the widest gap between consecutive positions.
            best, prev = 0, 0
            for cut in sorted(cuts) + [limit]:
                best = max(best, cut - prev)
                prev = cut
            return best

        return (largest_gap(horizontalCuts, h) *
                largest_gap(verticalCuts, w)) % (10 ** 9 + 7)
# Demo: exercise the solver on the sample inputs defined above.
obj = Solution()
print(obj.maxArea(h, w, horizontalCuts, verticalCuts))
|
normal
|
{
"blob_id": "8fb559810fbf79f0849ed98e51d3f2ad1ccc4b8b",
"index": 8296,
"step-1": "<mask token>\n\n\nclass Solution:\n\n def maxArea(self, h: int, w: int, horizontalCuts: List[int],\n verticalCuts: List[int]) ->int:\n horizontalCuts.sort()\n verticalCuts.sort()\n horizontalCuts.append(h)\n verticalCuts.append(w)\n hbreadth = 0\n prev = 0\n for h in horizontalCuts:\n height = h - prev\n hbreadth = max(height, hbreadth)\n prev = h\n prev = 0\n vlength = 0\n for v in verticalCuts:\n height = v - prev\n vlength = max(vlength, height)\n prev = v\n maxarea = hbreadth * vlength % (10 ** 9 + 7)\n return maxarea\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n\n def maxArea(self, h: int, w: int, horizontalCuts: List[int],\n verticalCuts: List[int]) ->int:\n horizontalCuts.sort()\n verticalCuts.sort()\n horizontalCuts.append(h)\n verticalCuts.append(w)\n hbreadth = 0\n prev = 0\n for h in horizontalCuts:\n height = h - prev\n hbreadth = max(height, hbreadth)\n prev = h\n prev = 0\n vlength = 0\n for v in verticalCuts:\n height = v - prev\n vlength = max(vlength, height)\n prev = v\n maxarea = hbreadth * vlength % (10 ** 9 + 7)\n return maxarea\n\n\n<mask token>\nprint(obj.maxArea(h, w, horizontalCuts, verticalCuts))\n",
"step-3": "<mask token>\nh = 5\nw = 4\nhorizontalCuts = [3]\nverticalCuts = [3]\n\n\nclass Solution:\n\n def maxArea(self, h: int, w: int, horizontalCuts: List[int],\n verticalCuts: List[int]) ->int:\n horizontalCuts.sort()\n verticalCuts.sort()\n horizontalCuts.append(h)\n verticalCuts.append(w)\n hbreadth = 0\n prev = 0\n for h in horizontalCuts:\n height = h - prev\n hbreadth = max(height, hbreadth)\n prev = h\n prev = 0\n vlength = 0\n for v in verticalCuts:\n height = v - prev\n vlength = max(vlength, height)\n prev = v\n maxarea = hbreadth * vlength % (10 ** 9 + 7)\n return maxarea\n\n\nobj = Solution()\nprint(obj.maxArea(h, w, horizontalCuts, verticalCuts))\n",
"step-4": "from typing import List\nh = 5\nw = 4\nhorizontalCuts = [3]\nverticalCuts = [3]\n\n\nclass Solution:\n\n def maxArea(self, h: int, w: int, horizontalCuts: List[int],\n verticalCuts: List[int]) ->int:\n horizontalCuts.sort()\n verticalCuts.sort()\n horizontalCuts.append(h)\n verticalCuts.append(w)\n hbreadth = 0\n prev = 0\n for h in horizontalCuts:\n height = h - prev\n hbreadth = max(height, hbreadth)\n prev = h\n prev = 0\n vlength = 0\n for v in verticalCuts:\n height = v - prev\n vlength = max(vlength, height)\n prev = v\n maxarea = hbreadth * vlength % (10 ** 9 + 7)\n return maxarea\n\n\nobj = Solution()\nprint(obj.maxArea(h, w, horizontalCuts, verticalCuts))\n",
"step-5": "from typing import List\nh = 5\nw = 4\nhorizontalCuts = [3]\nverticalCuts = [3]\nclass Solution:\n def maxArea(self, h: int, w: int, horizontalCuts: List[int], verticalCuts: List[int]) -> int:\n horizontalCuts.sort()\n verticalCuts.sort()\n horizontalCuts.append(h)\n verticalCuts.append(w)\n hbreadth= 0\n prev=0\n for h in horizontalCuts:\n height= h-prev\n hbreadth= max(height, hbreadth)\n prev= h\n\n prev=0\n vlength=0\n for v in verticalCuts:\n height= v-prev\n vlength= max(vlength, height)\n prev=v\n\n maxarea= (hbreadth * vlength) % ((10**9) + 7)\n return maxarea\n\nobj=Solution()\nprint(obj.maxArea(h, w, horizontalCuts, verticalCuts))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
pg.alert(name)
if name == 'Caroline':
pg.alert('Hi ' + name)
points += 5
t.sleep(1)
wb.open('https://www.textgiraffe.com/Caroline/Page2/')
elif name == 'Bob':
pg.alert(name + ',you are a great person!')
points += 3
t.sleep(1)
wb.open('http://dreamworks.wikia.com/wiki/File:Bob_the_Builder.jpeg')
elif name == 'Catherine':
pg.alert(name + 'I like you already.')
points += 2
t.sleep(2)
wb.open(
'https://www.amazon.com/Catherine-Street-Sign-Reflective-Aluminum/dp/B00KY6ZDZW'
)
elif name == 'James':
pg.alert('nice to meet you' + name)
points += 1
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=uV9LYMAEnRA')
elif name == 'Kate':
pg.alert('Hello!')
points += 2
t.sleep(1)
wb.open(
'https://www.google.com/search?q=kate+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj-3cyIyJzeAhVRnOAKHRnoCtQQ_AUIDigB&biw=924&bih=639#imgrc=sbQIiK5VLfo7kM:'
)
elif name == 'Will':
pg.alert('Coool!')
ponts += 3
t.sleep(2)
wb.open(
'https://www.google.com/search?q=will+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj3n93PyJzeAhWvY98KHcoWCFEQ_AUIDigB&biw=924&bih=639#imgrc=Z0hfeIoXQgHxJM:'
)
else:
pg.alert("I don't know you!")
points += 0
t.sleep(2)
wb.open(
'https://www.google.com/search?q=smiley+face&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjwsdL4gYveAhXtc98KHaGcAz0Q_AUIDigB&biw=1366&bih=657'
)
<|reserved_special_token_0|>
if color == 'Blue':
pg.alert('mine too!')
points += 5
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=SoIKv3xxuMA')
elif color == 'Pink':
pg.alert('Do you like unicorns too?')
points += 2
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=a-xWhG4UU_Y')
elif color == 'Purple':
pg.alert('cool!')
points += 3
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=TvnYmWpD_T8')
elif color == 'Black':
pg.alert('ok...')
points -= 2
t.sleep(2)
wb.open(
'https://www.google.com/search?q=goth&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiJ-tDj-oreAhUpUt8KHWZsAzQQ_AUIDigB&biw=1366&bih=657#imgrc=odGcWJwuqRcJsM:'
)
elif color == 'Yellow':
pg.alert('Like a sunflower!')
points += 1
t.sleep(1)
wb.open(
'https://www.google.com/search?q=sunflower&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiZyKCTyZzeAhXGc98KHd8kDJ8Q_AUIDigB&biw=924&bih=639#imgrc=8kZ1NZp_9-nr5M:'
)
elif color == 'Brown':
pg.alert('wow.')
points -= 5
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')
else:
pg.alert('nice')
points += 1
t.sleep(2)
wb.open('https://giphy.com/explore/rainbow')
<|reserved_special_token_0|>
if sport == 'Hockey':
pg.alert('yep, I guess your cool')
points += 5
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=JDnZTUkCOBQ')
elif sport == 'Soccer':
pg.alert('you mean futbol...')
points += 5
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=K-U1ZgrsGGg')
elif sport == 'Lacrosse':
pg.alert(' I used to play..')
points += 2
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=o5hsPBsGD44')
elif sport == 'Football':
pg.alert('that cool.')
points += 4
t.sleep(3)
wb.open(
'https://www.google.com/search?q=football&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwimsOqj_IreAhUumeAKHd-FD6kQ_AUIDigB&biw=1366&bih=657#imgrc=GCqjPQ-jqckcfM:'
)
elif sport == 'Field Hockey':
pg.alert('Nice!')
points += 2
t.sleep(3)
wb.open(
'https://www.google.com/search?q=field+hockey&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwieus2jypzeAhWvVN8KHeK1CJ8Q_AUIDigB&biw=924&bih=639#imgrc=FCpGZY2CS5KVXM:'
)
elif sport == 'Surfing':
pg.alert('WOAH')
points += 7
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=HBklS2vYEPo')
else:
pg.alert('cool')
points += 0
t.sleep(2)
wb.open(
'https://www.google.com/search?q=no+sports&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiGqOK-_IreAhXFneAKHcEGANIQ_AUIDigB&biw=1366&bih=657#imgrc=y7acx-yoEouoUM:'
)
<|reserved_special_token_0|>
if subject == 'Math':
pg.alert('so your a mathmatician')
points += 2
t.sleep(3)
wb.open(
'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=HNvFW9yoDYTm_QbUyKzgDw&q=addiong&oq=addiong&gs_l=img.3..0i10i24.5226.6666..6852...1.0..0.56.417.8......0....1..gws-wiz-img.......0j0i67j0i10.kcqMNDR26RY#imgrc=LqznGvY1fJpCGM:'
)
elif subject == 'Computer science':
pg.alert('nice')
points += 9
t.sleep(3)
wb.open(
'https://www.google.com/search?q=computers&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiom6vv_IreAhUuneAKHXVGA4kQ_AUIDygC&biw=1366&bih=657'
)
elif subject == 'English':
pg.alert('I like it too.')
points += 3
t.sleep(3)
wb.open(
'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=hNvFW4e3Jafp_QbR26mIDw&q=+book&oq=+book&gs_l=img.3..0i67l3j0j0i67j0l5.3464.3464..3690...0.0..0.51.51.1......0....1..gws-wiz-img.2n6KjdjVyU0'
)
elif subject == 'Science':
pg.alert('Bill Nye the Science Guy.')
points += 3
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=nDN7M0J3HXc')
elif subject == 'Spanish':
pg.alert('Hola! Como estas?')
points += 3
t.sleep(2)
wb.open(
'https://www.google.com/search?hl=en&authuser=0&rlz=1C1GCEA_enUS752US774&tbm=isch&q=fiesta&chips=q:fiesta,online_chips:mexican+fiesta&usg=AI4_-kQGU87DySQyv0Aqat3pdqhIpYYwjA&sa=X&ved=0ahUKEwjzjvL6lq7eAhWpTd8KHQ6-CIoQ4lYIKygE&biw=924&bih=639&dpr=1#imgrc=6H_w7py8kTIUHM:'
)
elif subject == 'History':
pg.alert('In 1492 Christopher Columbus sailed the ocean blue')
points += 3
t.sleep(2)
wb.open(
'https://www.google.com/search?q=history&rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&source=lnms&sa=X&ved=0ahUKEwiZ_YDvutHeAhXOVN8KHdEUDEkQ_AUICygC'
)
else:
pg.alert('cool')
points += 1
t.sleep(2)
wb.open(
'https://www.google.com/search?q=school+gif&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjqpI_f_YreAhWsd98KHblYBY8Q_AUIDigB&biw=1366&bih=657#imgrc=kk5pi12VrUoKGM:'
)
<|reserved_special_token_0|>
if food == 'Pizza':
pg.alert('Pizza Hut? Dominos?')
points += 2
t.sleep(2)
wb.open('https://cooking.nytimes.com/guides/1-how-to-make-pizza')
elif food == 'Chocolate cake':
pg.alert('Now I want one')
points += 9
t.sleep(3)
wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')
elif food == 'Pasta':
pg.alert('I like pasta!')
points += 3
t.sleep(3)
wb.open(
'https://www.google.com/search?q=pasta&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiH_JXSlK7eAhWKT98KHScQASEQ_AUIDigB&biw=924&bih=639'
)
elif food == 'Ice cream':
pg.alert('What kind? I like cookie monster.')
points += 3
t.sleep(2)
wb.open('https://barefeetinthekitchen.com/homemade-ice-cream-recipe/')
elif food == 'Fruit':
pg.alert('Refreshing!')
points += 3
t.sleep(2)
wb.open(
'https://www.google.com/search?q=fruit&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwijobOcla7eAhVyUt8KHfONDGUQ_AUIDigB&biw=924&bih=639#imgrc=ACrdFKwEzni-QM:'
)
elif food == 'Chicken':
pg.alert('Yum!')
points += 2
t.sleep(2)
wb.open(
'https://www.google.com/search?q=chicken&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj59fTCutHeAhXLct8KHRV6D88Q_AUIEygB&biw=1366&bih=657'
)
else:
pg.alert('YUUMMM')
points += 1
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=11HK5EuYwSk')
<|reserved_special_token_0|>
if 'Divergent' in movie:
number = pg.prompt('Which movie is your favorite').title()
if number == '1':
pg.alert('Nice!')
<|reserved_special_token_0|>
if ice_cream == 'cookies and cream':
pg.alert('YES')
pg.alert('Your final score is ' + str(points))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
points = 0
<|reserved_special_token_0|>
name = pg.prompt('What is your name? ').title()
pg.alert(name)
if name == 'Caroline':
pg.alert('Hi ' + name)
points += 5
t.sleep(1)
wb.open('https://www.textgiraffe.com/Caroline/Page2/')
elif name == 'Bob':
pg.alert(name + ',you are a great person!')
points += 3
t.sleep(1)
wb.open('http://dreamworks.wikia.com/wiki/File:Bob_the_Builder.jpeg')
elif name == 'Catherine':
pg.alert(name + 'I like you already.')
points += 2
t.sleep(2)
wb.open(
'https://www.amazon.com/Catherine-Street-Sign-Reflective-Aluminum/dp/B00KY6ZDZW'
)
elif name == 'James':
pg.alert('nice to meet you' + name)
points += 1
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=uV9LYMAEnRA')
elif name == 'Kate':
pg.alert('Hello!')
points += 2
t.sleep(1)
wb.open(
'https://www.google.com/search?q=kate+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj-3cyIyJzeAhVRnOAKHRnoCtQQ_AUIDigB&biw=924&bih=639#imgrc=sbQIiK5VLfo7kM:'
)
elif name == 'Will':
pg.alert('Coool!')
ponts += 3
t.sleep(2)
wb.open(
'https://www.google.com/search?q=will+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj3n93PyJzeAhWvY98KHcoWCFEQ_AUIDigB&biw=924&bih=639#imgrc=Z0hfeIoXQgHxJM:'
)
else:
pg.alert("I don't know you!")
points += 0
t.sleep(2)
wb.open(
'https://www.google.com/search?q=smiley+face&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjwsdL4gYveAhXtc98KHaGcAz0Q_AUIDigB&biw=1366&bih=657'
)
color = pg.prompt('what is your favorite color? ').title()
if color == 'Blue':
pg.alert('mine too!')
points += 5
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=SoIKv3xxuMA')
elif color == 'Pink':
pg.alert('Do you like unicorns too?')
points += 2
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=a-xWhG4UU_Y')
elif color == 'Purple':
pg.alert('cool!')
points += 3
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=TvnYmWpD_T8')
elif color == 'Black':
pg.alert('ok...')
points -= 2
t.sleep(2)
wb.open(
'https://www.google.com/search?q=goth&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiJ-tDj-oreAhUpUt8KHWZsAzQQ_AUIDigB&biw=1366&bih=657#imgrc=odGcWJwuqRcJsM:'
)
elif color == 'Yellow':
pg.alert('Like a sunflower!')
points += 1
t.sleep(1)
wb.open(
'https://www.google.com/search?q=sunflower&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiZyKCTyZzeAhXGc98KHd8kDJ8Q_AUIDigB&biw=924&bih=639#imgrc=8kZ1NZp_9-nr5M:'
)
elif color == 'Brown':
pg.alert('wow.')
points -= 5
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')
else:
pg.alert('nice')
points += 1
t.sleep(2)
wb.open('https://giphy.com/explore/rainbow')
sport = pg.prompt('What is your favorite sport? ').title()
if sport == 'Hockey':
pg.alert('yep, I guess your cool')
points += 5
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=JDnZTUkCOBQ')
elif sport == 'Soccer':
pg.alert('you mean futbol...')
points += 5
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=K-U1ZgrsGGg')
elif sport == 'Lacrosse':
pg.alert(' I used to play..')
points += 2
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=o5hsPBsGD44')
elif sport == 'Football':
pg.alert('that cool.')
points += 4
t.sleep(3)
wb.open(
'https://www.google.com/search?q=football&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwimsOqj_IreAhUumeAKHd-FD6kQ_AUIDigB&biw=1366&bih=657#imgrc=GCqjPQ-jqckcfM:'
)
elif sport == 'Field Hockey':
pg.alert('Nice!')
points += 2
t.sleep(3)
wb.open(
'https://www.google.com/search?q=field+hockey&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwieus2jypzeAhWvVN8KHeK1CJ8Q_AUIDigB&biw=924&bih=639#imgrc=FCpGZY2CS5KVXM:'
)
elif sport == 'Surfing':
pg.alert('WOAH')
points += 7
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=HBklS2vYEPo')
else:
pg.alert('cool')
points += 0
t.sleep(2)
wb.open(
'https://www.google.com/search?q=no+sports&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiGqOK-_IreAhXFneAKHcEGANIQ_AUIDigB&biw=1366&bih=657#imgrc=y7acx-yoEouoUM:'
)
subject = pg.prompt('What is your favorite subject?').title()
if subject == 'Math':
pg.alert('so your a mathmatician')
points += 2
t.sleep(3)
wb.open(
'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=HNvFW9yoDYTm_QbUyKzgDw&q=addiong&oq=addiong&gs_l=img.3..0i10i24.5226.6666..6852...1.0..0.56.417.8......0....1..gws-wiz-img.......0j0i67j0i10.kcqMNDR26RY#imgrc=LqznGvY1fJpCGM:'
)
elif subject == 'Computer science':
pg.alert('nice')
points += 9
t.sleep(3)
wb.open(
'https://www.google.com/search?q=computers&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiom6vv_IreAhUuneAKHXVGA4kQ_AUIDygC&biw=1366&bih=657'
)
elif subject == 'English':
pg.alert('I like it too.')
points += 3
t.sleep(3)
wb.open(
'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=hNvFW4e3Jafp_QbR26mIDw&q=+book&oq=+book&gs_l=img.3..0i67l3j0j0i67j0l5.3464.3464..3690...0.0..0.51.51.1......0....1..gws-wiz-img.2n6KjdjVyU0'
)
elif subject == 'Science':
pg.alert('Bill Nye the Science Guy.')
points += 3
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=nDN7M0J3HXc')
elif subject == 'Spanish':
pg.alert('Hola! Como estas?')
points += 3
t.sleep(2)
wb.open(
'https://www.google.com/search?hl=en&authuser=0&rlz=1C1GCEA_enUS752US774&tbm=isch&q=fiesta&chips=q:fiesta,online_chips:mexican+fiesta&usg=AI4_-kQGU87DySQyv0Aqat3pdqhIpYYwjA&sa=X&ved=0ahUKEwjzjvL6lq7eAhWpTd8KHQ6-CIoQ4lYIKygE&biw=924&bih=639&dpr=1#imgrc=6H_w7py8kTIUHM:'
)
elif subject == 'History':
pg.alert('In 1492 Christopher Columbus sailed the ocean blue')
points += 3
t.sleep(2)
wb.open(
'https://www.google.com/search?q=history&rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&source=lnms&sa=X&ved=0ahUKEwiZ_YDvutHeAhXOVN8KHdEUDEkQ_AUICygC'
)
else:
pg.alert('cool')
points += 1
t.sleep(2)
wb.open(
'https://www.google.com/search?q=school+gif&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjqpI_f_YreAhWsd98KHblYBY8Q_AUIDigB&biw=1366&bih=657#imgrc=kk5pi12VrUoKGM:'
)
food = pg.prompt('What is your favorite food?').title()
if food == 'Pizza':
pg.alert('Pizza Hut? Dominos?')
points += 2
t.sleep(2)
wb.open('https://cooking.nytimes.com/guides/1-how-to-make-pizza')
elif food == 'Chocolate cake':
pg.alert('Now I want one')
points += 9
t.sleep(3)
wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')
elif food == 'Pasta':
pg.alert('I like pasta!')
points += 3
t.sleep(3)
wb.open(
'https://www.google.com/search?q=pasta&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiH_JXSlK7eAhWKT98KHScQASEQ_AUIDigB&biw=924&bih=639'
)
elif food == 'Ice cream':
pg.alert('What kind? I like cookie monster.')
points += 3
t.sleep(2)
wb.open('https://barefeetinthekitchen.com/homemade-ice-cream-recipe/')
elif food == 'Fruit':
pg.alert('Refreshing!')
points += 3
t.sleep(2)
wb.open(
'https://www.google.com/search?q=fruit&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwijobOcla7eAhVyUt8KHfONDGUQ_AUIDigB&biw=924&bih=639#imgrc=ACrdFKwEzni-QM:'
)
elif food == 'Chicken':
pg.alert('Yum!')
points += 2
t.sleep(2)
wb.open(
'https://www.google.com/search?q=chicken&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj59fTCutHeAhXLct8KHRV6D88Q_AUIEygB&biw=1366&bih=657'
)
else:
pg.alert('YUUMMM')
points += 1
t.sleep(2)
wb.open('https://www.youtube.com/watch?v=11HK5EuYwSk')
movie = pg.prompt('What is your favorite movie series?').title()
if 'Divergent' in movie:
number = pg.prompt('Which movie is your favorite').title()
if number == '1':
pg.alert('Nice!')
ice_cream = pg.confirm('Which of these flavors is your favorite?',
'Choose one', ['chocolate', 'vanilla', 'cookies and cream'])
if ice_cream == 'cookies and cream':
pg.alert('YES')
pg.alert('Your final score is ' + str(points))
<|reserved_special_token_1|>
import webbrowser as wb
points = 0  # running quiz score, adjusted by every answer below
import time as t
import pyautogui as pg
# Ask the player's name; .title() normalizes capitalization for the matches below.
name = pg.prompt('What is your name? ').title()
pg.alert(name)
if name == 'Caroline':
pg.alert('Hi ' + name)
points += 5
t.sleep(1)
wb.open('https://www.textgiraffe.com/Caroline/Page2/')
elif name == 'Bob':
pg.alert(name + ',you are a great person!')
points += 3
t.sleep(1)
wb.open('http://dreamworks.wikia.com/wiki/File:Bob_the_Builder.jpeg')
elif name == 'Catherine':
pg.alert(name + 'I like you already.')
points += 2
t.sleep(2)
wb.open(
'https://www.amazon.com/Catherine-Street-Sign-Reflective-Aluminum/dp/B00KY6ZDZW'
)
elif name == 'James':
pg.alert('nice to meet you' + name)
points += 1
t.sleep(1)
wb.open('https://www.youtube.com/watch?v=uV9LYMAEnRA')
elif name == 'Kate':
pg.alert('Hello!')
points += 2
t.sleep(1)
wb.open(
'https://www.google.com/search?q=kate+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj-3cyIyJzeAhVRnOAKHRnoCtQQ_AUIDigB&biw=924&bih=639#imgrc=sbQIiK5VLfo7kM:'
)
elif name == 'Will':
pg.alert('Coool!')
ponts += 3
t.sleep(2)
wb.open(
'https://www.google.com/search?q=will+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj3n93PyJzeAhWvY98KHcoWCFEQ_AUIDigB&biw=924&bih=639#imgrc=Z0hfeIoXQgHxJM:'
)
else:
pg.alert("I don't know you!")
points += 0
t.sleep(2)
wb.open(
'https://www.google.com/search?q=smiley+face&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjwsdL4gYveAhXtc98KHaGcAz0Q_AUIDigB&biw=1366&bih=657'
)
# Favorite-colour question, table-driven: each answer maps to
# (alert message, score delta, pause in seconds, page to open).
_color_replies = {
    'Blue': ('mine too!', 5, 1, 'https://www.youtube.com/watch?v=SoIKv3xxuMA'),
    'Pink': ('Do you like unicorns too?', 2, 2, 'https://www.youtube.com/watch?v=a-xWhG4UU_Y'),
    'Purple': ('cool!', 3, 1, 'https://www.youtube.com/watch?v=TvnYmWpD_T8'),
    'Black': ('ok...', -2, 2, 'https://www.google.com/search?q=goth&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiJ-tDj-oreAhUpUt8KHWZsAzQQ_AUIDigB&biw=1366&bih=657#imgrc=odGcWJwuqRcJsM:'),
    'Yellow': ('Like a sunflower!', 1, 1, 'https://www.google.com/search?q=sunflower&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiZyKCTyZzeAhXGc98KHd8kDJ8Q_AUIDigB&biw=924&bih=639#imgrc=8kZ1NZp_9-nr5M:'),
    'Brown': ('wow.', -5, 1, 'https://www.youtube.com/watch?v=dsJtgmAhFF4'),
}
color = pg.prompt('what is your favorite color? ').title()
_msg, _delta, _pause, _url = _color_replies.get(
    color, ('nice', 1, 2, 'https://giphy.com/explore/rainbow'))
pg.alert(_msg)
points += _delta
t.sleep(_pause)
wb.open(_url)
# Favorite-sport question, table-driven: each answer maps to
# (alert message, score delta, pause in seconds, page to open).
_sport_replies = {
    'Hockey': ('yep, I guess your cool', 5, 2, 'https://www.youtube.com/watch?v=JDnZTUkCOBQ'),
    'Soccer': ('you mean futbol...', 5, 2, 'https://www.youtube.com/watch?v=K-U1ZgrsGGg'),
    'Lacrosse': (' I used to play..', 2, 2, 'https://www.youtube.com/watch?v=o5hsPBsGD44'),
    'Football': ('that cool.', 4, 3, 'https://www.google.com/search?q=football&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwimsOqj_IreAhUumeAKHd-FD6kQ_AUIDigB&biw=1366&bih=657#imgrc=GCqjPQ-jqckcfM:'),
    'Field Hockey': ('Nice!', 2, 3, 'https://www.google.com/search?q=field+hockey&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwieus2jypzeAhWvVN8KHeK1CJ8Q_AUIDigB&biw=924&bih=639#imgrc=FCpGZY2CS5KVXM:'),
    'Surfing': ('WOAH', 7, 1, 'https://www.youtube.com/watch?v=HBklS2vYEPo'),
}
sport = pg.prompt('What is your favorite sport? ').title()
_msg, _delta, _pause, _url = _sport_replies.get(
    sport, ('cool', 0, 2, 'https://www.google.com/search?q=no+sports&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiGqOK-_IreAhXFneAKHcEGANIQ_AUIDigB&biw=1366&bih=657#imgrc=y7acx-yoEouoUM:'))
pg.alert(_msg)
points += _delta
t.sleep(_pause)
wb.open(_url)
# Favorite-subject question.  NOTE: the answer is passed through .title(),
# which capitalises every word, so multi-word branches must be title-cased
# to be reachable.
subject = pg.prompt('What is your favorite subject?').title()
if subject == 'Math':
    pg.alert('so your a mathmatician')
    points += 2
    t.sleep(3)
    wb.open(
        'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=HNvFW9yoDYTm_QbUyKzgDw&q=addiong&oq=addiong&gs_l=img.3..0i10i24.5226.6666..6852...1.0..0.56.417.8......0....1..gws-wiz-img.......0j0i67j0i10.kcqMNDR26RY#imgrc=LqznGvY1fJpCGM:'
    )
elif subject == 'Computer Science':  # fixed: was 'Computer science', unreachable after .title()
    pg.alert('nice')
    points += 9
    t.sleep(3)
    wb.open(
        'https://www.google.com/search?q=computers&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiom6vv_IreAhUuneAKHXVGA4kQ_AUIDygC&biw=1366&bih=657'
    )
elif subject == 'English':
    pg.alert('I like it too.')
    points += 3
    t.sleep(3)
    wb.open(
        'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=hNvFW4e3Jafp_QbR26mIDw&q=+book&oq=+book&gs_l=img.3..0i67l3j0j0i67j0l5.3464.3464..3690...0.0..0.51.51.1......0....1..gws-wiz-img.2n6KjdjVyU0'
    )
elif subject == 'Science':
    pg.alert('Bill Nye the Science Guy.')
    points += 3
    t.sleep(2)
    wb.open('https://www.youtube.com/watch?v=nDN7M0J3HXc')
elif subject == 'Spanish':
    pg.alert('Hola! Como estas?')
    points += 3
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?hl=en&authuser=0&rlz=1C1GCEA_enUS752US774&tbm=isch&q=fiesta&chips=q:fiesta,online_chips:mexican+fiesta&usg=AI4_-kQGU87DySQyv0Aqat3pdqhIpYYwjA&sa=X&ved=0ahUKEwjzjvL6lq7eAhWpTd8KHQ6-CIoQ4lYIKygE&biw=924&bih=639&dpr=1#imgrc=6H_w7py8kTIUHM:'
    )
elif subject == 'History':
    pg.alert('In 1492 Christopher Columbus sailed the ocean blue')
    points += 3
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?q=history&rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&source=lnms&sa=X&ved=0ahUKEwiZ_YDvutHeAhXOVN8KHdEUDEkQ_AUICygC'
    )
else:
    # Any other subject earns a single point.
    pg.alert('cool')
    points += 1
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?q=school+gif&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjqpI_f_YreAhWsd98KHblYBY8Q_AUIDigB&biw=1366&bih=657#imgrc=kk5pi12VrUoKGM:'
    )
# Favorite-food question.  NOTE: the answer is passed through .title(),
# which capitalises every word, so multi-word branches must be title-cased
# to be reachable.
food = pg.prompt('What is your favorite food?').title()
if food == 'Pizza':
    pg.alert('Pizza Hut? Dominos?')
    points += 2
    t.sleep(2)
    wb.open('https://cooking.nytimes.com/guides/1-how-to-make-pizza')
elif food == 'Chocolate Cake':  # fixed: was 'Chocolate cake', unreachable after .title()
    pg.alert('Now I want one')
    points += 9
    t.sleep(3)
    wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')
elif food == 'Pasta':
    pg.alert('I like pasta!')
    points += 3
    t.sleep(3)
    wb.open(
        'https://www.google.com/search?q=pasta&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiH_JXSlK7eAhWKT98KHScQASEQ_AUIDigB&biw=924&bih=639'
    )
elif food == 'Ice Cream':  # fixed: was 'Ice cream', unreachable after .title()
    pg.alert('What kind? I like cookie monster.')
    points += 3
    t.sleep(2)
    wb.open('https://barefeetinthekitchen.com/homemade-ice-cream-recipe/')
elif food == 'Fruit':
    pg.alert('Refreshing!')
    points += 3
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?q=fruit&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwijobOcla7eAhVyUt8KHfONDGUQ_AUIDigB&biw=924&bih=639#imgrc=ACrdFKwEzni-QM:'
    )
elif food == 'Chicken':
    pg.alert('Yum!')
    points += 2
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?q=chicken&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj59fTCutHeAhXLct8KHRV6D88Q_AUIEygB&biw=1366&bih=657'
    )
else:
    # Anything else gets a generic compliment and one point.
    pg.alert('YUUMMM')
    points += 1
    t.sleep(2)
    wb.open('https://www.youtube.com/watch?v=11HK5EuYwSk')
# Closing questions: movie-series follow-up, flavour poll, then the score.
movie = pg.prompt('What is your favorite movie series?').title()
# Only ask the follow-up for Divergent fans; only praise the first film.
if 'Divergent' in movie and pg.prompt('Which movie is your favorite').title() == '1':
    pg.alert('Nice!')
flavor = pg.confirm('Which of these flavors is your favorite?',
    'Choose one', ['chocolate', 'vanilla', 'cookies and cream'])
if flavor == 'cookies and cream':
    pg.alert('YES')
pg.alert(f'Your final score is {points}')
<|reserved_special_token_1|>
# Interactive "get to know you" quiz.  Each answer pops an alert, adjusts
# the running score, pauses briefly, then opens a related web page.
import time as t
import webbrowser as wb

import pyautogui as pg

# Running quiz score, adjusted after every answer.
points = 0

name = pg.prompt('What is your name? ').title()
pg.alert(name)
if name == 'Caroline':
    pg.alert('Hi ' + name)
    points += 5
    t.sleep(1)
    wb.open('https://www.textgiraffe.com/Caroline/Page2/')
elif name == 'Bob':
    pg.alert(name + ',you are a great person!')
    points += 3
    t.sleep(1)
    wb.open('http://dreamworks.wikia.com/wiki/File:Bob_the_Builder.jpeg')
elif name == 'Catherine':
    pg.alert(name + 'I like you already.')
    points += 2
    t.sleep(2)
    wb.open(
        'https://www.amazon.com/Catherine-Street-Sign-Reflective-Aluminum/dp/B00KY6ZDZW'
    )
elif name == 'James':
    pg.alert('nice to meet you' + name)
    points += 1
    t.sleep(1)
    wb.open('https://www.youtube.com/watch?v=uV9LYMAEnRA')
elif name == 'Kate':
    pg.alert('Hello!')
    points += 2
    t.sleep(1)
    wb.open(
        'https://www.google.com/search?q=kate+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj-3cyIyJzeAhVRnOAKHRnoCtQQ_AUIDigB&biw=924&bih=639#imgrc=sbQIiK5VLfo7kM:'
    )
elif name == 'Will':
    pg.alert('Coool!')
    points += 3  # fixed: was `ponts += 3`, which raised NameError on this branch
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?q=will+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj3n93PyJzeAhWvY98KHcoWCFEQ_AUIDigB&biw=924&bih=639#imgrc=Z0hfeIoXQgHxJM:'
    )
else:
    # Unrecognised name: no points, generic greeting.
    pg.alert("I don't know you!")
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?q=smiley+face&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjwsdL4gYveAhXtc98KHaGcAz0Q_AUIDigB&biw=1366&bih=657'
    )
# Favorite-colour question, table-driven: each answer maps to
# (alert message, score delta, pause in seconds, page to open).
_color_replies = {
    'Blue': ('mine too!', 5, 1, 'https://www.youtube.com/watch?v=SoIKv3xxuMA'),
    'Pink': ('Do you like unicorns too?', 2, 2, 'https://www.youtube.com/watch?v=a-xWhG4UU_Y'),
    'Purple': ('cool!', 3, 1, 'https://www.youtube.com/watch?v=TvnYmWpD_T8'),
    'Black': ('ok...', -2, 2, 'https://www.google.com/search?q=goth&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiJ-tDj-oreAhUpUt8KHWZsAzQQ_AUIDigB&biw=1366&bih=657#imgrc=odGcWJwuqRcJsM:'),
    'Yellow': ('Like a sunflower!', 1, 1, 'https://www.google.com/search?q=sunflower&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiZyKCTyZzeAhXGc98KHd8kDJ8Q_AUIDigB&biw=924&bih=639#imgrc=8kZ1NZp_9-nr5M:'),
    'Brown': ('wow.', -5, 1, 'https://www.youtube.com/watch?v=dsJtgmAhFF4'),
}
color = pg.prompt('what is your favorite color? ').title()
_msg, _delta, _pause, _url = _color_replies.get(
    color, ('nice', 1, 2, 'https://giphy.com/explore/rainbow'))
pg.alert(_msg)
points += _delta
t.sleep(_pause)
wb.open(_url)
# Favorite-sport question, table-driven: each answer maps to
# (alert message, score delta, pause in seconds, page to open).
_sport_replies = {
    'Hockey': ('yep, I guess your cool', 5, 2, 'https://www.youtube.com/watch?v=JDnZTUkCOBQ'),
    'Soccer': ('you mean futbol...', 5, 2, 'https://www.youtube.com/watch?v=K-U1ZgrsGGg'),
    'Lacrosse': (' I used to play..', 2, 2, 'https://www.youtube.com/watch?v=o5hsPBsGD44'),
    'Football': ('that cool.', 4, 3, 'https://www.google.com/search?q=football&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwimsOqj_IreAhUumeAKHd-FD6kQ_AUIDigB&biw=1366&bih=657#imgrc=GCqjPQ-jqckcfM:'),
    'Field Hockey': ('Nice!', 2, 3, 'https://www.google.com/search?q=field+hockey&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwieus2jypzeAhWvVN8KHeK1CJ8Q_AUIDigB&biw=924&bih=639#imgrc=FCpGZY2CS5KVXM:'),
    'Surfing': ('WOAH', 7, 1, 'https://www.youtube.com/watch?v=HBklS2vYEPo'),
}
sport = pg.prompt('What is your favorite sport? ').title()
_msg, _delta, _pause, _url = _sport_replies.get(
    sport, ('cool', 0, 2, 'https://www.google.com/search?q=no+sports&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiGqOK-_IreAhXFneAKHcEGANIQ_AUIDigB&biw=1366&bih=657#imgrc=y7acx-yoEouoUM:'))
pg.alert(_msg)
points += _delta
t.sleep(_pause)
wb.open(_url)
# Favorite-subject question.  NOTE: the answer is passed through .title(),
# which capitalises every word, so multi-word branches must be title-cased
# to be reachable.
subject = pg.prompt('What is your favorite subject?').title()
if subject == 'Math':
    pg.alert('so your a mathmatician')
    points += 2
    t.sleep(3)
    wb.open(
        'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=HNvFW9yoDYTm_QbUyKzgDw&q=addiong&oq=addiong&gs_l=img.3..0i10i24.5226.6666..6852...1.0..0.56.417.8......0....1..gws-wiz-img.......0j0i67j0i10.kcqMNDR26RY#imgrc=LqznGvY1fJpCGM:'
    )
elif subject == 'Computer Science':  # fixed: was 'Computer science', unreachable after .title()
    pg.alert('nice')
    points += 9
    t.sleep(3)
    wb.open(
        'https://www.google.com/search?q=computers&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiom6vv_IreAhUuneAKHXVGA4kQ_AUIDygC&biw=1366&bih=657'
    )
elif subject == 'English':
    pg.alert('I like it too.')
    points += 3
    t.sleep(3)
    wb.open(
        'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=hNvFW4e3Jafp_QbR26mIDw&q=+book&oq=+book&gs_l=img.3..0i67l3j0j0i67j0l5.3464.3464..3690...0.0..0.51.51.1......0....1..gws-wiz-img.2n6KjdjVyU0'
    )
elif subject == 'Science':
    pg.alert('Bill Nye the Science Guy.')
    points += 3
    t.sleep(2)
    wb.open('https://www.youtube.com/watch?v=nDN7M0J3HXc')
elif subject == 'Spanish':
    pg.alert('Hola! Como estas?')
    points += 3
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?hl=en&authuser=0&rlz=1C1GCEA_enUS752US774&tbm=isch&q=fiesta&chips=q:fiesta,online_chips:mexican+fiesta&usg=AI4_-kQGU87DySQyv0Aqat3pdqhIpYYwjA&sa=X&ved=0ahUKEwjzjvL6lq7eAhWpTd8KHQ6-CIoQ4lYIKygE&biw=924&bih=639&dpr=1#imgrc=6H_w7py8kTIUHM:'
    )
elif subject == 'History':
    pg.alert('In 1492 Christopher Columbus sailed the ocean blue')
    points += 3
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?q=history&rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&source=lnms&sa=X&ved=0ahUKEwiZ_YDvutHeAhXOVN8KHdEUDEkQ_AUICygC'
    )
else:
    # Any other subject earns a single point.
    pg.alert('cool')
    points += 1
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?q=school+gif&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjqpI_f_YreAhWsd98KHblYBY8Q_AUIDigB&biw=1366&bih=657#imgrc=kk5pi12VrUoKGM:'
    )
# Favorite-food question.  NOTE: the answer is passed through .title(),
# which capitalises every word, so multi-word branches must be title-cased
# to be reachable.
food = pg.prompt('What is your favorite food?').title()
if food == 'Pizza':
    pg.alert('Pizza Hut? Dominos?')
    points += 2
    t.sleep(2)
    wb.open('https://cooking.nytimes.com/guides/1-how-to-make-pizza')
elif food == 'Chocolate Cake':  # fixed: was 'Chocolate cake', unreachable after .title()
    pg.alert('Now I want one')
    points += 9
    t.sleep(3)
    wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')
elif food == 'Pasta':
    pg.alert('I like pasta!')
    points += 3
    t.sleep(3)
    wb.open(
        'https://www.google.com/search?q=pasta&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiH_JXSlK7eAhWKT98KHScQASEQ_AUIDigB&biw=924&bih=639'
    )
elif food == 'Ice Cream':  # fixed: was 'Ice cream', unreachable after .title()
    pg.alert('What kind? I like cookie monster.')
    points += 3
    t.sleep(2)
    wb.open('https://barefeetinthekitchen.com/homemade-ice-cream-recipe/')
elif food == 'Fruit':
    pg.alert('Refreshing!')
    points += 3
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?q=fruit&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwijobOcla7eAhVyUt8KHfONDGUQ_AUIDigB&biw=924&bih=639#imgrc=ACrdFKwEzni-QM:'
    )
elif food == 'Chicken':
    pg.alert('Yum!')
    points += 2
    t.sleep(2)
    wb.open(
        'https://www.google.com/search?q=chicken&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj59fTCutHeAhXLct8KHRV6D88Q_AUIEygB&biw=1366&bih=657'
    )
else:
    # Anything else gets a generic compliment and one point.
    pg.alert('YUUMMM')
    points += 1
    t.sleep(2)
    wb.open('https://www.youtube.com/watch?v=11HK5EuYwSk')
# Closing questions: movie-series follow-up, flavour poll, then the score.
movie = pg.prompt('What is your favorite movie series?').title()
# Only ask the follow-up for Divergent fans; only praise the first film.
if 'Divergent' in movie and pg.prompt('Which movie is your favorite').title() == '1':
    pg.alert('Nice!')
flavor = pg.confirm('Which of these flavors is your favorite?',
    'Choose one', ['chocolate', 'vanilla', 'cookies and cream'])
if flavor == 'cookies and cream':
    pg.alert('YES')
pg.alert(f'Your final score is {points}')
|
flexible
|
{
"blob_id": "16e10db90a0a0d8ee7ca5b0c7f86cc81432d87d1",
"index": 4391,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npg.alert(name)\nif name == 'Caroline':\n pg.alert('Hi ' + name)\n points += 5\n t.sleep(1)\n wb.open('https://www.textgiraffe.com/Caroline/Page2/')\nelif name == 'Bob':\n pg.alert(name + ',you are a great person!')\n points += 3\n t.sleep(1)\n wb.open('http://dreamworks.wikia.com/wiki/File:Bob_the_Builder.jpeg')\nelif name == 'Catherine':\n pg.alert(name + 'I like you already.')\n points += 2\n t.sleep(2)\n wb.open(\n 'https://www.amazon.com/Catherine-Street-Sign-Reflective-Aluminum/dp/B00KY6ZDZW'\n )\nelif name == 'James':\n pg.alert('nice to meet you' + name)\n points += 1\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=uV9LYMAEnRA')\nelif name == 'Kate':\n pg.alert('Hello!')\n points += 2\n t.sleep(1)\n wb.open(\n 'https://www.google.com/search?q=kate+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj-3cyIyJzeAhVRnOAKHRnoCtQQ_AUIDigB&biw=924&bih=639#imgrc=sbQIiK5VLfo7kM:'\n )\nelif name == 'Will':\n pg.alert('Coool!')\n ponts += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=will+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj3n93PyJzeAhWvY98KHcoWCFEQ_AUIDigB&biw=924&bih=639#imgrc=Z0hfeIoXQgHxJM:'\n )\nelse:\n pg.alert(\"I don't know you!\")\n points += 0\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=smiley+face&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjwsdL4gYveAhXtc98KHaGcAz0Q_AUIDigB&biw=1366&bih=657'\n )\n<mask token>\nif color == 'Blue':\n pg.alert('mine too!')\n points += 5\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=SoIKv3xxuMA')\nelif color == 'Pink':\n pg.alert('Do you like unicorns too?')\n points += 2\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=a-xWhG4UU_Y')\nelif color == 'Purple':\n pg.alert('cool!')\n points += 3\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=TvnYmWpD_T8')\nelif color == 'Black':\n pg.alert('ok...')\n points -= 2\n t.sleep(2)\n wb.open(\n 
'https://www.google.com/search?q=goth&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiJ-tDj-oreAhUpUt8KHWZsAzQQ_AUIDigB&biw=1366&bih=657#imgrc=odGcWJwuqRcJsM:'\n )\nelif color == 'Yellow':\n pg.alert('Like a sunflower!')\n points += 1\n t.sleep(1)\n wb.open(\n 'https://www.google.com/search?q=sunflower&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiZyKCTyZzeAhXGc98KHd8kDJ8Q_AUIDigB&biw=924&bih=639#imgrc=8kZ1NZp_9-nr5M:'\n )\nelif color == 'Brown':\n pg.alert('wow.')\n points -= 5\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')\nelse:\n pg.alert('nice')\n points += 1\n t.sleep(2)\n wb.open('https://giphy.com/explore/rainbow')\n<mask token>\nif sport == 'Hockey':\n pg.alert('yep, I guess your cool')\n points += 5\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=JDnZTUkCOBQ')\nelif sport == 'Soccer':\n pg.alert('you mean futbol...')\n points += 5\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=K-U1ZgrsGGg')\nelif sport == 'Lacrosse':\n pg.alert(' I used to play..')\n points += 2\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=o5hsPBsGD44')\nelif sport == 'Football':\n pg.alert('that cool.')\n points += 4\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?q=football&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwimsOqj_IreAhUumeAKHd-FD6kQ_AUIDigB&biw=1366&bih=657#imgrc=GCqjPQ-jqckcfM:'\n )\nelif sport == 'Field Hockey':\n pg.alert('Nice!')\n points += 2\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?q=field+hockey&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwieus2jypzeAhWvVN8KHeK1CJ8Q_AUIDigB&biw=924&bih=639#imgrc=FCpGZY2CS5KVXM:'\n )\nelif sport == 'Surfing':\n pg.alert('WOAH')\n points += 7\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=HBklS2vYEPo')\nelse:\n pg.alert('cool')\n points += 0\n t.sleep(2)\n wb.open(\n 
'https://www.google.com/search?q=no+sports&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiGqOK-_IreAhXFneAKHcEGANIQ_AUIDigB&biw=1366&bih=657#imgrc=y7acx-yoEouoUM:'\n )\n<mask token>\nif subject == 'Math':\n pg.alert('so your a mathmatician')\n points += 2\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=HNvFW9yoDYTm_QbUyKzgDw&q=addiong&oq=addiong&gs_l=img.3..0i10i24.5226.6666..6852...1.0..0.56.417.8......0....1..gws-wiz-img.......0j0i67j0i10.kcqMNDR26RY#imgrc=LqznGvY1fJpCGM:'\n )\nelif subject == 'Computer science':\n pg.alert('nice')\n points += 9\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?q=computers&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiom6vv_IreAhUuneAKHXVGA4kQ_AUIDygC&biw=1366&bih=657'\n )\nelif subject == 'English':\n pg.alert('I like it too.')\n points += 3\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=hNvFW4e3Jafp_QbR26mIDw&q=+book&oq=+book&gs_l=img.3..0i67l3j0j0i67j0l5.3464.3464..3690...0.0..0.51.51.1......0....1..gws-wiz-img.2n6KjdjVyU0'\n )\nelif subject == 'Science':\n pg.alert('Bill Nye the Science Guy.')\n points += 3\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=nDN7M0J3HXc')\nelif subject == 'Spanish':\n pg.alert('Hola! 
Como estas?')\n points += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?hl=en&authuser=0&rlz=1C1GCEA_enUS752US774&tbm=isch&q=fiesta&chips=q:fiesta,online_chips:mexican+fiesta&usg=AI4_-kQGU87DySQyv0Aqat3pdqhIpYYwjA&sa=X&ved=0ahUKEwjzjvL6lq7eAhWpTd8KHQ6-CIoQ4lYIKygE&biw=924&bih=639&dpr=1#imgrc=6H_w7py8kTIUHM:'\n )\nelif subject == 'History':\n pg.alert('In 1492 Christopher Columbus sailed the ocean blue')\n points += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=history&rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&source=lnms&sa=X&ved=0ahUKEwiZ_YDvutHeAhXOVN8KHdEUDEkQ_AUICygC'\n )\nelse:\n pg.alert('cool')\n points += 1\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=school+gif&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjqpI_f_YreAhWsd98KHblYBY8Q_AUIDigB&biw=1366&bih=657#imgrc=kk5pi12VrUoKGM:'\n )\n<mask token>\nif food == 'Pizza':\n pg.alert('Pizza Hut? Dominos?')\n points += 2\n t.sleep(2)\n wb.open('https://cooking.nytimes.com/guides/1-how-to-make-pizza')\nelif food == 'Chocolate cake':\n pg.alert('Now I want one')\n points += 9\n t.sleep(3)\n wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')\nelif food == 'Pasta':\n pg.alert('I like pasta!')\n points += 3\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?q=pasta&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiH_JXSlK7eAhWKT98KHScQASEQ_AUIDigB&biw=924&bih=639'\n )\nelif food == 'Ice cream':\n pg.alert('What kind? 
I like cookie monster.')\n points += 3\n t.sleep(2)\n wb.open('https://barefeetinthekitchen.com/homemade-ice-cream-recipe/')\nelif food == 'Fruit':\n pg.alert('Refreshing!')\n points += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=fruit&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwijobOcla7eAhVyUt8KHfONDGUQ_AUIDigB&biw=924&bih=639#imgrc=ACrdFKwEzni-QM:'\n )\nelif food == 'Chicken':\n pg.alert('Yum!')\n points += 2\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=chicken&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj59fTCutHeAhXLct8KHRV6D88Q_AUIEygB&biw=1366&bih=657'\n )\nelse:\n pg.alert('YUUMMM')\n points += 1\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=11HK5EuYwSk')\n<mask token>\nif 'Divergent' in movie:\n number = pg.prompt('Which movie is your favorite').title()\n if number == '1':\n pg.alert('Nice!')\n<mask token>\nif ice_cream == 'cookies and cream':\n pg.alert('YES')\npg.alert('Your final score is ' + str(points))\n",
"step-3": "<mask token>\npoints = 0\n<mask token>\nname = pg.prompt('What is your name? ').title()\npg.alert(name)\nif name == 'Caroline':\n pg.alert('Hi ' + name)\n points += 5\n t.sleep(1)\n wb.open('https://www.textgiraffe.com/Caroline/Page2/')\nelif name == 'Bob':\n pg.alert(name + ',you are a great person!')\n points += 3\n t.sleep(1)\n wb.open('http://dreamworks.wikia.com/wiki/File:Bob_the_Builder.jpeg')\nelif name == 'Catherine':\n pg.alert(name + 'I like you already.')\n points += 2\n t.sleep(2)\n wb.open(\n 'https://www.amazon.com/Catherine-Street-Sign-Reflective-Aluminum/dp/B00KY6ZDZW'\n )\nelif name == 'James':\n pg.alert('nice to meet you' + name)\n points += 1\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=uV9LYMAEnRA')\nelif name == 'Kate':\n pg.alert('Hello!')\n points += 2\n t.sleep(1)\n wb.open(\n 'https://www.google.com/search?q=kate+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj-3cyIyJzeAhVRnOAKHRnoCtQQ_AUIDigB&biw=924&bih=639#imgrc=sbQIiK5VLfo7kM:'\n )\nelif name == 'Will':\n pg.alert('Coool!')\n ponts += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=will+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj3n93PyJzeAhWvY98KHcoWCFEQ_AUIDigB&biw=924&bih=639#imgrc=Z0hfeIoXQgHxJM:'\n )\nelse:\n pg.alert(\"I don't know you!\")\n points += 0\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=smiley+face&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjwsdL4gYveAhXtc98KHaGcAz0Q_AUIDigB&biw=1366&bih=657'\n )\ncolor = pg.prompt('what is your favorite color? 
').title()\nif color == 'Blue':\n pg.alert('mine too!')\n points += 5\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=SoIKv3xxuMA')\nelif color == 'Pink':\n pg.alert('Do you like unicorns too?')\n points += 2\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=a-xWhG4UU_Y')\nelif color == 'Purple':\n pg.alert('cool!')\n points += 3\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=TvnYmWpD_T8')\nelif color == 'Black':\n pg.alert('ok...')\n points -= 2\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=goth&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiJ-tDj-oreAhUpUt8KHWZsAzQQ_AUIDigB&biw=1366&bih=657#imgrc=odGcWJwuqRcJsM:'\n )\nelif color == 'Yellow':\n pg.alert('Like a sunflower!')\n points += 1\n t.sleep(1)\n wb.open(\n 'https://www.google.com/search?q=sunflower&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiZyKCTyZzeAhXGc98KHd8kDJ8Q_AUIDigB&biw=924&bih=639#imgrc=8kZ1NZp_9-nr5M:'\n )\nelif color == 'Brown':\n pg.alert('wow.')\n points -= 5\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')\nelse:\n pg.alert('nice')\n points += 1\n t.sleep(2)\n wb.open('https://giphy.com/explore/rainbow')\nsport = pg.prompt('What is your favorite sport? 
').title()\nif sport == 'Hockey':\n pg.alert('yep, I guess your cool')\n points += 5\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=JDnZTUkCOBQ')\nelif sport == 'Soccer':\n pg.alert('you mean futbol...')\n points += 5\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=K-U1ZgrsGGg')\nelif sport == 'Lacrosse':\n pg.alert(' I used to play..')\n points += 2\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=o5hsPBsGD44')\nelif sport == 'Football':\n pg.alert('that cool.')\n points += 4\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?q=football&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwimsOqj_IreAhUumeAKHd-FD6kQ_AUIDigB&biw=1366&bih=657#imgrc=GCqjPQ-jqckcfM:'\n )\nelif sport == 'Field Hockey':\n pg.alert('Nice!')\n points += 2\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?q=field+hockey&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwieus2jypzeAhWvVN8KHeK1CJ8Q_AUIDigB&biw=924&bih=639#imgrc=FCpGZY2CS5KVXM:'\n )\nelif sport == 'Surfing':\n pg.alert('WOAH')\n points += 7\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=HBklS2vYEPo')\nelse:\n pg.alert('cool')\n points += 0\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=no+sports&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiGqOK-_IreAhXFneAKHcEGANIQ_AUIDigB&biw=1366&bih=657#imgrc=y7acx-yoEouoUM:'\n )\nsubject = pg.prompt('What is your favorite subject?').title()\nif subject == 'Math':\n pg.alert('so your a mathmatician')\n points += 2\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=HNvFW9yoDYTm_QbUyKzgDw&q=addiong&oq=addiong&gs_l=img.3..0i10i24.5226.6666..6852...1.0..0.56.417.8......0....1..gws-wiz-img.......0j0i67j0i10.kcqMNDR26RY#imgrc=LqznGvY1fJpCGM:'\n )\nelif subject == 'Computer science':\n pg.alert('nice')\n points += 9\n t.sleep(3)\n wb.open(\n 
'https://www.google.com/search?q=computers&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiom6vv_IreAhUuneAKHXVGA4kQ_AUIDygC&biw=1366&bih=657'\n )\nelif subject == 'English':\n pg.alert('I like it too.')\n points += 3\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=hNvFW4e3Jafp_QbR26mIDw&q=+book&oq=+book&gs_l=img.3..0i67l3j0j0i67j0l5.3464.3464..3690...0.0..0.51.51.1......0....1..gws-wiz-img.2n6KjdjVyU0'\n )\nelif subject == 'Science':\n pg.alert('Bill Nye the Science Guy.')\n points += 3\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=nDN7M0J3HXc')\nelif subject == 'Spanish':\n pg.alert('Hola! Como estas?')\n points += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?hl=en&authuser=0&rlz=1C1GCEA_enUS752US774&tbm=isch&q=fiesta&chips=q:fiesta,online_chips:mexican+fiesta&usg=AI4_-kQGU87DySQyv0Aqat3pdqhIpYYwjA&sa=X&ved=0ahUKEwjzjvL6lq7eAhWpTd8KHQ6-CIoQ4lYIKygE&biw=924&bih=639&dpr=1#imgrc=6H_w7py8kTIUHM:'\n )\nelif subject == 'History':\n pg.alert('In 1492 Christopher Columbus sailed the ocean blue')\n points += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=history&rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&source=lnms&sa=X&ved=0ahUKEwiZ_YDvutHeAhXOVN8KHdEUDEkQ_AUICygC'\n )\nelse:\n pg.alert('cool')\n points += 1\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=school+gif&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjqpI_f_YreAhWsd98KHblYBY8Q_AUIDigB&biw=1366&bih=657#imgrc=kk5pi12VrUoKGM:'\n )\nfood = pg.prompt('What is your favorite food?').title()\nif food == 'Pizza':\n pg.alert('Pizza Hut? 
Dominos?')\n points += 2\n t.sleep(2)\n wb.open('https://cooking.nytimes.com/guides/1-how-to-make-pizza')\nelif food == 'Chocolate cake':\n pg.alert('Now I want one')\n points += 9\n t.sleep(3)\n wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')\nelif food == 'Pasta':\n pg.alert('I like pasta!')\n points += 3\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?q=pasta&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiH_JXSlK7eAhWKT98KHScQASEQ_AUIDigB&biw=924&bih=639'\n )\nelif food == 'Ice cream':\n pg.alert('What kind? I like cookie monster.')\n points += 3\n t.sleep(2)\n wb.open('https://barefeetinthekitchen.com/homemade-ice-cream-recipe/')\nelif food == 'Fruit':\n pg.alert('Refreshing!')\n points += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=fruit&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwijobOcla7eAhVyUt8KHfONDGUQ_AUIDigB&biw=924&bih=639#imgrc=ACrdFKwEzni-QM:'\n )\nelif food == 'Chicken':\n pg.alert('Yum!')\n points += 2\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=chicken&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj59fTCutHeAhXLct8KHRV6D88Q_AUIEygB&biw=1366&bih=657'\n )\nelse:\n pg.alert('YUUMMM')\n points += 1\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=11HK5EuYwSk')\nmovie = pg.prompt('What is your favorite movie series?').title()\nif 'Divergent' in movie:\n number = pg.prompt('Which movie is your favorite').title()\n if number == '1':\n pg.alert('Nice!')\nice_cream = pg.confirm('Which of these flavors is your favorite?',\n 'Choose one', ['chocolate', 'vanilla', 'cookies and cream'])\nif ice_cream == 'cookies and cream':\n pg.alert('YES')\npg.alert('Your final score is ' + str(points))\n",
"step-4": "import webbrowser as wb\npoints = 0\nimport time as t\nimport pyautogui as pg\nname = pg.prompt('What is your name? ').title()\npg.alert(name)\nif name == 'Caroline':\n pg.alert('Hi ' + name)\n points += 5\n t.sleep(1)\n wb.open('https://www.textgiraffe.com/Caroline/Page2/')\nelif name == 'Bob':\n pg.alert(name + ',you are a great person!')\n points += 3\n t.sleep(1)\n wb.open('http://dreamworks.wikia.com/wiki/File:Bob_the_Builder.jpeg')\nelif name == 'Catherine':\n pg.alert(name + 'I like you already.')\n points += 2\n t.sleep(2)\n wb.open(\n 'https://www.amazon.com/Catherine-Street-Sign-Reflective-Aluminum/dp/B00KY6ZDZW'\n )\nelif name == 'James':\n pg.alert('nice to meet you' + name)\n points += 1\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=uV9LYMAEnRA')\nelif name == 'Kate':\n pg.alert('Hello!')\n points += 2\n t.sleep(1)\n wb.open(\n 'https://www.google.com/search?q=kate+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj-3cyIyJzeAhVRnOAKHRnoCtQQ_AUIDigB&biw=924&bih=639#imgrc=sbQIiK5VLfo7kM:'\n )\nelif name == 'Will':\n pg.alert('Coool!')\n ponts += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=will+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj3n93PyJzeAhWvY98KHcoWCFEQ_AUIDigB&biw=924&bih=639#imgrc=Z0hfeIoXQgHxJM:'\n )\nelse:\n pg.alert(\"I don't know you!\")\n points += 0\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=smiley+face&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjwsdL4gYveAhXtc98KHaGcAz0Q_AUIDigB&biw=1366&bih=657'\n )\ncolor = pg.prompt('what is your favorite color? 
').title()\nif color == 'Blue':\n pg.alert('mine too!')\n points += 5\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=SoIKv3xxuMA')\nelif color == 'Pink':\n pg.alert('Do you like unicorns too?')\n points += 2\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=a-xWhG4UU_Y')\nelif color == 'Purple':\n pg.alert('cool!')\n points += 3\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=TvnYmWpD_T8')\nelif color == 'Black':\n pg.alert('ok...')\n points -= 2\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=goth&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiJ-tDj-oreAhUpUt8KHWZsAzQQ_AUIDigB&biw=1366&bih=657#imgrc=odGcWJwuqRcJsM:'\n )\nelif color == 'Yellow':\n pg.alert('Like a sunflower!')\n points += 1\n t.sleep(1)\n wb.open(\n 'https://www.google.com/search?q=sunflower&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiZyKCTyZzeAhXGc98KHd8kDJ8Q_AUIDigB&biw=924&bih=639#imgrc=8kZ1NZp_9-nr5M:'\n )\nelif color == 'Brown':\n pg.alert('wow.')\n points -= 5\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')\nelse:\n pg.alert('nice')\n points += 1\n t.sleep(2)\n wb.open('https://giphy.com/explore/rainbow')\nsport = pg.prompt('What is your favorite sport? 
').title()\nif sport == 'Hockey':\n pg.alert('yep, I guess your cool')\n points += 5\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=JDnZTUkCOBQ')\nelif sport == 'Soccer':\n pg.alert('you mean futbol...')\n points += 5\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=K-U1ZgrsGGg')\nelif sport == 'Lacrosse':\n pg.alert(' I used to play..')\n points += 2\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=o5hsPBsGD44')\nelif sport == 'Football':\n pg.alert('that cool.')\n points += 4\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?q=football&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwimsOqj_IreAhUumeAKHd-FD6kQ_AUIDigB&biw=1366&bih=657#imgrc=GCqjPQ-jqckcfM:'\n )\nelif sport == 'Field Hockey':\n pg.alert('Nice!')\n points += 2\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?q=field+hockey&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwieus2jypzeAhWvVN8KHeK1CJ8Q_AUIDigB&biw=924&bih=639#imgrc=FCpGZY2CS5KVXM:'\n )\nelif sport == 'Surfing':\n pg.alert('WOAH')\n points += 7\n t.sleep(1)\n wb.open('https://www.youtube.com/watch?v=HBklS2vYEPo')\nelse:\n pg.alert('cool')\n points += 0\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=no+sports&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiGqOK-_IreAhXFneAKHcEGANIQ_AUIDigB&biw=1366&bih=657#imgrc=y7acx-yoEouoUM:'\n )\nsubject = pg.prompt('What is your favorite subject?').title()\nif subject == 'Math':\n pg.alert('so your a mathmatician')\n points += 2\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=HNvFW9yoDYTm_QbUyKzgDw&q=addiong&oq=addiong&gs_l=img.3..0i10i24.5226.6666..6852...1.0..0.56.417.8......0....1..gws-wiz-img.......0j0i67j0i10.kcqMNDR26RY#imgrc=LqznGvY1fJpCGM:'\n )\nelif subject == 'Computer science':\n pg.alert('nice')\n points += 9\n t.sleep(3)\n wb.open(\n 
'https://www.google.com/search?q=computers&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiom6vv_IreAhUuneAKHXVGA4kQ_AUIDygC&biw=1366&bih=657'\n )\nelif subject == 'English':\n pg.alert('I like it too.')\n points += 3\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=hNvFW4e3Jafp_QbR26mIDw&q=+book&oq=+book&gs_l=img.3..0i67l3j0j0i67j0l5.3464.3464..3690...0.0..0.51.51.1......0....1..gws-wiz-img.2n6KjdjVyU0'\n )\nelif subject == 'Science':\n pg.alert('Bill Nye the Science Guy.')\n points += 3\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=nDN7M0J3HXc')\nelif subject == 'Spanish':\n pg.alert('Hola! Como estas?')\n points += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?hl=en&authuser=0&rlz=1C1GCEA_enUS752US774&tbm=isch&q=fiesta&chips=q:fiesta,online_chips:mexican+fiesta&usg=AI4_-kQGU87DySQyv0Aqat3pdqhIpYYwjA&sa=X&ved=0ahUKEwjzjvL6lq7eAhWpTd8KHQ6-CIoQ4lYIKygE&biw=924&bih=639&dpr=1#imgrc=6H_w7py8kTIUHM:'\n )\nelif subject == 'History':\n pg.alert('In 1492 Christopher Columbus sailed the ocean blue')\n points += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=history&rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&source=lnms&sa=X&ved=0ahUKEwiZ_YDvutHeAhXOVN8KHdEUDEkQ_AUICygC'\n )\nelse:\n pg.alert('cool')\n points += 1\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=school+gif&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjqpI_f_YreAhWsd98KHblYBY8Q_AUIDigB&biw=1366&bih=657#imgrc=kk5pi12VrUoKGM:'\n )\nfood = pg.prompt('What is your favorite food?').title()\nif food == 'Pizza':\n pg.alert('Pizza Hut? 
Dominos?')\n points += 2\n t.sleep(2)\n wb.open('https://cooking.nytimes.com/guides/1-how-to-make-pizza')\nelif food == 'Chocolate cake':\n pg.alert('Now I want one')\n points += 9\n t.sleep(3)\n wb.open('https://www.youtube.com/watch?v=dsJtgmAhFF4')\nelif food == 'Pasta':\n pg.alert('I like pasta!')\n points += 3\n t.sleep(3)\n wb.open(\n 'https://www.google.com/search?q=pasta&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiH_JXSlK7eAhWKT98KHScQASEQ_AUIDigB&biw=924&bih=639'\n )\nelif food == 'Ice cream':\n pg.alert('What kind? I like cookie monster.')\n points += 3\n t.sleep(2)\n wb.open('https://barefeetinthekitchen.com/homemade-ice-cream-recipe/')\nelif food == 'Fruit':\n pg.alert('Refreshing!')\n points += 3\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=fruit&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwijobOcla7eAhVyUt8KHfONDGUQ_AUIDigB&biw=924&bih=639#imgrc=ACrdFKwEzni-QM:'\n )\nelif food == 'Chicken':\n pg.alert('Yum!')\n points += 2\n t.sleep(2)\n wb.open(\n 'https://www.google.com/search?q=chicken&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj59fTCutHeAhXLct8KHRV6D88Q_AUIEygB&biw=1366&bih=657'\n )\nelse:\n pg.alert('YUUMMM')\n points += 1\n t.sleep(2)\n wb.open('https://www.youtube.com/watch?v=11HK5EuYwSk')\nmovie = pg.prompt('What is your favorite movie series?').title()\nif 'Divergent' in movie:\n number = pg.prompt('Which movie is your favorite').title()\n if number == '1':\n pg.alert('Nice!')\nice_cream = pg.confirm('Which of these flavors is your favorite?',\n 'Choose one', ['chocolate', 'vanilla', 'cookies and cream'])\nif ice_cream == 'cookies and cream':\n pg.alert('YES')\npg.alert('Your final score is ' + str(points))\n",
"step-5": "import webbrowser as wb\r\npoints = 0\r\nimport time as t\r\nimport pyautogui as pg\r\n\r\n\r\nname = pg.prompt(\"What is your name? \").title()\r\n\r\npg.alert(name)\r\nif name == \"Caroline\":\r\n pg.alert (\"Hi \" + name)\r\n points += 5\r\n t.sleep(1) \r\n wb.open (\"https://www.textgiraffe.com/Caroline/Page2/\")\r\nelif name == \"Bob\":\r\n pg.alert (name + \",you are a great person!\")\r\n points += 3\r\n t.sleep(1)\r\n wb.open(\"http://dreamworks.wikia.com/wiki/File:Bob_the_Builder.jpeg\")\r\nelif name == \"Catherine\":\r\n pg.alert (name + \"I like you already.\")\r\n points += 2\r\n t.sleep(2)\r\n wb.open (\"https://www.amazon.com/Catherine-Street-Sign-Reflective-Aluminum/dp/B00KY6ZDZW\")\r\nelif name == \"James\":\r\n pg.alert (\"nice to meet you\" + name)\r\n points += 1\r\n t.sleep(1)\r\n wb.open (\"https://www.youtube.com/watch?v=uV9LYMAEnRA\")\r\nelif name == \"Kate\":\r\n pg.alert (\"Hello!\")\r\n points += 2\r\n t.sleep (1)\r\n wb.open (\"https://www.google.com/search?q=kate+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj-3cyIyJzeAhVRnOAKHRnoCtQQ_AUIDigB&biw=924&bih=639#imgrc=sbQIiK5VLfo7kM:\")\r\nelif name == \"Will\":\r\n pg.alert (\"Coool!\")\r\n ponts += 3\r\n t.sleep (2)\r\n wb.open (\"https://www.google.com/search?q=will+name&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj3n93PyJzeAhWvY98KHcoWCFEQ_AUIDigB&biw=924&bih=639#imgrc=Z0hfeIoXQgHxJM:\")\r\nelse:\r\n pg.alert (\"I don't know you!\")\r\n points += 0\r\n t.sleep(2)\r\n wb.open (\"https://www.google.com/search?q=smiley+face&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjwsdL4gYveAhXtc98KHaGcAz0Q_AUIDigB&biw=1366&bih=657\")\r\ncolor = pg.prompt (\"what is your favorite color? 
\").title()\r\nif color == \"Blue\":\r\n pg.alert (\"mine too!\")\r\n points += 5\r\n t.sleep(1)\r\n wb.open (\"https://www.youtube.com/watch?v=SoIKv3xxuMA\")\r\nelif color == \"Pink\":\r\n pg.alert (\"Do you like unicorns too?\")\r\n points += 2\r\n t.sleep(2)\r\n wb.open (\"https://www.youtube.com/watch?v=a-xWhG4UU_Y\")\r\nelif color == \"Purple\":\r\n pg.alert (\"cool!\")\r\n points += 3\r\n t.sleep(1)\r\n wb.open (\"https://www.youtube.com/watch?v=TvnYmWpD_T8\")\r\nelif color == \"Black\":\r\n pg.alert (\"ok...\")\r\n points -= 2\r\n t.sleep(2)\r\n wb.open (\"https://www.google.com/search?q=goth&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiJ-tDj-oreAhUpUt8KHWZsAzQQ_AUIDigB&biw=1366&bih=657#imgrc=odGcWJwuqRcJsM:\")\r\nelif color == \"Yellow\":\r\n pg.alert (\"Like a sunflower!\")\r\n points += 1\r\n t.sleep (1)\r\n wb.open (\"https://www.google.com/search?q=sunflower&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiZyKCTyZzeAhXGc98KHd8kDJ8Q_AUIDigB&biw=924&bih=639#imgrc=8kZ1NZp_9-nr5M:\")\r\nelif color == \"Brown\":\r\n pg.alert (\"wow.\")\r\n points -= 5\r\n t.sleep (1)\r\n wb.open (\"https://www.youtube.com/watch?v=dsJtgmAhFF4\")\r\nelse:\r\n pg.alert(\"nice\")\r\n points += 1\r\n t.sleep(2)\r\n wb.open (\"https://giphy.com/explore/rainbow\")\r\nsport = pg.prompt (\"What is your favorite sport? 
\").title()\r\nif sport == \"Hockey\":\r\n pg.alert (\"yep, I guess your cool\")\r\n points += 5\r\n t.sleep(2)\r\n wb.open (\"https://www.youtube.com/watch?v=JDnZTUkCOBQ\")\r\nelif sport == \"Soccer\":\r\n pg.alert (\"you mean futbol...\")\r\n points += 5\r\n t.sleep(2)\r\n wb.open (\"https://www.youtube.com/watch?v=K-U1ZgrsGGg\")\r\nelif sport == \"Lacrosse\":\r\n pg.alert (\" I used to play..\")\r\n points += 2\r\n t.sleep(2)\r\n wb.open (\"https://www.youtube.com/watch?v=o5hsPBsGD44\")\r\nelif sport == \"Football\":\r\n pg.alert (\"that cool.\")\r\n points += 4\r\n t.sleep(3)\r\n wb.open (\"https://www.google.com/search?q=football&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwimsOqj_IreAhUumeAKHd-FD6kQ_AUIDigB&biw=1366&bih=657#imgrc=GCqjPQ-jqckcfM:\")\r\nelif sport == \"Field Hockey\":\r\n pg.alert (\"Nice!\")\r\n points += 2\r\n t.sleep(3)\r\n wb.open (\"https://www.google.com/search?q=field+hockey&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwieus2jypzeAhWvVN8KHeK1CJ8Q_AUIDigB&biw=924&bih=639#imgrc=FCpGZY2CS5KVXM:\")\r\nelif sport == \"Surfing\":\r\n pg.alert (\"WOAH\")\r\n points += 7\r\n t.sleep(1)\r\n wb.open (\"https://www.youtube.com/watch?v=HBklS2vYEPo\")\r\nelse:\r\n pg.alert (\"cool\")\r\n points += 0\r\n t.sleep(2)\r\n wb.open (\"https://www.google.com/search?q=no+sports&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiGqOK-_IreAhXFneAKHcEGANIQ_AUIDigB&biw=1366&bih=657#imgrc=y7acx-yoEouoUM:\")\r\nsubject = pg.prompt (\"What is your favorite subject?\").title()\r\nif subject == \"Math\":\r\n pg.alert (\"so your a mathmatician\")\r\n points += 2\r\n t.sleep(3)\r\n wb.open (\"https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=HNvFW9yoDYTm_QbUyKzgDw&q=addiong&oq=addiong&gs_l=img.3..0i10i24.5226.6666..6852...1.0..0.56.417.8......0....1..gws-wiz-img.......0j0i67j0i10.kcqMNDR26RY#imgrc=LqznGvY1fJpCGM:\")\r\nelif subject == \"Computer science\":\r\n pg.alert (\"nice\")\r\n 
points += 9\r\n t.sleep(3)\r\n wb.open (\"https://www.google.com/search?q=computers&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiom6vv_IreAhUuneAKHXVGA4kQ_AUIDygC&biw=1366&bih=657\")\r\nelif subject == \"English\":\r\n pg.alert (\"I like it too.\")\r\n points += 3\r\n t.sleep(3)\r\n wb.open (\"https://www.google.com/search?rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&sa=1&ei=hNvFW4e3Jafp_QbR26mIDw&q=+book&oq=+book&gs_l=img.3..0i67l3j0j0i67j0l5.3464.3464..3690...0.0..0.51.51.1......0....1..gws-wiz-img.2n6KjdjVyU0\")\r\nelif subject == \"Science\":\r\n pg.alert (\"Bill Nye the Science Guy.\")\r\n points += 3\r\n t.sleep(2)\r\n wb.open(\"https://www.youtube.com/watch?v=nDN7M0J3HXc\")\r\nelif subject == \"Spanish\":\r\n pg.alert (\"Hola! Como estas?\")\r\n points += 3\r\n t.sleep(2)\r\n wb.open (\"https://www.google.com/search?hl=en&authuser=0&rlz=1C1GCEA_enUS752US774&tbm=isch&q=fiesta&chips=q:fiesta,online_chips:mexican+fiesta&usg=AI4_-kQGU87DySQyv0Aqat3pdqhIpYYwjA&sa=X&ved=0ahUKEwjzjvL6lq7eAhWpTd8KHQ6-CIoQ4lYIKygE&biw=924&bih=639&dpr=1#imgrc=6H_w7py8kTIUHM:\")\r\nelif subject == \"History\":\r\n pg.alert (\"In 1492 Christopher Columbus sailed the ocean blue\")\r\n points += 3\r\n t.sleep(2)\r\n wb.open (\"https://www.google.com/search?q=history&rlz=1C1GCEA_enUS752US774&biw=1366&bih=657&tbm=isch&source=lnms&sa=X&ved=0ahUKEwiZ_YDvutHeAhXOVN8KHdEUDEkQ_AUICygC\")\r\nelse:\r\n pg.alert (\"cool\")\r\n points += 1\r\n t.sleep(2)\r\n wb.open (\"https://www.google.com/search?q=school+gif&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwjqpI_f_YreAhWsd98KHblYBY8Q_AUIDigB&biw=1366&bih=657#imgrc=kk5pi12VrUoKGM:\")\r\n\r\nfood = pg.prompt (\"What is your favorite food?\").title()\r\nif food == \"Pizza\":\r\n pg.alert (\"Pizza Hut? 
Dominos?\")\r\n points += 2\r\n t.sleep(2)\r\n wb.open (\"https://cooking.nytimes.com/guides/1-how-to-make-pizza\")\r\nelif food == \"Chocolate cake\":\r\n pg.alert (\"Now I want one\")\r\n points += 9\r\n t.sleep(3)\r\n wb.open (\"https://www.youtube.com/watch?v=dsJtgmAhFF4\")\r\nelif food == \"Pasta\":\r\n pg.alert (\"I like pasta!\")\r\n points += 3\r\n t.sleep(3)\r\n wb.open (\"https://www.google.com/search?q=pasta&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwiH_JXSlK7eAhWKT98KHScQASEQ_AUIDigB&biw=924&bih=639\")\r\nelif food == \"Ice cream\":\r\n pg.alert (\"What kind? I like cookie monster.\")\r\n points += 3\r\n t.sleep(2)\r\n wb.open(\"https://barefeetinthekitchen.com/homemade-ice-cream-recipe/\")\r\nelif food == \"Fruit\":\r\n pg.alert (\"Refreshing!\")\r\n points += 3\r\n t.sleep(2)\r\n wb.open (\"https://www.google.com/search?q=fruit&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwijobOcla7eAhVyUt8KHfONDGUQ_AUIDigB&biw=924&bih=639#imgrc=ACrdFKwEzni-QM:\")\r\nelif food == \"Chicken\":\r\n pg.alert (\"Yum!\")\r\n points += 2\r\n t.sleep(2)\r\n wb.open (\"https://www.google.com/search?q=chicken&rlz=1C1GCEA_enUS752US774&source=lnms&tbm=isch&sa=X&ved=0ahUKEwj59fTCutHeAhXLct8KHRV6D88Q_AUIEygB&biw=1366&bih=657\")\r\nelse:\r\n pg.alert (\"YUUMMM\")\r\n points += 1\r\n t.sleep(2)\r\n wb.open (\"https://www.youtube.com/watch?v=11HK5EuYwSk\")\r\n\r\nmovie = pg.prompt (\"What is your favorite movie series?\").title()\r\nif \"Divergent\" in movie:\r\n number = pg.prompt(\"Which movie is your favorite\").title()\r\n\r\n if number == \"1\":\r\n pg.alert(\"Nice!\")\r\n\r\nice_cream = pg.confirm(\"Which of these flavors is your favorite?\", \"Choose one\", [\"chocolate\", \"vanilla\", \"cookies and cream\"])\r\nif ice_cream == \"cookies and cream\":\r\n pg.alert(\"YES\")\r\n\r\npg.alert (\"Your final score is \" + str(points))\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)
)
print(df2.sum())
<|reserved_special_token_1|>
<|reserved_special_token_0|>
df = pd.DataFrame([['Hospital1', '2019-10-01'], ['Hospital2', '2019-10-01'],
['Hospital3', '2019-10-01'], ['Hospital1', '2019-10-01'], ['Hospital2',
'2019-10-02'], ['Hospital3', '2019-10-02'], ['Hospital2', '2019-10-03'],
['Hospital2', '2019-10-04'], ['Hospital3', '2019-10-04'], ['Hospital3',
'2019-10-05'], ['Hospital1', '2019-10-06'], ['Hospital1', '2019-10-07'],
['Hospital1', '2019-10-08']], columns=['Hospital_Name', 'Date'])
df2 = pd.DataFrame([['Hospital1', 12, 15, 16, 12], ['Hospital2', 10, 17, 14,
12], ['Hospital2', 15, 20, 12, 12]], columns=['Hospital_Name',
'2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])
print(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)
)
print(df2.sum())
<|reserved_special_token_1|>
# Demo script: per-hospital event counts and column-wise totals with pandas.
import pandas as pd
import numpy as np
# df: one row per (hospital, date) event; hospitals repeat across dates.
df = pd.DataFrame([['Hospital1', '2019-10-01'], ['Hospital2', '2019-10-01'],
    ['Hospital3', '2019-10-01'], ['Hospital1', '2019-10-01'], ['Hospital2',
    '2019-10-02'], ['Hospital3', '2019-10-02'], ['Hospital2', '2019-10-03'],
    ['Hospital2', '2019-10-04'], ['Hospital3', '2019-10-04'], ['Hospital3',
    '2019-10-05'], ['Hospital1', '2019-10-06'], ['Hospital1', '2019-10-07'],
    ['Hospital1', '2019-10-08']], columns=['Hospital_Name', 'Date'])
# df2: wide layout — one numeric column per date, one row per hospital
# (note 'Hospital2' appears twice; both rows are kept as-is).
df2 = pd.DataFrame([['Hospital1', 12, 15, 16, 12], ['Hospital2', 10, 17, 14,
    12], ['Hospital2', 15, 20, 12, 12]], columns=['Hospital_Name',
    '2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])
# Count rows per hospital: np.size over the 'Date' column acts as a counter.
print(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)
    )
# Column-wise sums of df2; the string 'Hospital_Name' column is concatenated.
print(df2.sum())
|
flexible
|
{
"blob_id": "8d8f1f0dbb76b5c536bd1a2142bb61c51dd75075",
"index": 9573,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)\n )\nprint(df2.sum())\n",
"step-3": "<mask token>\ndf = pd.DataFrame([['Hospital1', '2019-10-01'], ['Hospital2', '2019-10-01'],\n ['Hospital3', '2019-10-01'], ['Hospital1', '2019-10-01'], ['Hospital2',\n '2019-10-02'], ['Hospital3', '2019-10-02'], ['Hospital2', '2019-10-03'],\n ['Hospital2', '2019-10-04'], ['Hospital3', '2019-10-04'], ['Hospital3',\n '2019-10-05'], ['Hospital1', '2019-10-06'], ['Hospital1', '2019-10-07'],\n ['Hospital1', '2019-10-08']], columns=['Hospital_Name', 'Date'])\ndf2 = pd.DataFrame([['Hospital1', 12, 15, 16, 12], ['Hospital2', 10, 17, 14,\n 12], ['Hospital2', 15, 20, 12, 12]], columns=['Hospital_Name',\n '2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])\nprint(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)\n )\nprint(df2.sum())\n",
"step-4": "import pandas as pd\nimport numpy as np\ndf = pd.DataFrame([['Hospital1', '2019-10-01'], ['Hospital2', '2019-10-01'],\n ['Hospital3', '2019-10-01'], ['Hospital1', '2019-10-01'], ['Hospital2',\n '2019-10-02'], ['Hospital3', '2019-10-02'], ['Hospital2', '2019-10-03'],\n ['Hospital2', '2019-10-04'], ['Hospital3', '2019-10-04'], ['Hospital3',\n '2019-10-05'], ['Hospital1', '2019-10-06'], ['Hospital1', '2019-10-07'],\n ['Hospital1', '2019-10-08']], columns=['Hospital_Name', 'Date'])\ndf2 = pd.DataFrame([['Hospital1', 12, 15, 16, 12], ['Hospital2', 10, 17, 14,\n 12], ['Hospital2', 15, 20, 12, 12]], columns=['Hospital_Name',\n '2019-10-01', '2019-10-02', '2019-10-03', '2019-10-04'])\nprint(pd.pivot_table(df, values='Date', index='Hospital_Name', aggfunc=np.size)\n )\nprint(df2.sum())\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import datetime
import logging
import os
from functools import lru_cache
from functools import wraps

from authlib.jose import JsonWebKey, jwt
from flask import g, request, jsonify

import lorem_ipsum
from lorem_ipsum.model import User, AppContext
from lorem_ipsum.model import Permission, BlacklistToken
LOGGER = logging.getLogger('lorem-ipsum')
def app_context():
    """Return the request-scoped :class:`AppContext`, building it on first use.

    The context is memoized on ``flask.g``, so every call within a single
    request shares one instance.
    """
    if 'app_context' in g:
        return g.app_context
    context = lorem_ipsum.create_app_context()
    g.app_context = context
    return context
@lru_cache()
def get_jwk():
    """Load the JWKS (public RSA key set) used to verify access tokens.

    The public-key file path comes from the app config; the resulting JWKS
    dict is memoized via ``lru_cache`` so the file is read once per process.

    :return: dict of shape ``{'keys': [...]}`` with a single key id ``demo_key``.
    """
    LOGGER.debug('Loading jwk from public key...')
    # Read the PEM bytes; the `with` block guarantees the file handle closes.
    with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:
        key_data = _key_file.read()
    LOGGER.debug(key_data)
    key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})
    _jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}
    LOGGER.debug(_jwks)
    return _jwks
class AuthenticationError(ValueError):
    """Raised when a bearer token is missing, malformed, or blacklisted (handled as HTTP 401)."""
    pass
class AuthorizationError(ValueError):
    """Raised when the token's user is unknown or lacks a required permission (handled as HTTP 403)."""
    pass
class BearerTokenValidator:
    """Fluent validator for a JWT bearer token and the user it identifies.

    Every ``check_*`` step raises :class:`AuthenticationError` or
    :class:`AuthorizationError` on failure and returns ``self`` on success,
    so the checks can be chained.
    """

    def __init__(self, access_token, app_context: AppContext):
        self.access_token = access_token
        self.blacklist_token_repo = app_context.blacklist_token_repo
        # Decoding also verifies the signature against the public JWK set.
        self.payload = app_context.user_service.decode_auth_token(
            access_token, get_jwk())

    def check_is_blacklisted(self):
        """Reject tokens that were explicitly invalidated (e.g. on logout)."""
        if BlacklistToken.check_blacklist(self.access_token,
                                          self.blacklist_token_repo):
            LOGGER.debug('Token blacklisted.')
            raise AuthenticationError('Invalid token.')
        return self

    def check_username_claim(self):
        """Require a non-empty ``sub`` (username) claim in the payload."""
        if not self.payload.get('sub'):
            LOGGER.debug('Token missing sub.')
            raise AuthorizationError('Forbidden.')
        return self

    def check_user_exists(self, user):
        """Require that the token subject maps to an actual user record."""
        if not user:
            LOGGER.debug('Token user not found.')
            raise AuthorizationError('Forbidden.')
        return self

    def check_has_permissions(self, user: User, permissions: list):
        """Require that *user*'s role grants every permission in *permissions*."""
        missing = [permission for permission in permissions
                   if not user.role.has_permission(
                       Permission.from_enum(permission))]
        for permission in missing:
            LOGGER.debug(f'Missing permission {permission}.')
        LOGGER.debug(f'Required permissions: {permissions}')
        if missing:
            raise AuthorizationError('Forbidden.')
        return self

    @staticmethod
    def from_authorization_header(authorization_header: str, app_context:
        AppContext):
        """Build a validator from a raw ``Authorization`` header value."""
        if not authorization_header:
            LOGGER.debug('Authorization header not found.')
            raise AuthenticationError('Invalid token.')
        if 'Bearer ' not in authorization_header:
            LOGGER.debug('Bearer token not found.')
            raise AuthenticationError('Invalid token.')
        access_token = authorization_header.split('Bearer')[1].strip()
        LOGGER.debug(f'Bearer token is:\n"{access_token}"')
        return BearerTokenValidator(access_token, app_context)
def should_skip_auth(flask_request):
    """
    Decide whether authentication should be bypassed for this request.

    HEAD requests and CORS preflight (OPTIONS, e.g. from a React client)
    carry no meaningful body and are allowed through without a token.

    :param flask_request: Flask request object.
    :return: True when auth should be skipped.
    """
    return flask_request.method in ('HEAD', 'OPTIONS')
def requires_permission(permissions: list):
    """Decorator factory: require a valid, non-blacklisted bearer token whose
    user holds all of *permissions* before running the wrapped view.

    On success the authenticated user and raw token are exposed to the view
    as ``g.user`` and ``g.access_token``. HEAD/OPTIONS requests bypass auth
    and get an immediate ``"ok"`` response.

    :param permissions: permission enum values the user's role must grant.
    :raises AuthenticationError: missing/malformed/blacklisted token (-> 401).
    :raises AuthorizationError: unknown user or missing permission (-> 403).
    """

    def requires_permission_decorator(function):
        # functools.wraps preserves the view's __name__, __doc__ and module
        # (the original only copied __name__ by hand, losing the rest).
        @wraps(function)
        def wrapper(*args, **kwargs):
            LOGGER.info(f'Authorization...\n{request.headers}')
            if should_skip_auth(request):
                return jsonify('ok')
            authorization_header = request.headers.get('Authorization')
            context = app_context()
            # Blacklist check and user lookup hit the DB, so validation and
            # the view itself run inside a single transaction.
            with context.transaction_manager.transaction:
                bearer_token_validator = BearerTokenValidator.from_authorization_header(authorization_header, context) \
                    .check_is_blacklisted() \
                    .check_username_claim()
                user = context.user_repo.get(username=bearer_token_validator.payload['sub'])
                bearer_token_validator.check_user_exists(user) \
                    .check_has_permissions(user, permissions)
                g.access_token = bearer_token_validator.access_token
                g.user = user
                _result = function(*args, **kwargs)
            return _result
        return wrapper
    return requires_permission_decorator
class ExceptionHandlers:
    """Registers Flask error handlers that map auth errors to HTTP statuses."""

    def __init__(self, app):
        # Instantiating this class wires the handlers onto *app*; the inner
        # functions are registered via app.errorhandler and never called here.

        @app.errorhandler(AuthorizationError)
        def handle_authorization_exception(e):
            """Return 403 Forbidden."""
            return jsonify(str(e)), 403

        @app.errorhandler(AuthenticationError)
        def handle_authentication_exception(e):
            """Return 401 authentication error."""
            return jsonify(str(e)), 401
@lru_cache()
def jwk_key():
    """Load and memoize the RSA private signing key.

    The key file path is taken from the ``jwk_private_key_path`` environment
    variable when set (and non-empty), otherwise from the app config.
    """
    path = os.environ.get('jwk_private_key_path')
    if not path:
        path = app_context().config['jwk_private_key_path']
    with open(path, 'rb') as key_file:
        return JsonWebKey.import_key(key_file.read())
def new_token(payload: dict):
    """Sign *payload* as an RS256 JWT with the service key and return it as str."""
    signing_key = jwk_key()
    header = {'alg': 'RS256', 'kid': 'demo_key'}
    raw_token = jwt.encode(header, payload, signing_key)
    LOGGER.debug(raw_token)
    return raw_token.decode('utf-8')
def issue_token_for_user(user: User):
    """Issue a 4-hour RS256 access token for *user*.

    :param user: the authenticated user; username/email/role go into claims.
    :return: encoded JWT as a str.
    """
    # Capture "now" once so iat and exp are derived from the same instant
    # (the original called datetime.now() twice, allowing a tiny drift).
    now = datetime.datetime.now(tz=datetime.timezone.utc)
    access_token = new_token({
        "iss": "lorem.ipsum.dev",
        "aud": "lorem.ipsum.auth",
        "sub": user.username,
        "email": user.email,
        "roles": [
            user.role.name
        ],
        "exp": now + datetime.timedelta(hours=4),
        "iat": now
    })
    return access_token
|
normal
|
{
"blob_id": "97d4387c7bfd141b5a7019b221adb550105d4351",
"index": 604,
"step-1": "<mask token>\n\n\nclass AuthorizationError(ValueError):\n pass\n\n\nclass BearerTokenValidator:\n\n def __init__(self, access_token, app_context: AppContext):\n self.access_token = access_token\n user_service = app_context.user_service\n self.blacklist_token_repo = app_context.blacklist_token_repo\n self.payload = user_service.decode_auth_token(access_token, get_jwk())\n\n def check_is_blacklisted(self):\n is_blacklisted_token = BlacklistToken.check_blacklist(self.\n access_token, self.blacklist_token_repo)\n if is_blacklisted_token:\n LOGGER.debug('Token blacklisted.')\n raise AuthenticationError('Invalid token.')\n return self\n\n def check_username_claim(self):\n if not self.payload.get('sub'):\n LOGGER.debug('Token missing sub.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_user_exists(self, user):\n if not user:\n LOGGER.debug('Token user not found.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_has_permissions(self, user: User, permissions: list):\n has_permissions = True\n for permission in permissions:\n if not user.role.has_permission(Permission.from_enum(permission)):\n LOGGER.debug(f'Missing permission {permission}.')\n has_permissions = False\n LOGGER.debug(f'Required permissions: {permissions}')\n if not has_permissions:\n raise AuthorizationError('Forbidden.')\n return self\n\n @staticmethod\n def from_authorization_header(authorization_header: str, app_context:\n AppContext):\n if not authorization_header:\n LOGGER.debug('Authorization header not found.')\n raise AuthenticationError('Invalid token.')\n if 'Bearer ' not in authorization_header:\n LOGGER.debug('Bearer token not found.')\n raise AuthenticationError('Invalid token.')\n access_token = authorization_header.split('Bearer')[1].strip()\n LOGGER.debug(f'Bearer token is:\\n\"{access_token}\"')\n return BearerTokenValidator(access_token, app_context)\n\n\n<mask token>\n\n\nclass ExceptionHandlers:\n\n def __init__(self, app):\n\n 
@app.errorhandler(AuthorizationError)\n def handle_authorization_exception(e):\n \"\"\"Return403 forbidden.\"\"\"\n return jsonify(str(e)), 403\n\n @app.errorhandler(AuthenticationError)\n def handle_authentication_exception(e):\n \"\"\"Return401 authentication error.\"\"\"\n return jsonify(str(e)), 401\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AuthenticationError(ValueError):\n pass\n\n\nclass AuthorizationError(ValueError):\n pass\n\n\nclass BearerTokenValidator:\n\n def __init__(self, access_token, app_context: AppContext):\n self.access_token = access_token\n user_service = app_context.user_service\n self.blacklist_token_repo = app_context.blacklist_token_repo\n self.payload = user_service.decode_auth_token(access_token, get_jwk())\n\n def check_is_blacklisted(self):\n is_blacklisted_token = BlacklistToken.check_blacklist(self.\n access_token, self.blacklist_token_repo)\n if is_blacklisted_token:\n LOGGER.debug('Token blacklisted.')\n raise AuthenticationError('Invalid token.')\n return self\n\n def check_username_claim(self):\n if not self.payload.get('sub'):\n LOGGER.debug('Token missing sub.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_user_exists(self, user):\n if not user:\n LOGGER.debug('Token user not found.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_has_permissions(self, user: User, permissions: list):\n has_permissions = True\n for permission in permissions:\n if not user.role.has_permission(Permission.from_enum(permission)):\n LOGGER.debug(f'Missing permission {permission}.')\n has_permissions = False\n LOGGER.debug(f'Required permissions: {permissions}')\n if not has_permissions:\n raise AuthorizationError('Forbidden.')\n return self\n\n @staticmethod\n def from_authorization_header(authorization_header: str, app_context:\n AppContext):\n if not authorization_header:\n LOGGER.debug('Authorization header not found.')\n raise AuthenticationError('Invalid token.')\n if 'Bearer ' not in authorization_header:\n LOGGER.debug('Bearer token not found.')\n raise AuthenticationError('Invalid token.')\n access_token = authorization_header.split('Bearer')[1].strip()\n LOGGER.debug(f'Bearer token is:\\n\"{access_token}\"')\n return BearerTokenValidator(access_token, app_context)\n\n\n<mask token>\n\n\nclass 
ExceptionHandlers:\n\n def __init__(self, app):\n\n @app.errorhandler(AuthorizationError)\n def handle_authorization_exception(e):\n \"\"\"Return403 forbidden.\"\"\"\n return jsonify(str(e)), 403\n\n @app.errorhandler(AuthenticationError)\n def handle_authentication_exception(e):\n \"\"\"Return401 authentication error.\"\"\"\n return jsonify(str(e)), 401\n\n\n<mask token>\n\n\ndef issue_token_for_user(user: User):\n access_token = new_token({'iss': 'lorem.ipsum.dev', 'aud':\n 'lorem.ipsum.auth', 'sub': user.username, 'email': user.email,\n 'roles': [user.role.name], 'exp': datetime.datetime.now(tz=datetime\n .timezone.utc) + datetime.timedelta(hours=4), 'iat': datetime.\n datetime.now(tz=datetime.timezone.utc)})\n return access_token\n",
"step-3": "<mask token>\n\n\ndef app_context():\n if 'app_context' not in g:\n g.app_context = lorem_ipsum.create_app_context()\n return g.app_context\n\n\n@lru_cache()\ndef get_jwk():\n LOGGER.debug('Loading jwk from public key...')\n key_data = None\n with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:\n key_data = _key_file.read()\n LOGGER.debug(key_data)\n key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})\n _jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}\n LOGGER.debug(_jwks)\n return _jwks\n\n\nclass AuthenticationError(ValueError):\n pass\n\n\nclass AuthorizationError(ValueError):\n pass\n\n\nclass BearerTokenValidator:\n\n def __init__(self, access_token, app_context: AppContext):\n self.access_token = access_token\n user_service = app_context.user_service\n self.blacklist_token_repo = app_context.blacklist_token_repo\n self.payload = user_service.decode_auth_token(access_token, get_jwk())\n\n def check_is_blacklisted(self):\n is_blacklisted_token = BlacklistToken.check_blacklist(self.\n access_token, self.blacklist_token_repo)\n if is_blacklisted_token:\n LOGGER.debug('Token blacklisted.')\n raise AuthenticationError('Invalid token.')\n return self\n\n def check_username_claim(self):\n if not self.payload.get('sub'):\n LOGGER.debug('Token missing sub.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_user_exists(self, user):\n if not user:\n LOGGER.debug('Token user not found.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_has_permissions(self, user: User, permissions: list):\n has_permissions = True\n for permission in permissions:\n if not user.role.has_permission(Permission.from_enum(permission)):\n LOGGER.debug(f'Missing permission {permission}.')\n has_permissions = False\n LOGGER.debug(f'Required permissions: {permissions}')\n if not has_permissions:\n raise AuthorizationError('Forbidden.')\n return self\n\n @staticmethod\n def 
from_authorization_header(authorization_header: str, app_context:\n AppContext):\n if not authorization_header:\n LOGGER.debug('Authorization header not found.')\n raise AuthenticationError('Invalid token.')\n if 'Bearer ' not in authorization_header:\n LOGGER.debug('Bearer token not found.')\n raise AuthenticationError('Invalid token.')\n access_token = authorization_header.split('Bearer')[1].strip()\n LOGGER.debug(f'Bearer token is:\\n\"{access_token}\"')\n return BearerTokenValidator(access_token, app_context)\n\n\ndef should_skip_auth(flask_request):\n \"\"\"\n Return true if should skip auth, e.g. when method is OPTIONS like when performing a React request.\n :param flask_request: Flask request.\n :return:\n \"\"\"\n return flask_request.method in ['HEAD', 'OPTIONS']\n\n\n<mask token>\n\n\nclass ExceptionHandlers:\n\n def __init__(self, app):\n\n @app.errorhandler(AuthorizationError)\n def handle_authorization_exception(e):\n \"\"\"Return403 forbidden.\"\"\"\n return jsonify(str(e)), 403\n\n @app.errorhandler(AuthenticationError)\n def handle_authentication_exception(e):\n \"\"\"Return401 authentication error.\"\"\"\n return jsonify(str(e)), 401\n\n\n@lru_cache()\ndef jwk_key():\n jwk_path = os.environ.get('jwk_private_key_path') or app_context().config[\n 'jwk_private_key_path']\n with open(jwk_path, 'rb') as f:\n key = JsonWebKey.import_key(f.read())\n return key\n\n\ndef new_token(payload: dict):\n key = jwk_key()\n header = {'alg': 'RS256', 'kid': 'demo_key'}\n token = jwt.encode(header, payload, key)\n LOGGER.debug(token)\n return token.decode('utf-8')\n\n\ndef issue_token_for_user(user: User):\n access_token = new_token({'iss': 'lorem.ipsum.dev', 'aud':\n 'lorem.ipsum.auth', 'sub': user.username, 'email': user.email,\n 'roles': [user.role.name], 'exp': datetime.datetime.now(tz=datetime\n .timezone.utc) + datetime.timedelta(hours=4), 'iat': datetime.\n datetime.now(tz=datetime.timezone.utc)})\n return access_token\n",
"step-4": "<mask token>\nLOGGER = logging.getLogger('lorem-ipsum')\n\n\ndef app_context():\n if 'app_context' not in g:\n g.app_context = lorem_ipsum.create_app_context()\n return g.app_context\n\n\n@lru_cache()\ndef get_jwk():\n LOGGER.debug('Loading jwk from public key...')\n key_data = None\n with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:\n key_data = _key_file.read()\n LOGGER.debug(key_data)\n key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})\n _jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}\n LOGGER.debug(_jwks)\n return _jwks\n\n\nclass AuthenticationError(ValueError):\n pass\n\n\nclass AuthorizationError(ValueError):\n pass\n\n\nclass BearerTokenValidator:\n\n def __init__(self, access_token, app_context: AppContext):\n self.access_token = access_token\n user_service = app_context.user_service\n self.blacklist_token_repo = app_context.blacklist_token_repo\n self.payload = user_service.decode_auth_token(access_token, get_jwk())\n\n def check_is_blacklisted(self):\n is_blacklisted_token = BlacklistToken.check_blacklist(self.\n access_token, self.blacklist_token_repo)\n if is_blacklisted_token:\n LOGGER.debug('Token blacklisted.')\n raise AuthenticationError('Invalid token.')\n return self\n\n def check_username_claim(self):\n if not self.payload.get('sub'):\n LOGGER.debug('Token missing sub.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_user_exists(self, user):\n if not user:\n LOGGER.debug('Token user not found.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_has_permissions(self, user: User, permissions: list):\n has_permissions = True\n for permission in permissions:\n if not user.role.has_permission(Permission.from_enum(permission)):\n LOGGER.debug(f'Missing permission {permission}.')\n has_permissions = False\n LOGGER.debug(f'Required permissions: {permissions}')\n if not has_permissions:\n raise AuthorizationError('Forbidden.')\n return self\n\n @staticmethod\n def 
from_authorization_header(authorization_header: str, app_context:\n AppContext):\n if not authorization_header:\n LOGGER.debug('Authorization header not found.')\n raise AuthenticationError('Invalid token.')\n if 'Bearer ' not in authorization_header:\n LOGGER.debug('Bearer token not found.')\n raise AuthenticationError('Invalid token.')\n access_token = authorization_header.split('Bearer')[1].strip()\n LOGGER.debug(f'Bearer token is:\\n\"{access_token}\"')\n return BearerTokenValidator(access_token, app_context)\n\n\ndef should_skip_auth(flask_request):\n \"\"\"\n Return true if should skip auth, e.g. when method is OPTIONS like when performing a React request.\n :param flask_request: Flask request.\n :return:\n \"\"\"\n return flask_request.method in ['HEAD', 'OPTIONS']\n\n\ndef requires_permission(permissions: list):\n\n def requires_permission_decorator(function):\n\n def wrapper(*args, **kwargs):\n LOGGER.info(f'Authorization...\\n{request.headers}')\n if should_skip_auth(request):\n return jsonify('ok')\n authorization_header = request.headers.get('Authorization')\n context = app_context()\n with context.transaction_manager.transaction:\n bearer_token_validator = (BearerTokenValidator.\n from_authorization_header(authorization_header, context\n ).check_is_blacklisted().check_username_claim())\n user = context.user_repo.get(username=\n bearer_token_validator.payload['sub'])\n bearer_token_validator.check_user_exists(user\n ).check_has_permissions(user, permissions)\n g.access_token = bearer_token_validator.access_token\n g.user = user\n _result = function(*args, **kwargs)\n return _result\n wrapper.__name__ = function.__name__\n return wrapper\n return requires_permission_decorator\n\n\nclass ExceptionHandlers:\n\n def __init__(self, app):\n\n @app.errorhandler(AuthorizationError)\n def handle_authorization_exception(e):\n \"\"\"Return403 forbidden.\"\"\"\n return jsonify(str(e)), 403\n\n @app.errorhandler(AuthenticationError)\n def 
handle_authentication_exception(e):\n \"\"\"Return401 authentication error.\"\"\"\n return jsonify(str(e)), 401\n\n\n@lru_cache()\ndef jwk_key():\n jwk_path = os.environ.get('jwk_private_key_path') or app_context().config[\n 'jwk_private_key_path']\n with open(jwk_path, 'rb') as f:\n key = JsonWebKey.import_key(f.read())\n return key\n\n\ndef new_token(payload: dict):\n key = jwk_key()\n header = {'alg': 'RS256', 'kid': 'demo_key'}\n token = jwt.encode(header, payload, key)\n LOGGER.debug(token)\n return token.decode('utf-8')\n\n\ndef issue_token_for_user(user: User):\n access_token = new_token({'iss': 'lorem.ipsum.dev', 'aud':\n 'lorem.ipsum.auth', 'sub': user.username, 'email': user.email,\n 'roles': [user.role.name], 'exp': datetime.datetime.now(tz=datetime\n .timezone.utc) + datetime.timedelta(hours=4), 'iat': datetime.\n datetime.now(tz=datetime.timezone.utc)})\n return access_token\n",
"step-5": "import datetime\nimport logging\nimport os\nfrom functools import lru_cache\nfrom authlib.jose import JsonWebKey, jwt\n\nfrom flask import g, request, jsonify\nfrom lorem_ipsum.model import User, AppContext\nimport lorem_ipsum\nfrom lorem_ipsum.model import Permission, BlacklistToken\n\nLOGGER = logging.getLogger('lorem-ipsum')\n\n\ndef app_context():\n if 'app_context' not in g:\n g.app_context = lorem_ipsum.create_app_context()\n return g.app_context\n\n\n@lru_cache()\ndef get_jwk():\n LOGGER.debug('Loading jwk from public key...')\n key_data = None\n with open(app_context().config['jwk_public_key_path'], 'rb') as _key_file:\n key_data = _key_file.read()\n LOGGER.debug(key_data)\n key = JsonWebKey.import_key(key_data, {'kty': 'RSA'})\n _jwks = {'keys': [{**key.as_dict(), 'kid': 'demo_key'}]}\n LOGGER.debug(_jwks)\n return _jwks\n\n\nclass AuthenticationError(ValueError):\n pass\n\n\nclass AuthorizationError(ValueError):\n pass\n\n\nclass BearerTokenValidator:\n def __init__(self, access_token, app_context: AppContext):\n self.access_token = access_token\n user_service = app_context.user_service\n self.blacklist_token_repo = app_context.blacklist_token_repo\n self.payload = user_service.decode_auth_token(access_token, get_jwk())\n\n def check_is_blacklisted(self):\n is_blacklisted_token = BlacklistToken.check_blacklist(self.access_token, self.blacklist_token_repo)\n if is_blacklisted_token:\n LOGGER.debug('Token blacklisted.')\n raise AuthenticationError('Invalid token.')\n return self\n\n def check_username_claim(self):\n if not self.payload.get('sub'):\n LOGGER.debug('Token missing sub.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_user_exists(self, user):\n if not user:\n LOGGER.debug('Token user not found.')\n raise AuthorizationError('Forbidden.')\n return self\n\n def check_has_permissions(self, user: User, permissions: list):\n has_permissions = True\n for permission in permissions:\n if not 
user.role.has_permission(Permission.from_enum(permission)):\n LOGGER.debug(f'Missing permission {permission}.')\n has_permissions = False\n LOGGER.debug(f'Required permissions: {permissions}')\n if not has_permissions:\n raise AuthorizationError('Forbidden.')\n return self\n\n @staticmethod\n def from_authorization_header(authorization_header: str, app_context: AppContext):\n if not authorization_header:\n LOGGER.debug('Authorization header not found.')\n raise AuthenticationError('Invalid token.')\n if 'Bearer ' not in authorization_header:\n LOGGER.debug('Bearer token not found.')\n raise AuthenticationError('Invalid token.')\n access_token = authorization_header.split('Bearer')[1].strip()\n LOGGER.debug(f'Bearer token is:\\n\"{access_token}\"')\n return BearerTokenValidator(access_token, app_context)\n\n\ndef should_skip_auth(flask_request):\n \"\"\"\n Return true if should skip auth, e.g. when method is OPTIONS like when performing a React request.\n :param flask_request: Flask request.\n :return:\n \"\"\"\n return flask_request.method in ['HEAD', 'OPTIONS']\n\n\ndef requires_permission(permissions: list):\n def requires_permission_decorator(function):\n def wrapper(*args, **kwargs):\n LOGGER.info(f'Authorization...\\n{request.headers}')\n if should_skip_auth(request):\n return jsonify('ok')\n authorization_header = request.headers.get('Authorization')\n context = app_context()\n with context.transaction_manager.transaction:\n bearer_token_validator = BearerTokenValidator.from_authorization_header(authorization_header, context) \\\n .check_is_blacklisted() \\\n .check_username_claim()\n user = context.user_repo.get(username=bearer_token_validator.payload['sub'])\n\n bearer_token_validator.check_user_exists(user) \\\n .check_has_permissions(user, permissions)\n g.access_token = bearer_token_validator.access_token\n g.user = user\n\n _result = function(*args, **kwargs)\n return _result\n\n wrapper.__name__ = function.__name__\n return wrapper\n\n return 
requires_permission_decorator\n\n\nclass ExceptionHandlers:\n def __init__(self, app):\n @app.errorhandler(AuthorizationError)\n def handle_authorization_exception(e):\n \"\"\"Return403 forbidden.\"\"\"\n return jsonify(str(e)), 403\n\n @app.errorhandler(AuthenticationError)\n def handle_authentication_exception(e):\n \"\"\"Return401 authentication error.\"\"\"\n return jsonify(str(e)), 401\n\n\n@lru_cache()\ndef jwk_key():\n jwk_path = os.environ.get('jwk_private_key_path') or app_context().config['jwk_private_key_path']\n with open(jwk_path, 'rb') as f:\n key = JsonWebKey.import_key(f.read())\n return key\n\n\ndef new_token(payload: dict):\n key = jwk_key()\n header = {'alg': 'RS256', 'kid': 'demo_key'}\n token = jwt.encode(header, payload, key)\n LOGGER.debug(token)\n return token.decode('utf-8')\n\n\ndef issue_token_for_user(user: User):\n access_token = new_token({\n \"iss\": \"lorem.ipsum.dev\",\n \"aud\": \"lorem.ipsum.auth\",\n \"sub\": user.username,\n \"email\": user.email,\n \"roles\": [\n user.role.name\n ],\n \"exp\": datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(hours=4),\n \"iat\": datetime.datetime.now(tz=datetime.timezone.utc)\n })\n return access_token\n",
"step-ids": [
10,
12,
17,
19,
21
]
}
|
[
10,
12,
17,
19,
21
] |
#!/usr/bin/python3
"""display your id from github.
"""
from sys import argv
import requests
if __name__ == "__main__":
get = requests.get('https://api.github.com/user',
auth=(argv[1], argv[2])).json().get('id')
print(get)
|
normal
|
{
"blob_id": "8280f321b102cace462761f9ece2aebf9e28a432",
"index": 3941,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n get = requests.get('https://api.github.com/user', auth=(argv[1], argv[2])\n ).json().get('id')\n print(get)\n",
"step-3": "<mask token>\nfrom sys import argv\nimport requests\nif __name__ == '__main__':\n get = requests.get('https://api.github.com/user', auth=(argv[1], argv[2])\n ).json().get('id')\n print(get)\n",
"step-4": "#!/usr/bin/python3\n\"\"\"display your id from github.\n\"\"\"\nfrom sys import argv\nimport requests\n\n\nif __name__ == \"__main__\":\n get = requests.get('https://api.github.com/user',\n auth=(argv[1], argv[2])).json().get('id')\n print(get)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
import os
from django.conf import settings
BASE_DIR=os.path.dirname(__file__)
settings.configure(
DEBUG=True,
SECRET_KEY='ki==706e99f0ps9w5s*!kx%1^=5jq_k1c&4r@#e&ng9=xlm5_',
ROOT_URLCONF='sitebuilder.urls',
MIDDLEWARE_CLASSES=(),
INSTALLED_APPS=(
'django.contrib.staticfiles',
'django.contrib.webdesign',
'sitebuilder',
'compressor',
),
STATIC_URL='/static/',
SITE_PAGES_DIRECTORY=os.path.join(BASE_DIR,'pages'),
SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR,'_build'),
STATIC_ROOT=os.path.join(BASE_DIR,'_build','static'),
#STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
STATICFILES_FINDERS=(
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
)
if __name__ =="__main__":
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
normal
|
{
"blob_id": "d30e5e24dd06a4846fdde3c9fcac0a5dac55ad0d",
"index": 5916,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsettings.configure(DEBUG=True, SECRET_KEY=\n 'ki==706e99f0ps9w5s*!kx%1^=5jq_k1c&4r@#e&ng9=xlm5_', ROOT_URLCONF=\n 'sitebuilder.urls', MIDDLEWARE_CLASSES=(), INSTALLED_APPS=(\n 'django.contrib.staticfiles', 'django.contrib.webdesign', 'sitebuilder',\n 'compressor'), STATIC_URL='/static/', SITE_PAGES_DIRECTORY=os.path.join\n (BASE_DIR, 'pages'), SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR,\n '_build'), STATIC_ROOT=os.path.join(BASE_DIR, '_build', 'static'),\n STATICFILES_FINDERS=(\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder'))\nif __name__ == '__main__':\n from django.core.management import execute_from_command_line\n execute_from_command_line(sys.argv)\n",
"step-3": "<mask token>\nBASE_DIR = os.path.dirname(__file__)\nsettings.configure(DEBUG=True, SECRET_KEY=\n 'ki==706e99f0ps9w5s*!kx%1^=5jq_k1c&4r@#e&ng9=xlm5_', ROOT_URLCONF=\n 'sitebuilder.urls', MIDDLEWARE_CLASSES=(), INSTALLED_APPS=(\n 'django.contrib.staticfiles', 'django.contrib.webdesign', 'sitebuilder',\n 'compressor'), STATIC_URL='/static/', SITE_PAGES_DIRECTORY=os.path.join\n (BASE_DIR, 'pages'), SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR,\n '_build'), STATIC_ROOT=os.path.join(BASE_DIR, '_build', 'static'),\n STATICFILES_FINDERS=(\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder'))\nif __name__ == '__main__':\n from django.core.management import execute_from_command_line\n execute_from_command_line(sys.argv)\n",
"step-4": "import sys\nimport os\nfrom django.conf import settings\nBASE_DIR = os.path.dirname(__file__)\nsettings.configure(DEBUG=True, SECRET_KEY=\n 'ki==706e99f0ps9w5s*!kx%1^=5jq_k1c&4r@#e&ng9=xlm5_', ROOT_URLCONF=\n 'sitebuilder.urls', MIDDLEWARE_CLASSES=(), INSTALLED_APPS=(\n 'django.contrib.staticfiles', 'django.contrib.webdesign', 'sitebuilder',\n 'compressor'), STATIC_URL='/static/', SITE_PAGES_DIRECTORY=os.path.join\n (BASE_DIR, 'pages'), SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR,\n '_build'), STATIC_ROOT=os.path.join(BASE_DIR, '_build', 'static'),\n STATICFILES_FINDERS=(\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder'))\nif __name__ == '__main__':\n from django.core.management import execute_from_command_line\n execute_from_command_line(sys.argv)\n",
"step-5": "import sys\nimport os\n\nfrom django.conf import settings\n\nBASE_DIR=os.path.dirname(__file__)\n\nsettings.configure(\n\tDEBUG=True,\n\tSECRET_KEY='ki==706e99f0ps9w5s*!kx%1^=5jq_k1c&4r@#e&ng9=xlm5_',\n\tROOT_URLCONF='sitebuilder.urls',\n\tMIDDLEWARE_CLASSES=(),\n\tINSTALLED_APPS=(\n\t\t'django.contrib.staticfiles',\n\t\t'django.contrib.webdesign',\n\t\t'sitebuilder',\n\t\t'compressor',\n\n\t\t),\n\tSTATIC_URL='/static/',\n\tSITE_PAGES_DIRECTORY=os.path.join(BASE_DIR,'pages'),\n\tSITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR,'_build'),\n\tSTATIC_ROOT=os.path.join(BASE_DIR,'_build','static'),\n\t#STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',\n\tSTATICFILES_FINDERS=(\n\t\t'django.contrib.staticfiles.finders.FileSystemFinder',\n\t\t'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n\t\t'compressor.finders.CompressorFinder',\n\n\t\t)\n\n)\n\nif __name__ ==\"__main__\":\n\tfrom django.core.management import execute_from_command_line\n\n\texecute_from_command_line(sys.argv)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# This package includes different measures to evaluate topics
|
normal
|
{
"blob_id": "3dcca85c8003b57ad37734bbbe171ab8cef0f56c",
"index": 1894,
"step-1": "# This package includes different measures to evaluate topics\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.