code
stringlengths
13
6.09M
order_type
stringclasses
2 values
original_example
dict
step_ids
listlengths
1
5
# Spelling bee NYT puzzle solver


def load_lexicon(path):
    """Return the set of unique, lowercased words from a word-list file (one word per line)."""
    with open(path) as words_fh:
        # strip() drops the trailing newline and stray whitespace;
        # lower() de-cases so the set also removes case duplicates.
        return {word.strip().lower() for word in words_fh}


def solve(lexicon, letters, mandatory_letter, min_length=4):
    """Return valid Spelling Bee answers from `lexicon`, sorted shortest-first.

    A word is valid when it uses only characters drawn from `letters`
    plus `mandatory_letter`, contains `mandatory_letter` at least once,
    and is at least `min_length` characters long (NYT default is 4).
    """
    allowed = set(letters) | {mandatory_letter}
    valid_words = [
        word
        for word in lexicon
        if set(word).issubset(allowed)
        and mandatory_letter in word
        and len(word) >= min_length
    ]
    # sorted() is stable; key=len orders purely by word length.
    return sorted(valid_words, key=len)


if __name__ == '__main__':
    # NOTE: Could add a CLI to allow users to input this. Manual edits are the way for now
    MANDATORY_LETTER = 'l'
    LETTERS = ['t', 'i', 'e', 'v', 'p', 'x']
    print(solve(load_lexicon('words.txt'), LETTERS, MANDATORY_LETTER))
normal
{ "blob_id": "aacd5d671090c3305a53d62c3c6c25d4c033f42d", "index": 6420, "step-1": "<mask token>\n", "step-2": "with open('words.txt') as words_fh:\n lexicon = set(list(map(lambda x: x.strip().lower(), words_fh.readlines())))\n<mask token>\nprint(sorted_valid_words)\n", "step-3": "with open('words.txt') as words_fh:\n lexicon = set(list(map(lambda x: x.strip().lower(), words_fh.readlines())))\nMANDATORY_LETTER = 'l'\nLETTERS = set(['t', 'i', 'e', 'v', 'p', 'x'] + [MANDATORY_LETTER])\nvalid_words = [word for word in lexicon if set(word).issubset(LETTERS) and \n MANDATORY_LETTER in set(word) and len(word) >= 4]\nsorted_valid_words = sorted(valid_words, key=lambda x: len(x))\nprint(sorted_valid_words)\n", "step-4": "# Spelling bee NYT puzzle solver\r\n\r\nwith open('words.txt') as words_fh:\r\n # Converts strips and lowercases lexicon (space seperated txt file)\r\n # Use set to remove duplicates (decasing)\r\n\tlexicon = set(list(map(lambda x: x.strip().lower(), words_fh.readlines())))\r\n\r\n# NOTE: Could add a CLI to allow users to input this. Manual edits are the way for now\r\nMANDATORY_LETTER = 'l'\r\nLETTERS = set(['t','i','e','v','p','x'] + [MANDATORY_LETTER])\r\n\r\n# Search for valid words \r\nvalid_words = [word for word in lexicon if set(word).issubset(LETTERS) and MANDATORY_LETTER in set(word) and len(word) >= 4]\r\nsorted_valid_words = sorted(valid_words, key=lambda x: len(x))\r\nprint(sorted_valid_words)", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class ReflecBeatColette(BaseClient): <|reserved_special_token_0|> def verify_pcb_boot(self, loc: str) ->None: call = self.call_node() pcb = Node.void('pcb') pcb.set_attribute('method', 'boot') pcb.add_child(Node.string('lid', loc)) call.add_child(pcb) resp = self.exchange('', call) self.assert_path(resp, 'response/pcb/sinfo/nm') self.assert_path(resp, 'response/pcb/sinfo/cl_enbl') self.assert_path(resp, 'response/pcb/sinfo/cl_h') self.assert_path(resp, 'response/pcb/sinfo/cl_m') <|reserved_special_token_0|> def verify_info_ranking(self) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'ranking') info.add_child(Node.s32('ver', 0)) call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info/ver') self.assert_path(resp, 'response/info/ranking/weekly/bt') self.assert_path(resp, 'response/info/ranking/weekly/et') self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid') self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt') self.assert_path(resp, 'response/info/ranking/monthly/bt') self.assert_path(resp, 'response/info/ranking/monthly/et') self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid') self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt') self.assert_path(resp, 'response/info/ranking/total/bt') self.assert_path(resp, 'response/info/ranking/total/et') self.assert_path(resp, 'response/info/ranking/total/new/d/mid') self.assert_path(resp, 'response/info/ranking/total/new/d/cnt') <|reserved_special_token_0|> def verify_player_delete(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'delete') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player') def verify_player_end(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'end') 
player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player') def verify_player_succeed(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'succeed') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player/name') self.assert_path(resp, 'response/player/lv') self.assert_path(resp, 'response/player/exp') self.assert_path(resp, 'response/player/grd') self.assert_path(resp, 'response/player/ap') self.assert_path(resp, 'response/player/released') self.assert_path(resp, 'response/player/mrecord') def verify_player_read(self, refid: str, location: str) ->List[Dict[str, int]]: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'read') player.add_child(Node.string('rid', refid)) player.add_child(Node.string('lid', location)) player.add_child(Node.s16('ver', 5)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player/pdata/account/usrid') self.assert_path(resp, 'response/player/pdata/account/tpc') self.assert_path(resp, 'response/player/pdata/account/dpc') self.assert_path(resp, 'response/player/pdata/account/crd') self.assert_path(resp, 'response/player/pdata/account/brd') self.assert_path(resp, 'response/player/pdata/account/tdc') self.assert_path(resp, 'response/player/pdata/account/intrvld') self.assert_path(resp, 'response/player/pdata/account/ver') self.assert_path(resp, 'response/player/pdata/account/pst') self.assert_path(resp, 'response/player/pdata/account/st') self.assert_path(resp, 'response/player/pdata/base/name') self.assert_path(resp, 'response/player/pdata/base/exp') self.assert_path(resp, 'response/player/pdata/base/lv') self.assert_path(resp, 'response/player/pdata/base/mg') self.assert_path(resp, 'response/player/pdata/base/ap') self.assert_path(resp, 
'response/player/pdata/base/tid') self.assert_path(resp, 'response/player/pdata/base/tname') self.assert_path(resp, 'response/player/pdata/base/cmnt') self.assert_path(resp, 'response/player/pdata/base/uattr') self.assert_path(resp, 'response/player/pdata/base/hidden_param') self.assert_path(resp, 'response/player/pdata/base/tbs') self.assert_path(resp, 'response/player/pdata/base/tbs_r') self.assert_path(resp, 'response/player/pdata/rival') self.assert_path(resp, 'response/player/pdata/fav_music_slot') self.assert_path(resp, 'response/player/pdata/custom') self.assert_path(resp, 'response/player/pdata/config') self.assert_path(resp, 'response/player/pdata/stamp') self.assert_path(resp, 'response/player/pdata/released') self.assert_path(resp, 'response/player/pdata/record') if resp.child_value('player/pdata/base/name') != self.NAME: raise Exception('Invalid name {} returned on profile read!'. format(resp.child_value('player/pdata/base/name'))) scores = [] for child in resp.child('player/pdata/record').children: if child.name != 'rec': continue score = {'id': child.child_value('mid'), 'chart': child. 
child_value('ntgrd'), 'clear_type': child.child_value('ct'), 'achievement_rate': child.child_value('ar'), 'score': child .child_value('scr'), 'combo': child.child_value('cmb'), 'miss_count': child.child_value('ms')} scores.append(score) return scores def verify_player_write(self, refid: str, loc: str, scores: List[Dict[ str, int]]) ->int: call = self.call_node() player = Node.void('player') call.add_child(player) player.set_attribute('method', 'write') pdata = Node.void('pdata') player.add_child(pdata) account = Node.void('account') pdata.add_child(account) account.add_child(Node.s32('usrid', 0)) account.add_child(Node.s32('plyid', 0)) account.add_child(Node.s32('tpc', 1)) account.add_child(Node.s32('dpc', 1)) account.add_child(Node.s32('crd', 1)) account.add_child(Node.s32('brd', 1)) account.add_child(Node.s32('tdc', 1)) account.add_child(Node.string('rid', refid)) account.add_child(Node.string('lid', loc)) account.add_child(Node.u8('mode', 0)) account.add_child(Node.s16('ver', 5)) account.add_child(Node.bool('pp', True)) account.add_child(Node.bool('ps', True)) account.add_child(Node.s16('pay', 0)) account.add_child(Node.s16('pay_pc', 0)) account.add_child(Node.u64('st', int(time.time() * 1000))) base = Node.void('base') pdata.add_child(base) base.add_child(Node.string('name', self.NAME)) base.add_child(Node.s32('exp', 0)) base.add_child(Node.s32('lv', 1)) base.add_child(Node.s32('mg', -1)) base.add_child(Node.s32('ap', -1)) base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])) base.add_child(Node.bool('is_tut', True)) stglog = Node.void('stglog') pdata.add_child(stglog) index = 0 for score in scores: log = Node.void('log') stglog.add_child(log) log.add_child(Node.s8('stg', index)) log.add_child(Node.s16('mid', score['id'])) log.add_child(Node.s8('ng', score['chart'])) log.add_child(Node.s8('col', 0)) 
log.add_child(Node.s8('mt', 7)) log.add_child(Node.s8('rt', 0)) log.add_child(Node.s8('ct', score['clear_type'])) log.add_child(Node.s16('grd', 0)) log.add_child(Node.s16('ar', score['achievement_rate'])) log.add_child(Node.s16('sc', score['score'])) log.add_child(Node.s16('jt_jst', 0)) log.add_child(Node.s16('jt_grt', 0)) log.add_child(Node.s16('jt_gd', 0)) log.add_child(Node.s16('jt_ms', score['miss_count'])) log.add_child(Node.s16('jt_jr', 0)) log.add_child(Node.s16('cmb', score['combo'])) log.add_child(Node.s16('exp', 0)) log.add_child(Node.s32('r_uid', 0)) log.add_child(Node.s32('r_plyid', 0)) log.add_child(Node.s8('r_stg', 0)) log.add_child(Node.s8('r_ct', -1)) log.add_child(Node.s16('r_sc', 0)) log.add_child(Node.s16('r_grd', 0)) log.add_child(Node.s16('r_ar', 0)) log.add_child(Node.s8('r_cpuid', -1)) log.add_child(Node.s32('time', int(time.time()))) log.add_child(Node.s8('decide', 0)) index = index + 1 resp = self.exchange('', call) self.assert_path(resp, 'response/player/uid') return resp.child_value('player/uid') <|reserved_special_token_0|> def verify_lobby_entry(self, location: str, extid: int) ->int: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'entry') e = Node.void('e') lobby.add_child(e) e.add_child(Node.s32('eid', 0)) e.add_child(Node.u16('mid', 79)) e.add_child(Node.u8('ng', 0)) e.add_child(Node.s32('uid', extid)) e.add_child(Node.s32('uattr', 0)) e.add_child(Node.string('pn', self.NAME)) e.add_child(Node.s16('mg', 255)) e.add_child(Node.s32('mopt', 0)) e.add_child(Node.s32('tid', 0)) e.add_child(Node.string('tn', '')) e.add_child(Node.s32('topt', 0)) e.add_child(Node.string('lid', location)) e.add_child(Node.string('sn', '')) e.add_child(Node.u8('pref', 51)) e.add_child(Node.s8('stg', 4)) e.add_child(Node.s8('pside', 0)) e.add_child(Node.s16('eatime', 30)) e.add_child(Node.u8_array('ga', [127, 0, 0, 1])) e.add_child(Node.u16('gp', 10007)) e.add_child(Node.u8_array('la', [16, 0, 0, 0])) 
e.add_child(Node.u8('ver', 5)) lobby.add_child(Node.s32_array('friend', [])) call.add_child(lobby) resp = self.exchange('', call) self.assert_path(resp, 'response/lobby/interval') self.assert_path(resp, 'response/lobby/interval_p') self.assert_path(resp, 'response/lobby/eid') self.assert_path(resp, 'response/lobby/e/eid') self.assert_path(resp, 'response/lobby/e/mid') self.assert_path(resp, 'response/lobby/e/ng') self.assert_path(resp, 'response/lobby/e/uid') self.assert_path(resp, 'response/lobby/e/uattr') self.assert_path(resp, 'response/lobby/e/pn') self.assert_path(resp, 'response/lobby/e/mg') self.assert_path(resp, 'response/lobby/e/mopt') self.assert_path(resp, 'response/lobby/e/tid') self.assert_path(resp, 'response/lobby/e/tn') self.assert_path(resp, 'response/lobby/e/topt') self.assert_path(resp, 'response/lobby/e/lid') self.assert_path(resp, 'response/lobby/e/sn') self.assert_path(resp, 'response/lobby/e/pref') self.assert_path(resp, 'response/lobby/e/stg') self.assert_path(resp, 'response/lobby/e/pside') self.assert_path(resp, 'response/lobby/e/eatime') self.assert_path(resp, 'response/lobby/e/ga') self.assert_path(resp, 'response/lobby/e/gp') self.assert_path(resp, 'response/lobby/e/la') self.assert_path(resp, 'response/lobby/e/ver') return resp.child_value('lobby/eid') def verify_lobby_delete(self, eid: int) ->None: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'delete') lobby.add_child(Node.s32('eid', eid)) call.add_child(lobby) resp = self.exchange('', call) self.assert_path(resp, 'response/lobby') def verify_pzlcmt_read(self, extid: int) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'pzlcmt_read') info.add_child(Node.s32('uid', extid)) info.add_child(Node.s32('tid', 0)) info.add_child(Node.s32('time', 0)) info.add_child(Node.s32('limit', 30)) call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info/comment/time') self.assert_path(resp, 
'response/info/c/uid') self.assert_path(resp, 'response/info/c/name') self.assert_path(resp, 'response/info/c/icon') self.assert_path(resp, 'response/info/c/bln') self.assert_path(resp, 'response/info/c/tid') self.assert_path(resp, 'response/info/c/t_name') self.assert_path(resp, 'response/info/c/pref') self.assert_path(resp, 'response/info/c/time') self.assert_path(resp, 'response/info/c/comment') self.assert_path(resp, 'response/info/c/is_tweet') found = False for child in resp.child('info').children: if child.name != 'c': continue if child.child_value('uid') == extid: name = child.child_value('name') comment = child.child_value('comment') if name != self.NAME: raise Exception("Invalid name '{}' returned for comment!" .format(name)) if comment != 'アメ〜〜!': raise Exception( "Invalid comment '{}' returned for comment!".format (comment)) found = True if not found: raise Exception('Comment we posted was not found!') <|reserved_special_token_0|> def verify_jbrbcollabo_save(self, refid: str) ->None: call = self.call_node() jbrbcollabo = Node.void('jbrbcollabo') jbrbcollabo.set_attribute('method', 'save') jbrbcollabo.add_child(Node.string('ref_id', refid)) jbrbcollabo.add_child(Node.u16('cre_count', 0)) call.add_child(jbrbcollabo) resp = self.exchange('', call) self.assert_path(resp, 'response/jbrbcollabo') def verify(self, cardid: Optional[str]) ->None: self.verify_services_get(expected_services=['pcbtracker', 'pcbevent', 'local', 'message', 'facility', 'cardmng', 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby', 'ntp', 'keepalive']) paseli_enabled = self.verify_pcbtracker_alive() self.verify_message_get() self.verify_package_list() location = self.verify_facility_get() self.verify_pcbevent_put() self.verify_pcb_boot(location) self.verify_info_common() if cardid is not None: card = cardid else: card = self.random_card() print('Generated random card ID {} for use.'.format(card)) if cardid is None: self.verify_cardmng_inquire(card, msg_type='unregistered', 
paseli_enabled=paseli_enabled) ref_id = self.verify_cardmng_getrefid(card) if len(ref_id) != 16: raise Exception( "Invalid refid '{}' returned when registering card". format(ref_id)) if ref_id != self.verify_cardmng_inquire(card, msg_type='new', paseli_enabled=paseli_enabled): raise Exception( "Invalid refid '{}' returned when querying card".format (ref_id)) self.verify_player_start(ref_id) self.verify_player_delete(ref_id) self.verify_player_succeed(ref_id) extid = self.verify_player_write(ref_id, location, [{'id': 0, 'chart': 0, 'clear_type': -1, 'achievement_rate': 0, 'score': 0, 'combo': 0, 'miss_count': 0}]) else: print('Skipping new card checks for existing card') ref_id = self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled) self.verify_cardmng_authpass(ref_id, correct=True) self.verify_cardmng_authpass(ref_id, correct=False) if ref_id != self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled): raise Exception("Invalid refid '{}' returned when querying card" .format(ref_id)) self.verify_lobby_read(location, extid) eid = self.verify_lobby_entry(location, extid) self.verify_lobby_delete(eid) self.verify_pzlcmt_write(extid) self.verify_pzlcmt_read(extid) self.verify_jbrbcollabo_save(ref_id) if cardid is None: for phase in [1, 2]: if phase == 1: dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2, 'achievement_rate': 7543, 'score': 432, 'combo': 123, 'miss_count': 5}, {'id': 1, 'chart': 0, 'clear_type': 4, 'achievement_rate': 9876, 'score': 543, 'combo': 543, 'miss_count': 0}, {'id': 3, 'chart': 2, 'clear_type': 2, 'achievement_rate': 1234, 'score': 123, 'combo': 42, 'miss_count': 54}, {'id': 3, 'chart': 0, 'clear_type': 2, 'achievement_rate': 1024, 'score': 50, 'combo': 12, 'miss_count': 90}] if phase == 2: dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 3, 'achievement_rate': 8765, 'score': 469, 'combo': 468, 'miss_count': 1}, {'id': 1, 'chart': 0, 'clear_type': 2, 'achievement_rate': 8765, 'score': 
432, 'combo': 321, 'miss_count': 15, 'expected_score': 543, 'expected_clear_type': 4, 'expected_achievement_rate': 9876, 'expected_combo': 543, 'expected_miss_count': 0}] self.verify_player_write(ref_id, location, dummyscores) scores = self.verify_player_read(ref_id, location) for expected in dummyscores: actual = None for received in scores: if received['id'] == expected['id'] and received[ 'chart'] == expected['chart']: actual = received break if actual is None: raise Exception( "Didn't find song {} chart {} in response!". format(expected['id'], expected['chart'])) if 'expected_score' in expected: expected_score = expected['expected_score'] else: expected_score = expected['score'] if 'expected_achievement_rate' in expected: expected_achievement_rate = expected[ 'expected_achievement_rate'] else: expected_achievement_rate = expected['achievement_rate' ] if 'expected_clear_type' in expected: expected_clear_type = expected['expected_clear_type'] else: expected_clear_type = expected['clear_type'] if 'expected_combo' in expected: expected_combo = expected['expected_combo'] else: expected_combo = expected['combo'] if 'expected_miss_count' in expected: expected_miss_count = expected['expected_miss_count'] else: expected_miss_count = expected['miss_count'] if actual['score'] != expected_score: raise Exception( "Expected a score of '{}' for song '{}' chart '{}' but got score '{}'" .format(expected_score, expected['id'], expected['chart'], actual['score'])) if actual['achievement_rate'] != expected_achievement_rate: raise Exception( "Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'" .format(expected_achievement_rate, expected[ 'id'], expected['chart'], actual[ 'achievement_rate'])) if actual['clear_type'] != expected_clear_type: raise Exception( "Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'" .format(expected_clear_type, expected['id'], expected['chart'], actual['clear_type'])) if actual['combo'] 
!= expected_combo: raise Exception( "Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'" .format(expected_combo, expected['id'], expected['chart'], actual['combo'])) if actual['miss_count'] != expected_miss_count: raise Exception( "Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'" .format(expected_miss_count, expected['id'], expected['chart'], actual['miss_count'])) time.sleep(1) else: print('Skipping score checks for existing card') self.verify_player_end(ref_id) self.verify_info_ranking() if paseli_enabled: print('PASELI enabled for this PCBID, executing PASELI checks') else: print('PASELI disabled for this PCBID, skipping PASELI checks') return sessid, balance = self.verify_eacoin_checkin(card) if balance == 0: print('Skipping PASELI consume check because card has 0 balance') else: self.verify_eacoin_consume(sessid, balance, random.randint(0, balance)) self.verify_eacoin_checkout(sessid) <|reserved_special_token_1|> <|reserved_special_token_0|> class ReflecBeatColette(BaseClient): <|reserved_special_token_0|> def verify_pcb_boot(self, loc: str) ->None: call = self.call_node() pcb = Node.void('pcb') pcb.set_attribute('method', 'boot') pcb.add_child(Node.string('lid', loc)) call.add_child(pcb) resp = self.exchange('', call) self.assert_path(resp, 'response/pcb/sinfo/nm') self.assert_path(resp, 'response/pcb/sinfo/cl_enbl') self.assert_path(resp, 'response/pcb/sinfo/cl_h') self.assert_path(resp, 'response/pcb/sinfo/cl_m') def verify_info_common(self) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'common') call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info/event_ctrl') self.assert_path(resp, 'response/info/item_lock_ctrl') def verify_info_ranking(self) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'ranking') info.add_child(Node.s32('ver', 0)) call.add_child(info) resp = self.exchange('', call) 
self.assert_path(resp, 'response/info/ver') self.assert_path(resp, 'response/info/ranking/weekly/bt') self.assert_path(resp, 'response/info/ranking/weekly/et') self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid') self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt') self.assert_path(resp, 'response/info/ranking/monthly/bt') self.assert_path(resp, 'response/info/ranking/monthly/et') self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid') self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt') self.assert_path(resp, 'response/info/ranking/total/bt') self.assert_path(resp, 'response/info/ranking/total/et') self.assert_path(resp, 'response/info/ranking/total/new/d/mid') self.assert_path(resp, 'response/info/ranking/total/new/d/cnt') <|reserved_special_token_0|> def verify_player_delete(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'delete') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player') def verify_player_end(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'end') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player') def verify_player_succeed(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'succeed') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player/name') self.assert_path(resp, 'response/player/lv') self.assert_path(resp, 'response/player/exp') self.assert_path(resp, 'response/player/grd') self.assert_path(resp, 'response/player/ap') self.assert_path(resp, 'response/player/released') self.assert_path(resp, 'response/player/mrecord') def verify_player_read(self, refid: str, 
location: str) ->List[Dict[str, int]]: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'read') player.add_child(Node.string('rid', refid)) player.add_child(Node.string('lid', location)) player.add_child(Node.s16('ver', 5)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player/pdata/account/usrid') self.assert_path(resp, 'response/player/pdata/account/tpc') self.assert_path(resp, 'response/player/pdata/account/dpc') self.assert_path(resp, 'response/player/pdata/account/crd') self.assert_path(resp, 'response/player/pdata/account/brd') self.assert_path(resp, 'response/player/pdata/account/tdc') self.assert_path(resp, 'response/player/pdata/account/intrvld') self.assert_path(resp, 'response/player/pdata/account/ver') self.assert_path(resp, 'response/player/pdata/account/pst') self.assert_path(resp, 'response/player/pdata/account/st') self.assert_path(resp, 'response/player/pdata/base/name') self.assert_path(resp, 'response/player/pdata/base/exp') self.assert_path(resp, 'response/player/pdata/base/lv') self.assert_path(resp, 'response/player/pdata/base/mg') self.assert_path(resp, 'response/player/pdata/base/ap') self.assert_path(resp, 'response/player/pdata/base/tid') self.assert_path(resp, 'response/player/pdata/base/tname') self.assert_path(resp, 'response/player/pdata/base/cmnt') self.assert_path(resp, 'response/player/pdata/base/uattr') self.assert_path(resp, 'response/player/pdata/base/hidden_param') self.assert_path(resp, 'response/player/pdata/base/tbs') self.assert_path(resp, 'response/player/pdata/base/tbs_r') self.assert_path(resp, 'response/player/pdata/rival') self.assert_path(resp, 'response/player/pdata/fav_music_slot') self.assert_path(resp, 'response/player/pdata/custom') self.assert_path(resp, 'response/player/pdata/config') self.assert_path(resp, 'response/player/pdata/stamp') self.assert_path(resp, 'response/player/pdata/released') self.assert_path(resp, 
'response/player/pdata/record') if resp.child_value('player/pdata/base/name') != self.NAME: raise Exception('Invalid name {} returned on profile read!'. format(resp.child_value('player/pdata/base/name'))) scores = [] for child in resp.child('player/pdata/record').children: if child.name != 'rec': continue score = {'id': child.child_value('mid'), 'chart': child. child_value('ntgrd'), 'clear_type': child.child_value('ct'), 'achievement_rate': child.child_value('ar'), 'score': child .child_value('scr'), 'combo': child.child_value('cmb'), 'miss_count': child.child_value('ms')} scores.append(score) return scores def verify_player_write(self, refid: str, loc: str, scores: List[Dict[ str, int]]) ->int: call = self.call_node() player = Node.void('player') call.add_child(player) player.set_attribute('method', 'write') pdata = Node.void('pdata') player.add_child(pdata) account = Node.void('account') pdata.add_child(account) account.add_child(Node.s32('usrid', 0)) account.add_child(Node.s32('plyid', 0)) account.add_child(Node.s32('tpc', 1)) account.add_child(Node.s32('dpc', 1)) account.add_child(Node.s32('crd', 1)) account.add_child(Node.s32('brd', 1)) account.add_child(Node.s32('tdc', 1)) account.add_child(Node.string('rid', refid)) account.add_child(Node.string('lid', loc)) account.add_child(Node.u8('mode', 0)) account.add_child(Node.s16('ver', 5)) account.add_child(Node.bool('pp', True)) account.add_child(Node.bool('ps', True)) account.add_child(Node.s16('pay', 0)) account.add_child(Node.s16('pay_pc', 0)) account.add_child(Node.u64('st', int(time.time() * 1000))) base = Node.void('base') pdata.add_child(base) base.add_child(Node.string('name', self.NAME)) base.add_child(Node.s32('exp', 0)) base.add_child(Node.s32('lv', 1)) base.add_child(Node.s32('mg', -1)) base.add_child(Node.s32('ap', -1)) base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0])) base.add_child(Node.bool('is_tut', True)) stglog = Node.void('stglog') pdata.add_child(stglog) index = 0 for score in scores: log = Node.void('log') stglog.add_child(log) log.add_child(Node.s8('stg', index)) log.add_child(Node.s16('mid', score['id'])) log.add_child(Node.s8('ng', score['chart'])) log.add_child(Node.s8('col', 0)) log.add_child(Node.s8('mt', 7)) log.add_child(Node.s8('rt', 0)) log.add_child(Node.s8('ct', score['clear_type'])) log.add_child(Node.s16('grd', 0)) log.add_child(Node.s16('ar', score['achievement_rate'])) log.add_child(Node.s16('sc', score['score'])) log.add_child(Node.s16('jt_jst', 0)) log.add_child(Node.s16('jt_grt', 0)) log.add_child(Node.s16('jt_gd', 0)) log.add_child(Node.s16('jt_ms', score['miss_count'])) log.add_child(Node.s16('jt_jr', 0)) log.add_child(Node.s16('cmb', score['combo'])) log.add_child(Node.s16('exp', 0)) log.add_child(Node.s32('r_uid', 0)) log.add_child(Node.s32('r_plyid', 0)) log.add_child(Node.s8('r_stg', 0)) log.add_child(Node.s8('r_ct', -1)) log.add_child(Node.s16('r_sc', 0)) log.add_child(Node.s16('r_grd', 0)) log.add_child(Node.s16('r_ar', 0)) log.add_child(Node.s8('r_cpuid', -1)) log.add_child(Node.s32('time', int(time.time()))) log.add_child(Node.s8('decide', 0)) index = index + 1 resp = self.exchange('', call) self.assert_path(resp, 'response/player/uid') return resp.child_value('player/uid') <|reserved_special_token_0|> def verify_lobby_entry(self, location: str, extid: int) ->int: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'entry') e = Node.void('e') lobby.add_child(e) e.add_child(Node.s32('eid', 0)) e.add_child(Node.u16('mid', 79)) e.add_child(Node.u8('ng', 0)) e.add_child(Node.s32('uid', extid)) e.add_child(Node.s32('uattr', 0)) e.add_child(Node.string('pn', self.NAME)) e.add_child(Node.s16('mg', 255)) e.add_child(Node.s32('mopt', 0)) e.add_child(Node.s32('tid', 0)) e.add_child(Node.string('tn', '')) e.add_child(Node.s32('topt', 0)) 
e.add_child(Node.string('lid', location)) e.add_child(Node.string('sn', '')) e.add_child(Node.u8('pref', 51)) e.add_child(Node.s8('stg', 4)) e.add_child(Node.s8('pside', 0)) e.add_child(Node.s16('eatime', 30)) e.add_child(Node.u8_array('ga', [127, 0, 0, 1])) e.add_child(Node.u16('gp', 10007)) e.add_child(Node.u8_array('la', [16, 0, 0, 0])) e.add_child(Node.u8('ver', 5)) lobby.add_child(Node.s32_array('friend', [])) call.add_child(lobby) resp = self.exchange('', call) self.assert_path(resp, 'response/lobby/interval') self.assert_path(resp, 'response/lobby/interval_p') self.assert_path(resp, 'response/lobby/eid') self.assert_path(resp, 'response/lobby/e/eid') self.assert_path(resp, 'response/lobby/e/mid') self.assert_path(resp, 'response/lobby/e/ng') self.assert_path(resp, 'response/lobby/e/uid') self.assert_path(resp, 'response/lobby/e/uattr') self.assert_path(resp, 'response/lobby/e/pn') self.assert_path(resp, 'response/lobby/e/mg') self.assert_path(resp, 'response/lobby/e/mopt') self.assert_path(resp, 'response/lobby/e/tid') self.assert_path(resp, 'response/lobby/e/tn') self.assert_path(resp, 'response/lobby/e/topt') self.assert_path(resp, 'response/lobby/e/lid') self.assert_path(resp, 'response/lobby/e/sn') self.assert_path(resp, 'response/lobby/e/pref') self.assert_path(resp, 'response/lobby/e/stg') self.assert_path(resp, 'response/lobby/e/pside') self.assert_path(resp, 'response/lobby/e/eatime') self.assert_path(resp, 'response/lobby/e/ga') self.assert_path(resp, 'response/lobby/e/gp') self.assert_path(resp, 'response/lobby/e/la') self.assert_path(resp, 'response/lobby/e/ver') return resp.child_value('lobby/eid') def verify_lobby_delete(self, eid: int) ->None: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'delete') lobby.add_child(Node.s32('eid', eid)) call.add_child(lobby) resp = self.exchange('', call) self.assert_path(resp, 'response/lobby') def verify_pzlcmt_read(self, extid: int) ->None: call = self.call_node() info = 
Node.void('info') info.set_attribute('method', 'pzlcmt_read') info.add_child(Node.s32('uid', extid)) info.add_child(Node.s32('tid', 0)) info.add_child(Node.s32('time', 0)) info.add_child(Node.s32('limit', 30)) call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info/comment/time') self.assert_path(resp, 'response/info/c/uid') self.assert_path(resp, 'response/info/c/name') self.assert_path(resp, 'response/info/c/icon') self.assert_path(resp, 'response/info/c/bln') self.assert_path(resp, 'response/info/c/tid') self.assert_path(resp, 'response/info/c/t_name') self.assert_path(resp, 'response/info/c/pref') self.assert_path(resp, 'response/info/c/time') self.assert_path(resp, 'response/info/c/comment') self.assert_path(resp, 'response/info/c/is_tweet') found = False for child in resp.child('info').children: if child.name != 'c': continue if child.child_value('uid') == extid: name = child.child_value('name') comment = child.child_value('comment') if name != self.NAME: raise Exception("Invalid name '{}' returned for comment!" 
.format(name)) if comment != 'アメ〜〜!': raise Exception( "Invalid comment '{}' returned for comment!".format (comment)) found = True if not found: raise Exception('Comment we posted was not found!') <|reserved_special_token_0|> def verify_jbrbcollabo_save(self, refid: str) ->None: call = self.call_node() jbrbcollabo = Node.void('jbrbcollabo') jbrbcollabo.set_attribute('method', 'save') jbrbcollabo.add_child(Node.string('ref_id', refid)) jbrbcollabo.add_child(Node.u16('cre_count', 0)) call.add_child(jbrbcollabo) resp = self.exchange('', call) self.assert_path(resp, 'response/jbrbcollabo') def verify(self, cardid: Optional[str]) ->None: self.verify_services_get(expected_services=['pcbtracker', 'pcbevent', 'local', 'message', 'facility', 'cardmng', 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby', 'ntp', 'keepalive']) paseli_enabled = self.verify_pcbtracker_alive() self.verify_message_get() self.verify_package_list() location = self.verify_facility_get() self.verify_pcbevent_put() self.verify_pcb_boot(location) self.verify_info_common() if cardid is not None: card = cardid else: card = self.random_card() print('Generated random card ID {} for use.'.format(card)) if cardid is None: self.verify_cardmng_inquire(card, msg_type='unregistered', paseli_enabled=paseli_enabled) ref_id = self.verify_cardmng_getrefid(card) if len(ref_id) != 16: raise Exception( "Invalid refid '{}' returned when registering card". 
format(ref_id)) if ref_id != self.verify_cardmng_inquire(card, msg_type='new', paseli_enabled=paseli_enabled): raise Exception( "Invalid refid '{}' returned when querying card".format (ref_id)) self.verify_player_start(ref_id) self.verify_player_delete(ref_id) self.verify_player_succeed(ref_id) extid = self.verify_player_write(ref_id, location, [{'id': 0, 'chart': 0, 'clear_type': -1, 'achievement_rate': 0, 'score': 0, 'combo': 0, 'miss_count': 0}]) else: print('Skipping new card checks for existing card') ref_id = self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled) self.verify_cardmng_authpass(ref_id, correct=True) self.verify_cardmng_authpass(ref_id, correct=False) if ref_id != self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled): raise Exception("Invalid refid '{}' returned when querying card" .format(ref_id)) self.verify_lobby_read(location, extid) eid = self.verify_lobby_entry(location, extid) self.verify_lobby_delete(eid) self.verify_pzlcmt_write(extid) self.verify_pzlcmt_read(extid) self.verify_jbrbcollabo_save(ref_id) if cardid is None: for phase in [1, 2]: if phase == 1: dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2, 'achievement_rate': 7543, 'score': 432, 'combo': 123, 'miss_count': 5}, {'id': 1, 'chart': 0, 'clear_type': 4, 'achievement_rate': 9876, 'score': 543, 'combo': 543, 'miss_count': 0}, {'id': 3, 'chart': 2, 'clear_type': 2, 'achievement_rate': 1234, 'score': 123, 'combo': 42, 'miss_count': 54}, {'id': 3, 'chart': 0, 'clear_type': 2, 'achievement_rate': 1024, 'score': 50, 'combo': 12, 'miss_count': 90}] if phase == 2: dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 3, 'achievement_rate': 8765, 'score': 469, 'combo': 468, 'miss_count': 1}, {'id': 1, 'chart': 0, 'clear_type': 2, 'achievement_rate': 8765, 'score': 432, 'combo': 321, 'miss_count': 15, 'expected_score': 543, 'expected_clear_type': 4, 'expected_achievement_rate': 9876, 'expected_combo': 543, 'expected_miss_count': 
0}] self.verify_player_write(ref_id, location, dummyscores) scores = self.verify_player_read(ref_id, location) for expected in dummyscores: actual = None for received in scores: if received['id'] == expected['id'] and received[ 'chart'] == expected['chart']: actual = received break if actual is None: raise Exception( "Didn't find song {} chart {} in response!". format(expected['id'], expected['chart'])) if 'expected_score' in expected: expected_score = expected['expected_score'] else: expected_score = expected['score'] if 'expected_achievement_rate' in expected: expected_achievement_rate = expected[ 'expected_achievement_rate'] else: expected_achievement_rate = expected['achievement_rate' ] if 'expected_clear_type' in expected: expected_clear_type = expected['expected_clear_type'] else: expected_clear_type = expected['clear_type'] if 'expected_combo' in expected: expected_combo = expected['expected_combo'] else: expected_combo = expected['combo'] if 'expected_miss_count' in expected: expected_miss_count = expected['expected_miss_count'] else: expected_miss_count = expected['miss_count'] if actual['score'] != expected_score: raise Exception( "Expected a score of '{}' for song '{}' chart '{}' but got score '{}'" .format(expected_score, expected['id'], expected['chart'], actual['score'])) if actual['achievement_rate'] != expected_achievement_rate: raise Exception( "Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'" .format(expected_achievement_rate, expected[ 'id'], expected['chart'], actual[ 'achievement_rate'])) if actual['clear_type'] != expected_clear_type: raise Exception( "Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'" .format(expected_clear_type, expected['id'], expected['chart'], actual['clear_type'])) if actual['combo'] != expected_combo: raise Exception( "Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'" .format(expected_combo, expected['id'], expected['chart'], 
actual['combo'])) if actual['miss_count'] != expected_miss_count: raise Exception( "Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'" .format(expected_miss_count, expected['id'], expected['chart'], actual['miss_count'])) time.sleep(1) else: print('Skipping score checks for existing card') self.verify_player_end(ref_id) self.verify_info_ranking() if paseli_enabled: print('PASELI enabled for this PCBID, executing PASELI checks') else: print('PASELI disabled for this PCBID, skipping PASELI checks') return sessid, balance = self.verify_eacoin_checkin(card) if balance == 0: print('Skipping PASELI consume check because card has 0 balance') else: self.verify_eacoin_consume(sessid, balance, random.randint(0, balance)) self.verify_eacoin_checkout(sessid) <|reserved_special_token_1|> <|reserved_special_token_0|> class ReflecBeatColette(BaseClient): <|reserved_special_token_0|> def verify_pcb_boot(self, loc: str) ->None: call = self.call_node() pcb = Node.void('pcb') pcb.set_attribute('method', 'boot') pcb.add_child(Node.string('lid', loc)) call.add_child(pcb) resp = self.exchange('', call) self.assert_path(resp, 'response/pcb/sinfo/nm') self.assert_path(resp, 'response/pcb/sinfo/cl_enbl') self.assert_path(resp, 'response/pcb/sinfo/cl_h') self.assert_path(resp, 'response/pcb/sinfo/cl_m') def verify_info_common(self) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'common') call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info/event_ctrl') self.assert_path(resp, 'response/info/item_lock_ctrl') def verify_info_ranking(self) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'ranking') info.add_child(Node.s32('ver', 0)) call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info/ver') self.assert_path(resp, 'response/info/ranking/weekly/bt') self.assert_path(resp, 'response/info/ranking/weekly/et') 
self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid') self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt') self.assert_path(resp, 'response/info/ranking/monthly/bt') self.assert_path(resp, 'response/info/ranking/monthly/et') self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid') self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt') self.assert_path(resp, 'response/info/ranking/total/bt') self.assert_path(resp, 'response/info/ranking/total/et') self.assert_path(resp, 'response/info/ranking/total/new/d/mid') self.assert_path(resp, 'response/info/ranking/total/new/d/cnt') def verify_player_start(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'start') player.add_child(Node.string('rid', refid)) player.add_child(Node.u8_array('ga', [127, 0, 0, 1])) player.add_child(Node.u16('gp', 10573)) player.add_child(Node.u8_array('la', [16, 0, 0, 0])) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player/plyid') self.assert_path(resp, 'response/player/start_time') self.assert_path(resp, 'response/player/event_ctrl') self.assert_path(resp, 'response/player/item_lock_ctrl') self.assert_path(resp, 'response/player/lincle_link_4') self.assert_path(resp, 'response/player/jbrbcollabo') self.assert_path(resp, 'response/player/tricolettepark') def verify_player_delete(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'delete') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player') def verify_player_end(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'end') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player') def verify_player_succeed(self, refid: str) 
->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'succeed') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player/name') self.assert_path(resp, 'response/player/lv') self.assert_path(resp, 'response/player/exp') self.assert_path(resp, 'response/player/grd') self.assert_path(resp, 'response/player/ap') self.assert_path(resp, 'response/player/released') self.assert_path(resp, 'response/player/mrecord') def verify_player_read(self, refid: str, location: str) ->List[Dict[str, int]]: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'read') player.add_child(Node.string('rid', refid)) player.add_child(Node.string('lid', location)) player.add_child(Node.s16('ver', 5)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player/pdata/account/usrid') self.assert_path(resp, 'response/player/pdata/account/tpc') self.assert_path(resp, 'response/player/pdata/account/dpc') self.assert_path(resp, 'response/player/pdata/account/crd') self.assert_path(resp, 'response/player/pdata/account/brd') self.assert_path(resp, 'response/player/pdata/account/tdc') self.assert_path(resp, 'response/player/pdata/account/intrvld') self.assert_path(resp, 'response/player/pdata/account/ver') self.assert_path(resp, 'response/player/pdata/account/pst') self.assert_path(resp, 'response/player/pdata/account/st') self.assert_path(resp, 'response/player/pdata/base/name') self.assert_path(resp, 'response/player/pdata/base/exp') self.assert_path(resp, 'response/player/pdata/base/lv') self.assert_path(resp, 'response/player/pdata/base/mg') self.assert_path(resp, 'response/player/pdata/base/ap') self.assert_path(resp, 'response/player/pdata/base/tid') self.assert_path(resp, 'response/player/pdata/base/tname') self.assert_path(resp, 'response/player/pdata/base/cmnt') self.assert_path(resp, 
'response/player/pdata/base/uattr') self.assert_path(resp, 'response/player/pdata/base/hidden_param') self.assert_path(resp, 'response/player/pdata/base/tbs') self.assert_path(resp, 'response/player/pdata/base/tbs_r') self.assert_path(resp, 'response/player/pdata/rival') self.assert_path(resp, 'response/player/pdata/fav_music_slot') self.assert_path(resp, 'response/player/pdata/custom') self.assert_path(resp, 'response/player/pdata/config') self.assert_path(resp, 'response/player/pdata/stamp') self.assert_path(resp, 'response/player/pdata/released') self.assert_path(resp, 'response/player/pdata/record') if resp.child_value('player/pdata/base/name') != self.NAME: raise Exception('Invalid name {} returned on profile read!'. format(resp.child_value('player/pdata/base/name'))) scores = [] for child in resp.child('player/pdata/record').children: if child.name != 'rec': continue score = {'id': child.child_value('mid'), 'chart': child. child_value('ntgrd'), 'clear_type': child.child_value('ct'), 'achievement_rate': child.child_value('ar'), 'score': child .child_value('scr'), 'combo': child.child_value('cmb'), 'miss_count': child.child_value('ms')} scores.append(score) return scores def verify_player_write(self, refid: str, loc: str, scores: List[Dict[ str, int]]) ->int: call = self.call_node() player = Node.void('player') call.add_child(player) player.set_attribute('method', 'write') pdata = Node.void('pdata') player.add_child(pdata) account = Node.void('account') pdata.add_child(account) account.add_child(Node.s32('usrid', 0)) account.add_child(Node.s32('plyid', 0)) account.add_child(Node.s32('tpc', 1)) account.add_child(Node.s32('dpc', 1)) account.add_child(Node.s32('crd', 1)) account.add_child(Node.s32('brd', 1)) account.add_child(Node.s32('tdc', 1)) account.add_child(Node.string('rid', refid)) account.add_child(Node.string('lid', loc)) account.add_child(Node.u8('mode', 0)) account.add_child(Node.s16('ver', 5)) account.add_child(Node.bool('pp', True)) 
account.add_child(Node.bool('ps', True)) account.add_child(Node.s16('pay', 0)) account.add_child(Node.s16('pay_pc', 0)) account.add_child(Node.u64('st', int(time.time() * 1000))) base = Node.void('base') pdata.add_child(base) base.add_child(Node.string('name', self.NAME)) base.add_child(Node.s32('exp', 0)) base.add_child(Node.s32('lv', 1)) base.add_child(Node.s32('mg', -1)) base.add_child(Node.s32('ap', -1)) base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])) base.add_child(Node.bool('is_tut', True)) stglog = Node.void('stglog') pdata.add_child(stglog) index = 0 for score in scores: log = Node.void('log') stglog.add_child(log) log.add_child(Node.s8('stg', index)) log.add_child(Node.s16('mid', score['id'])) log.add_child(Node.s8('ng', score['chart'])) log.add_child(Node.s8('col', 0)) log.add_child(Node.s8('mt', 7)) log.add_child(Node.s8('rt', 0)) log.add_child(Node.s8('ct', score['clear_type'])) log.add_child(Node.s16('grd', 0)) log.add_child(Node.s16('ar', score['achievement_rate'])) log.add_child(Node.s16('sc', score['score'])) log.add_child(Node.s16('jt_jst', 0)) log.add_child(Node.s16('jt_grt', 0)) log.add_child(Node.s16('jt_gd', 0)) log.add_child(Node.s16('jt_ms', score['miss_count'])) log.add_child(Node.s16('jt_jr', 0)) log.add_child(Node.s16('cmb', score['combo'])) log.add_child(Node.s16('exp', 0)) log.add_child(Node.s32('r_uid', 0)) log.add_child(Node.s32('r_plyid', 0)) log.add_child(Node.s8('r_stg', 0)) log.add_child(Node.s8('r_ct', -1)) log.add_child(Node.s16('r_sc', 0)) log.add_child(Node.s16('r_grd', 0)) log.add_child(Node.s16('r_ar', 0)) log.add_child(Node.s8('r_cpuid', -1)) log.add_child(Node.s32('time', int(time.time()))) log.add_child(Node.s8('decide', 0)) index = index + 1 resp = self.exchange('', call) self.assert_path(resp, 'response/player/uid') return resp.child_value('player/uid') 
<|reserved_special_token_0|> def verify_lobby_entry(self, location: str, extid: int) ->int: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'entry') e = Node.void('e') lobby.add_child(e) e.add_child(Node.s32('eid', 0)) e.add_child(Node.u16('mid', 79)) e.add_child(Node.u8('ng', 0)) e.add_child(Node.s32('uid', extid)) e.add_child(Node.s32('uattr', 0)) e.add_child(Node.string('pn', self.NAME)) e.add_child(Node.s16('mg', 255)) e.add_child(Node.s32('mopt', 0)) e.add_child(Node.s32('tid', 0)) e.add_child(Node.string('tn', '')) e.add_child(Node.s32('topt', 0)) e.add_child(Node.string('lid', location)) e.add_child(Node.string('sn', '')) e.add_child(Node.u8('pref', 51)) e.add_child(Node.s8('stg', 4)) e.add_child(Node.s8('pside', 0)) e.add_child(Node.s16('eatime', 30)) e.add_child(Node.u8_array('ga', [127, 0, 0, 1])) e.add_child(Node.u16('gp', 10007)) e.add_child(Node.u8_array('la', [16, 0, 0, 0])) e.add_child(Node.u8('ver', 5)) lobby.add_child(Node.s32_array('friend', [])) call.add_child(lobby) resp = self.exchange('', call) self.assert_path(resp, 'response/lobby/interval') self.assert_path(resp, 'response/lobby/interval_p') self.assert_path(resp, 'response/lobby/eid') self.assert_path(resp, 'response/lobby/e/eid') self.assert_path(resp, 'response/lobby/e/mid') self.assert_path(resp, 'response/lobby/e/ng') self.assert_path(resp, 'response/lobby/e/uid') self.assert_path(resp, 'response/lobby/e/uattr') self.assert_path(resp, 'response/lobby/e/pn') self.assert_path(resp, 'response/lobby/e/mg') self.assert_path(resp, 'response/lobby/e/mopt') self.assert_path(resp, 'response/lobby/e/tid') self.assert_path(resp, 'response/lobby/e/tn') self.assert_path(resp, 'response/lobby/e/topt') self.assert_path(resp, 'response/lobby/e/lid') self.assert_path(resp, 'response/lobby/e/sn') self.assert_path(resp, 'response/lobby/e/pref') self.assert_path(resp, 'response/lobby/e/stg') self.assert_path(resp, 'response/lobby/e/pside') self.assert_path(resp, 
'response/lobby/e/eatime') self.assert_path(resp, 'response/lobby/e/ga') self.assert_path(resp, 'response/lobby/e/gp') self.assert_path(resp, 'response/lobby/e/la') self.assert_path(resp, 'response/lobby/e/ver') return resp.child_value('lobby/eid') def verify_lobby_delete(self, eid: int) ->None: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'delete') lobby.add_child(Node.s32('eid', eid)) call.add_child(lobby) resp = self.exchange('', call) self.assert_path(resp, 'response/lobby') def verify_pzlcmt_read(self, extid: int) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'pzlcmt_read') info.add_child(Node.s32('uid', extid)) info.add_child(Node.s32('tid', 0)) info.add_child(Node.s32('time', 0)) info.add_child(Node.s32('limit', 30)) call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info/comment/time') self.assert_path(resp, 'response/info/c/uid') self.assert_path(resp, 'response/info/c/name') self.assert_path(resp, 'response/info/c/icon') self.assert_path(resp, 'response/info/c/bln') self.assert_path(resp, 'response/info/c/tid') self.assert_path(resp, 'response/info/c/t_name') self.assert_path(resp, 'response/info/c/pref') self.assert_path(resp, 'response/info/c/time') self.assert_path(resp, 'response/info/c/comment') self.assert_path(resp, 'response/info/c/is_tweet') found = False for child in resp.child('info').children: if child.name != 'c': continue if child.child_value('uid') == extid: name = child.child_value('name') comment = child.child_value('comment') if name != self.NAME: raise Exception("Invalid name '{}' returned for comment!" 
.format(name)) if comment != 'アメ〜〜!': raise Exception( "Invalid comment '{}' returned for comment!".format (comment)) found = True if not found: raise Exception('Comment we posted was not found!') def verify_pzlcmt_write(self, extid: int) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'pzlcmt_write') info.add_child(Node.s32('uid', extid)) info.add_child(Node.string('name', self.NAME)) info.add_child(Node.s16('icon', 0)) info.add_child(Node.s8('bln', 0)) info.add_child(Node.s32('tid', 0)) info.add_child(Node.string('t_name', '')) info.add_child(Node.s8('pref', 51)) info.add_child(Node.s32('time', int(time.time()))) info.add_child(Node.string('comment', 'アメ〜〜!')) info.add_child(Node.bool('is_tweet', True)) call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info') def verify_jbrbcollabo_save(self, refid: str) ->None: call = self.call_node() jbrbcollabo = Node.void('jbrbcollabo') jbrbcollabo.set_attribute('method', 'save') jbrbcollabo.add_child(Node.string('ref_id', refid)) jbrbcollabo.add_child(Node.u16('cre_count', 0)) call.add_child(jbrbcollabo) resp = self.exchange('', call) self.assert_path(resp, 'response/jbrbcollabo') def verify(self, cardid: Optional[str]) ->None: self.verify_services_get(expected_services=['pcbtracker', 'pcbevent', 'local', 'message', 'facility', 'cardmng', 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby', 'ntp', 'keepalive']) paseli_enabled = self.verify_pcbtracker_alive() self.verify_message_get() self.verify_package_list() location = self.verify_facility_get() self.verify_pcbevent_put() self.verify_pcb_boot(location) self.verify_info_common() if cardid is not None: card = cardid else: card = self.random_card() print('Generated random card ID {} for use.'.format(card)) if cardid is None: self.verify_cardmng_inquire(card, msg_type='unregistered', paseli_enabled=paseli_enabled) ref_id = self.verify_cardmng_getrefid(card) if len(ref_id) != 16: raise 
Exception( "Invalid refid '{}' returned when registering card". format(ref_id)) if ref_id != self.verify_cardmng_inquire(card, msg_type='new', paseli_enabled=paseli_enabled): raise Exception( "Invalid refid '{}' returned when querying card".format (ref_id)) self.verify_player_start(ref_id) self.verify_player_delete(ref_id) self.verify_player_succeed(ref_id) extid = self.verify_player_write(ref_id, location, [{'id': 0, 'chart': 0, 'clear_type': -1, 'achievement_rate': 0, 'score': 0, 'combo': 0, 'miss_count': 0}]) else: print('Skipping new card checks for existing card') ref_id = self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled) self.verify_cardmng_authpass(ref_id, correct=True) self.verify_cardmng_authpass(ref_id, correct=False) if ref_id != self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled): raise Exception("Invalid refid '{}' returned when querying card" .format(ref_id)) self.verify_lobby_read(location, extid) eid = self.verify_lobby_entry(location, extid) self.verify_lobby_delete(eid) self.verify_pzlcmt_write(extid) self.verify_pzlcmt_read(extid) self.verify_jbrbcollabo_save(ref_id) if cardid is None: for phase in [1, 2]: if phase == 1: dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2, 'achievement_rate': 7543, 'score': 432, 'combo': 123, 'miss_count': 5}, {'id': 1, 'chart': 0, 'clear_type': 4, 'achievement_rate': 9876, 'score': 543, 'combo': 543, 'miss_count': 0}, {'id': 3, 'chart': 2, 'clear_type': 2, 'achievement_rate': 1234, 'score': 123, 'combo': 42, 'miss_count': 54}, {'id': 3, 'chart': 0, 'clear_type': 2, 'achievement_rate': 1024, 'score': 50, 'combo': 12, 'miss_count': 90}] if phase == 2: dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 3, 'achievement_rate': 8765, 'score': 469, 'combo': 468, 'miss_count': 1}, {'id': 1, 'chart': 0, 'clear_type': 2, 'achievement_rate': 8765, 'score': 432, 'combo': 321, 'miss_count': 15, 'expected_score': 543, 'expected_clear_type': 4, 
'expected_achievement_rate': 9876, 'expected_combo': 543, 'expected_miss_count': 0}] self.verify_player_write(ref_id, location, dummyscores) scores = self.verify_player_read(ref_id, location) for expected in dummyscores: actual = None for received in scores: if received['id'] == expected['id'] and received[ 'chart'] == expected['chart']: actual = received break if actual is None: raise Exception( "Didn't find song {} chart {} in response!". format(expected['id'], expected['chart'])) if 'expected_score' in expected: expected_score = expected['expected_score'] else: expected_score = expected['score'] if 'expected_achievement_rate' in expected: expected_achievement_rate = expected[ 'expected_achievement_rate'] else: expected_achievement_rate = expected['achievement_rate' ] if 'expected_clear_type' in expected: expected_clear_type = expected['expected_clear_type'] else: expected_clear_type = expected['clear_type'] if 'expected_combo' in expected: expected_combo = expected['expected_combo'] else: expected_combo = expected['combo'] if 'expected_miss_count' in expected: expected_miss_count = expected['expected_miss_count'] else: expected_miss_count = expected['miss_count'] if actual['score'] != expected_score: raise Exception( "Expected a score of '{}' for song '{}' chart '{}' but got score '{}'" .format(expected_score, expected['id'], expected['chart'], actual['score'])) if actual['achievement_rate'] != expected_achievement_rate: raise Exception( "Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'" .format(expected_achievement_rate, expected[ 'id'], expected['chart'], actual[ 'achievement_rate'])) if actual['clear_type'] != expected_clear_type: raise Exception( "Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'" .format(expected_clear_type, expected['id'], expected['chart'], actual['clear_type'])) if actual['combo'] != expected_combo: raise Exception( "Expected a combo of '{}' for song '{}' chart '{}' 
but got combo '{}'" .format(expected_combo, expected['id'], expected['chart'], actual['combo'])) if actual['miss_count'] != expected_miss_count: raise Exception( "Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'" .format(expected_miss_count, expected['id'], expected['chart'], actual['miss_count'])) time.sleep(1) else: print('Skipping score checks for existing card') self.verify_player_end(ref_id) self.verify_info_ranking() if paseli_enabled: print('PASELI enabled for this PCBID, executing PASELI checks') else: print('PASELI disabled for this PCBID, skipping PASELI checks') return sessid, balance = self.verify_eacoin_checkin(card) if balance == 0: print('Skipping PASELI consume check because card has 0 balance') else: self.verify_eacoin_consume(sessid, balance, random.randint(0, balance)) self.verify_eacoin_checkout(sessid) <|reserved_special_token_1|> <|reserved_special_token_0|> class ReflecBeatColette(BaseClient): NAME = 'TEST' def verify_pcb_boot(self, loc: str) ->None: call = self.call_node() pcb = Node.void('pcb') pcb.set_attribute('method', 'boot') pcb.add_child(Node.string('lid', loc)) call.add_child(pcb) resp = self.exchange('', call) self.assert_path(resp, 'response/pcb/sinfo/nm') self.assert_path(resp, 'response/pcb/sinfo/cl_enbl') self.assert_path(resp, 'response/pcb/sinfo/cl_h') self.assert_path(resp, 'response/pcb/sinfo/cl_m') def verify_info_common(self) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'common') call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info/event_ctrl') self.assert_path(resp, 'response/info/item_lock_ctrl') def verify_info_ranking(self) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'ranking') info.add_child(Node.s32('ver', 0)) call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info/ver') self.assert_path(resp, 'response/info/ranking/weekly/bt') 
self.assert_path(resp, 'response/info/ranking/weekly/et') self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid') self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt') self.assert_path(resp, 'response/info/ranking/monthly/bt') self.assert_path(resp, 'response/info/ranking/monthly/et') self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid') self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt') self.assert_path(resp, 'response/info/ranking/total/bt') self.assert_path(resp, 'response/info/ranking/total/et') self.assert_path(resp, 'response/info/ranking/total/new/d/mid') self.assert_path(resp, 'response/info/ranking/total/new/d/cnt') def verify_player_start(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'start') player.add_child(Node.string('rid', refid)) player.add_child(Node.u8_array('ga', [127, 0, 0, 1])) player.add_child(Node.u16('gp', 10573)) player.add_child(Node.u8_array('la', [16, 0, 0, 0])) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player/plyid') self.assert_path(resp, 'response/player/start_time') self.assert_path(resp, 'response/player/event_ctrl') self.assert_path(resp, 'response/player/item_lock_ctrl') self.assert_path(resp, 'response/player/lincle_link_4') self.assert_path(resp, 'response/player/jbrbcollabo') self.assert_path(resp, 'response/player/tricolettepark') def verify_player_delete(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'delete') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player') def verify_player_end(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'end') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 
'response/player') def verify_player_succeed(self, refid: str) ->None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'succeed') player.add_child(Node.string('rid', refid)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player/name') self.assert_path(resp, 'response/player/lv') self.assert_path(resp, 'response/player/exp') self.assert_path(resp, 'response/player/grd') self.assert_path(resp, 'response/player/ap') self.assert_path(resp, 'response/player/released') self.assert_path(resp, 'response/player/mrecord') def verify_player_read(self, refid: str, location: str) ->List[Dict[str, int]]: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'read') player.add_child(Node.string('rid', refid)) player.add_child(Node.string('lid', location)) player.add_child(Node.s16('ver', 5)) call.add_child(player) resp = self.exchange('', call) self.assert_path(resp, 'response/player/pdata/account/usrid') self.assert_path(resp, 'response/player/pdata/account/tpc') self.assert_path(resp, 'response/player/pdata/account/dpc') self.assert_path(resp, 'response/player/pdata/account/crd') self.assert_path(resp, 'response/player/pdata/account/brd') self.assert_path(resp, 'response/player/pdata/account/tdc') self.assert_path(resp, 'response/player/pdata/account/intrvld') self.assert_path(resp, 'response/player/pdata/account/ver') self.assert_path(resp, 'response/player/pdata/account/pst') self.assert_path(resp, 'response/player/pdata/account/st') self.assert_path(resp, 'response/player/pdata/base/name') self.assert_path(resp, 'response/player/pdata/base/exp') self.assert_path(resp, 'response/player/pdata/base/lv') self.assert_path(resp, 'response/player/pdata/base/mg') self.assert_path(resp, 'response/player/pdata/base/ap') self.assert_path(resp, 'response/player/pdata/base/tid') self.assert_path(resp, 'response/player/pdata/base/tname') self.assert_path(resp, 
'response/player/pdata/base/cmnt') self.assert_path(resp, 'response/player/pdata/base/uattr') self.assert_path(resp, 'response/player/pdata/base/hidden_param') self.assert_path(resp, 'response/player/pdata/base/tbs') self.assert_path(resp, 'response/player/pdata/base/tbs_r') self.assert_path(resp, 'response/player/pdata/rival') self.assert_path(resp, 'response/player/pdata/fav_music_slot') self.assert_path(resp, 'response/player/pdata/custom') self.assert_path(resp, 'response/player/pdata/config') self.assert_path(resp, 'response/player/pdata/stamp') self.assert_path(resp, 'response/player/pdata/released') self.assert_path(resp, 'response/player/pdata/record') if resp.child_value('player/pdata/base/name') != self.NAME: raise Exception('Invalid name {} returned on profile read!'. format(resp.child_value('player/pdata/base/name'))) scores = [] for child in resp.child('player/pdata/record').children: if child.name != 'rec': continue score = {'id': child.child_value('mid'), 'chart': child. child_value('ntgrd'), 'clear_type': child.child_value('ct'), 'achievement_rate': child.child_value('ar'), 'score': child .child_value('scr'), 'combo': child.child_value('cmb'), 'miss_count': child.child_value('ms')} scores.append(score) return scores def verify_player_write(self, refid: str, loc: str, scores: List[Dict[ str, int]]) ->int: call = self.call_node() player = Node.void('player') call.add_child(player) player.set_attribute('method', 'write') pdata = Node.void('pdata') player.add_child(pdata) account = Node.void('account') pdata.add_child(account) account.add_child(Node.s32('usrid', 0)) account.add_child(Node.s32('plyid', 0)) account.add_child(Node.s32('tpc', 1)) account.add_child(Node.s32('dpc', 1)) account.add_child(Node.s32('crd', 1)) account.add_child(Node.s32('brd', 1)) account.add_child(Node.s32('tdc', 1)) account.add_child(Node.string('rid', refid)) account.add_child(Node.string('lid', loc)) account.add_child(Node.u8('mode', 0)) account.add_child(Node.s16('ver', 5)) 
account.add_child(Node.bool('pp', True)) account.add_child(Node.bool('ps', True)) account.add_child(Node.s16('pay', 0)) account.add_child(Node.s16('pay_pc', 0)) account.add_child(Node.u64('st', int(time.time() * 1000))) base = Node.void('base') pdata.add_child(base) base.add_child(Node.string('name', self.NAME)) base.add_child(Node.s32('exp', 0)) base.add_child(Node.s32('lv', 1)) base.add_child(Node.s32('mg', -1)) base.add_child(Node.s32('ap', -1)) base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])) base.add_child(Node.bool('is_tut', True)) stglog = Node.void('stglog') pdata.add_child(stglog) index = 0 for score in scores: log = Node.void('log') stglog.add_child(log) log.add_child(Node.s8('stg', index)) log.add_child(Node.s16('mid', score['id'])) log.add_child(Node.s8('ng', score['chart'])) log.add_child(Node.s8('col', 0)) log.add_child(Node.s8('mt', 7)) log.add_child(Node.s8('rt', 0)) log.add_child(Node.s8('ct', score['clear_type'])) log.add_child(Node.s16('grd', 0)) log.add_child(Node.s16('ar', score['achievement_rate'])) log.add_child(Node.s16('sc', score['score'])) log.add_child(Node.s16('jt_jst', 0)) log.add_child(Node.s16('jt_grt', 0)) log.add_child(Node.s16('jt_gd', 0)) log.add_child(Node.s16('jt_ms', score['miss_count'])) log.add_child(Node.s16('jt_jr', 0)) log.add_child(Node.s16('cmb', score['combo'])) log.add_child(Node.s16('exp', 0)) log.add_child(Node.s32('r_uid', 0)) log.add_child(Node.s32('r_plyid', 0)) log.add_child(Node.s8('r_stg', 0)) log.add_child(Node.s8('r_ct', -1)) log.add_child(Node.s16('r_sc', 0)) log.add_child(Node.s16('r_grd', 0)) log.add_child(Node.s16('r_ar', 0)) log.add_child(Node.s8('r_cpuid', -1)) log.add_child(Node.s32('time', int(time.time()))) log.add_child(Node.s8('decide', 0)) index = index + 1 resp = self.exchange('', call) self.assert_path(resp, 'response/player/uid') return 
resp.child_value('player/uid') def verify_lobby_read(self, location: str, extid: int) ->None: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'read') lobby.add_child(Node.s32('uid', extid)) lobby.add_child(Node.u8('m_grade', 255)) lobby.add_child(Node.string('lid', location)) lobby.add_child(Node.s32('max', 128)) lobby.add_child(Node.s32_array('friend', [])) lobby.add_child(Node.u8('var', 5)) call.add_child(lobby) resp = self.exchange('', call) self.assert_path(resp, 'response/lobby/interval') self.assert_path(resp, 'response/lobby/interval_p') def verify_lobby_entry(self, location: str, extid: int) ->int: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'entry') e = Node.void('e') lobby.add_child(e) e.add_child(Node.s32('eid', 0)) e.add_child(Node.u16('mid', 79)) e.add_child(Node.u8('ng', 0)) e.add_child(Node.s32('uid', extid)) e.add_child(Node.s32('uattr', 0)) e.add_child(Node.string('pn', self.NAME)) e.add_child(Node.s16('mg', 255)) e.add_child(Node.s32('mopt', 0)) e.add_child(Node.s32('tid', 0)) e.add_child(Node.string('tn', '')) e.add_child(Node.s32('topt', 0)) e.add_child(Node.string('lid', location)) e.add_child(Node.string('sn', '')) e.add_child(Node.u8('pref', 51)) e.add_child(Node.s8('stg', 4)) e.add_child(Node.s8('pside', 0)) e.add_child(Node.s16('eatime', 30)) e.add_child(Node.u8_array('ga', [127, 0, 0, 1])) e.add_child(Node.u16('gp', 10007)) e.add_child(Node.u8_array('la', [16, 0, 0, 0])) e.add_child(Node.u8('ver', 5)) lobby.add_child(Node.s32_array('friend', [])) call.add_child(lobby) resp = self.exchange('', call) self.assert_path(resp, 'response/lobby/interval') self.assert_path(resp, 'response/lobby/interval_p') self.assert_path(resp, 'response/lobby/eid') self.assert_path(resp, 'response/lobby/e/eid') self.assert_path(resp, 'response/lobby/e/mid') self.assert_path(resp, 'response/lobby/e/ng') self.assert_path(resp, 'response/lobby/e/uid') self.assert_path(resp, 
'response/lobby/e/uattr') self.assert_path(resp, 'response/lobby/e/pn') self.assert_path(resp, 'response/lobby/e/mg') self.assert_path(resp, 'response/lobby/e/mopt') self.assert_path(resp, 'response/lobby/e/tid') self.assert_path(resp, 'response/lobby/e/tn') self.assert_path(resp, 'response/lobby/e/topt') self.assert_path(resp, 'response/lobby/e/lid') self.assert_path(resp, 'response/lobby/e/sn') self.assert_path(resp, 'response/lobby/e/pref') self.assert_path(resp, 'response/lobby/e/stg') self.assert_path(resp, 'response/lobby/e/pside') self.assert_path(resp, 'response/lobby/e/eatime') self.assert_path(resp, 'response/lobby/e/ga') self.assert_path(resp, 'response/lobby/e/gp') self.assert_path(resp, 'response/lobby/e/la') self.assert_path(resp, 'response/lobby/e/ver') return resp.child_value('lobby/eid') def verify_lobby_delete(self, eid: int) ->None: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'delete') lobby.add_child(Node.s32('eid', eid)) call.add_child(lobby) resp = self.exchange('', call) self.assert_path(resp, 'response/lobby') def verify_pzlcmt_read(self, extid: int) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'pzlcmt_read') info.add_child(Node.s32('uid', extid)) info.add_child(Node.s32('tid', 0)) info.add_child(Node.s32('time', 0)) info.add_child(Node.s32('limit', 30)) call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info/comment/time') self.assert_path(resp, 'response/info/c/uid') self.assert_path(resp, 'response/info/c/name') self.assert_path(resp, 'response/info/c/icon') self.assert_path(resp, 'response/info/c/bln') self.assert_path(resp, 'response/info/c/tid') self.assert_path(resp, 'response/info/c/t_name') self.assert_path(resp, 'response/info/c/pref') self.assert_path(resp, 'response/info/c/time') self.assert_path(resp, 'response/info/c/comment') self.assert_path(resp, 'response/info/c/is_tweet') found = False for child in 
resp.child('info').children: if child.name != 'c': continue if child.child_value('uid') == extid: name = child.child_value('name') comment = child.child_value('comment') if name != self.NAME: raise Exception("Invalid name '{}' returned for comment!" .format(name)) if comment != 'アメ〜〜!': raise Exception( "Invalid comment '{}' returned for comment!".format (comment)) found = True if not found: raise Exception('Comment we posted was not found!') def verify_pzlcmt_write(self, extid: int) ->None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'pzlcmt_write') info.add_child(Node.s32('uid', extid)) info.add_child(Node.string('name', self.NAME)) info.add_child(Node.s16('icon', 0)) info.add_child(Node.s8('bln', 0)) info.add_child(Node.s32('tid', 0)) info.add_child(Node.string('t_name', '')) info.add_child(Node.s8('pref', 51)) info.add_child(Node.s32('time', int(time.time()))) info.add_child(Node.string('comment', 'アメ〜〜!')) info.add_child(Node.bool('is_tweet', True)) call.add_child(info) resp = self.exchange('', call) self.assert_path(resp, 'response/info') def verify_jbrbcollabo_save(self, refid: str) ->None: call = self.call_node() jbrbcollabo = Node.void('jbrbcollabo') jbrbcollabo.set_attribute('method', 'save') jbrbcollabo.add_child(Node.string('ref_id', refid)) jbrbcollabo.add_child(Node.u16('cre_count', 0)) call.add_child(jbrbcollabo) resp = self.exchange('', call) self.assert_path(resp, 'response/jbrbcollabo') def verify(self, cardid: Optional[str]) ->None: self.verify_services_get(expected_services=['pcbtracker', 'pcbevent', 'local', 'message', 'facility', 'cardmng', 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby', 'ntp', 'keepalive']) paseli_enabled = self.verify_pcbtracker_alive() self.verify_message_get() self.verify_package_list() location = self.verify_facility_get() self.verify_pcbevent_put() self.verify_pcb_boot(location) self.verify_info_common() if cardid is not None: card = cardid else: card = 
self.random_card() print('Generated random card ID {} for use.'.format(card)) if cardid is None: self.verify_cardmng_inquire(card, msg_type='unregistered', paseli_enabled=paseli_enabled) ref_id = self.verify_cardmng_getrefid(card) if len(ref_id) != 16: raise Exception( "Invalid refid '{}' returned when registering card". format(ref_id)) if ref_id != self.verify_cardmng_inquire(card, msg_type='new', paseli_enabled=paseli_enabled): raise Exception( "Invalid refid '{}' returned when querying card".format (ref_id)) self.verify_player_start(ref_id) self.verify_player_delete(ref_id) self.verify_player_succeed(ref_id) extid = self.verify_player_write(ref_id, location, [{'id': 0, 'chart': 0, 'clear_type': -1, 'achievement_rate': 0, 'score': 0, 'combo': 0, 'miss_count': 0}]) else: print('Skipping new card checks for existing card') ref_id = self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled) self.verify_cardmng_authpass(ref_id, correct=True) self.verify_cardmng_authpass(ref_id, correct=False) if ref_id != self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled): raise Exception("Invalid refid '{}' returned when querying card" .format(ref_id)) self.verify_lobby_read(location, extid) eid = self.verify_lobby_entry(location, extid) self.verify_lobby_delete(eid) self.verify_pzlcmt_write(extid) self.verify_pzlcmt_read(extid) self.verify_jbrbcollabo_save(ref_id) if cardid is None: for phase in [1, 2]: if phase == 1: dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2, 'achievement_rate': 7543, 'score': 432, 'combo': 123, 'miss_count': 5}, {'id': 1, 'chart': 0, 'clear_type': 4, 'achievement_rate': 9876, 'score': 543, 'combo': 543, 'miss_count': 0}, {'id': 3, 'chart': 2, 'clear_type': 2, 'achievement_rate': 1234, 'score': 123, 'combo': 42, 'miss_count': 54}, {'id': 3, 'chart': 0, 'clear_type': 2, 'achievement_rate': 1024, 'score': 50, 'combo': 12, 'miss_count': 90}] if phase == 2: dummyscores = [{'id': 1, 'chart': 1, 
'clear_type': 3, 'achievement_rate': 8765, 'score': 469, 'combo': 468, 'miss_count': 1}, {'id': 1, 'chart': 0, 'clear_type': 2, 'achievement_rate': 8765, 'score': 432, 'combo': 321, 'miss_count': 15, 'expected_score': 543, 'expected_clear_type': 4, 'expected_achievement_rate': 9876, 'expected_combo': 543, 'expected_miss_count': 0}] self.verify_player_write(ref_id, location, dummyscores) scores = self.verify_player_read(ref_id, location) for expected in dummyscores: actual = None for received in scores: if received['id'] == expected['id'] and received[ 'chart'] == expected['chart']: actual = received break if actual is None: raise Exception( "Didn't find song {} chart {} in response!". format(expected['id'], expected['chart'])) if 'expected_score' in expected: expected_score = expected['expected_score'] else: expected_score = expected['score'] if 'expected_achievement_rate' in expected: expected_achievement_rate = expected[ 'expected_achievement_rate'] else: expected_achievement_rate = expected['achievement_rate' ] if 'expected_clear_type' in expected: expected_clear_type = expected['expected_clear_type'] else: expected_clear_type = expected['clear_type'] if 'expected_combo' in expected: expected_combo = expected['expected_combo'] else: expected_combo = expected['combo'] if 'expected_miss_count' in expected: expected_miss_count = expected['expected_miss_count'] else: expected_miss_count = expected['miss_count'] if actual['score'] != expected_score: raise Exception( "Expected a score of '{}' for song '{}' chart '{}' but got score '{}'" .format(expected_score, expected['id'], expected['chart'], actual['score'])) if actual['achievement_rate'] != expected_achievement_rate: raise Exception( "Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'" .format(expected_achievement_rate, expected[ 'id'], expected['chart'], actual[ 'achievement_rate'])) if actual['clear_type'] != expected_clear_type: raise Exception( "Expected a clear_type of 
'{}' for song '{}' chart '{}' but got clear_type '{}'" .format(expected_clear_type, expected['id'], expected['chart'], actual['clear_type'])) if actual['combo'] != expected_combo: raise Exception( "Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'" .format(expected_combo, expected['id'], expected['chart'], actual['combo'])) if actual['miss_count'] != expected_miss_count: raise Exception( "Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'" .format(expected_miss_count, expected['id'], expected['chart'], actual['miss_count'])) time.sleep(1) else: print('Skipping score checks for existing card') self.verify_player_end(ref_id) self.verify_info_ranking() if paseli_enabled: print('PASELI enabled for this PCBID, executing PASELI checks') else: print('PASELI disabled for this PCBID, skipping PASELI checks') return sessid, balance = self.verify_eacoin_checkin(card) if balance == 0: print('Skipping PASELI consume check because card has 0 balance') else: self.verify_eacoin_consume(sessid, balance, random.randint(0, balance)) self.verify_eacoin_checkout(sessid) <|reserved_special_token_1|> import random import time from typing import Dict, List, Optional from bemani.client.base import BaseClient from bemani.protocol import Node class ReflecBeatColette(BaseClient): NAME = 'TEST' def verify_pcb_boot(self, loc: str) -> None: call = self.call_node() pcb = Node.void('pcb') pcb.set_attribute('method', 'boot') pcb.add_child(Node.string('lid', loc)) call.add_child(pcb) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/pcb/sinfo/nm") self.assert_path(resp, "response/pcb/sinfo/cl_enbl") self.assert_path(resp, "response/pcb/sinfo/cl_h") self.assert_path(resp, "response/pcb/sinfo/cl_m") def verify_info_common(self) -> None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'common') call.add_child(info) # Swap with server resp = self.exchange('', 
call) # Verify that response is correct self.assert_path(resp, "response/info/event_ctrl") self.assert_path(resp, "response/info/item_lock_ctrl") def verify_info_ranking(self) -> None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'ranking') info.add_child(Node.s32('ver', 0)) call.add_child(info) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/info/ver") self.assert_path(resp, "response/info/ranking/weekly/bt") self.assert_path(resp, "response/info/ranking/weekly/et") self.assert_path(resp, "response/info/ranking/weekly/new/d/mid") self.assert_path(resp, "response/info/ranking/weekly/new/d/cnt") self.assert_path(resp, "response/info/ranking/monthly/bt") self.assert_path(resp, "response/info/ranking/monthly/et") self.assert_path(resp, "response/info/ranking/monthly/new/d/mid") self.assert_path(resp, "response/info/ranking/monthly/new/d/cnt") self.assert_path(resp, "response/info/ranking/total/bt") self.assert_path(resp, "response/info/ranking/total/et") self.assert_path(resp, "response/info/ranking/total/new/d/mid") self.assert_path(resp, "response/info/ranking/total/new/d/cnt") def verify_player_start(self, refid: str) -> None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'start') player.add_child(Node.string('rid', refid)) player.add_child(Node.u8_array('ga', [127, 0, 0, 1])) player.add_child(Node.u16('gp', 10573)) player.add_child(Node.u8_array('la', [16, 0, 0, 0])) call.add_child(player) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/player/plyid") self.assert_path(resp, "response/player/start_time") self.assert_path(resp, "response/player/event_ctrl") self.assert_path(resp, "response/player/item_lock_ctrl") self.assert_path(resp, "response/player/lincle_link_4") self.assert_path(resp, "response/player/jbrbcollabo") self.assert_path(resp, 
"response/player/tricolettepark") def verify_player_delete(self, refid: str) -> None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'delete') player.add_child(Node.string('rid', refid)) call.add_child(player) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/player") def verify_player_end(self, refid: str) -> None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'end') player.add_child(Node.string('rid', refid)) call.add_child(player) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/player") def verify_player_succeed(self, refid: str) -> None: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'succeed') player.add_child(Node.string('rid', refid)) call.add_child(player) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/player/name") self.assert_path(resp, "response/player/lv") self.assert_path(resp, "response/player/exp") self.assert_path(resp, "response/player/grd") self.assert_path(resp, "response/player/ap") self.assert_path(resp, "response/player/released") self.assert_path(resp, "response/player/mrecord") def verify_player_read(self, refid: str, location: str) -> List[Dict[str, int]]: call = self.call_node() player = Node.void('player') player.set_attribute('method', 'read') player.add_child(Node.string('rid', refid)) player.add_child(Node.string('lid', location)) player.add_child(Node.s16('ver', 5)) call.add_child(player) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/player/pdata/account/usrid") self.assert_path(resp, "response/player/pdata/account/tpc") self.assert_path(resp, "response/player/pdata/account/dpc") self.assert_path(resp, "response/player/pdata/account/crd") 
self.assert_path(resp, "response/player/pdata/account/brd") self.assert_path(resp, "response/player/pdata/account/tdc") self.assert_path(resp, "response/player/pdata/account/intrvld") self.assert_path(resp, "response/player/pdata/account/ver") self.assert_path(resp, "response/player/pdata/account/pst") self.assert_path(resp, "response/player/pdata/account/st") self.assert_path(resp, "response/player/pdata/base/name") self.assert_path(resp, "response/player/pdata/base/exp") self.assert_path(resp, "response/player/pdata/base/lv") self.assert_path(resp, "response/player/pdata/base/mg") self.assert_path(resp, "response/player/pdata/base/ap") self.assert_path(resp, "response/player/pdata/base/tid") self.assert_path(resp, "response/player/pdata/base/tname") self.assert_path(resp, "response/player/pdata/base/cmnt") self.assert_path(resp, "response/player/pdata/base/uattr") self.assert_path(resp, "response/player/pdata/base/hidden_param") self.assert_path(resp, "response/player/pdata/base/tbs") self.assert_path(resp, "response/player/pdata/base/tbs_r") self.assert_path(resp, "response/player/pdata/rival") self.assert_path(resp, "response/player/pdata/fav_music_slot") self.assert_path(resp, "response/player/pdata/custom") self.assert_path(resp, "response/player/pdata/config") self.assert_path(resp, "response/player/pdata/stamp") self.assert_path(resp, "response/player/pdata/released") self.assert_path(resp, "response/player/pdata/record") if resp.child_value('player/pdata/base/name') != self.NAME: raise Exception('Invalid name {} returned on profile read!'.format(resp.child_value('player/pdata/base/name'))) scores = [] for child in resp.child('player/pdata/record').children: if child.name != 'rec': continue score = { 'id': child.child_value('mid'), 'chart': child.child_value('ntgrd'), 'clear_type': child.child_value('ct'), 'achievement_rate': child.child_value('ar'), 'score': child.child_value('scr'), 'combo': child.child_value('cmb'), 'miss_count': child.child_value('ms'), 
} scores.append(score) return scores def verify_player_write(self, refid: str, loc: str, scores: List[Dict[str, int]]) -> int: call = self.call_node() player = Node.void('player') call.add_child(player) player.set_attribute('method', 'write') pdata = Node.void('pdata') player.add_child(pdata) account = Node.void('account') pdata.add_child(account) account.add_child(Node.s32('usrid', 0)) account.add_child(Node.s32('plyid', 0)) account.add_child(Node.s32('tpc', 1)) account.add_child(Node.s32('dpc', 1)) account.add_child(Node.s32('crd', 1)) account.add_child(Node.s32('brd', 1)) account.add_child(Node.s32('tdc', 1)) account.add_child(Node.string('rid', refid)) account.add_child(Node.string('lid', loc)) account.add_child(Node.u8('mode', 0)) account.add_child(Node.s16('ver', 5)) account.add_child(Node.bool('pp', True)) account.add_child(Node.bool('ps', True)) account.add_child(Node.s16('pay', 0)) account.add_child(Node.s16('pay_pc', 0)) account.add_child(Node.u64('st', int(time.time() * 1000))) base = Node.void('base') pdata.add_child(base) base.add_child(Node.string('name', self.NAME)) base.add_child(Node.s32('exp', 0)) base.add_child(Node.s32('lv', 1)) base.add_child(Node.s32('mg', -1)) base.add_child(Node.s32('ap', -1)) base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])) base.add_child(Node.bool('is_tut', True)) stglog = Node.void('stglog') pdata.add_child(stglog) index = 0 for score in scores: log = Node.void('log') stglog.add_child(log) log.add_child(Node.s8('stg', index)) log.add_child(Node.s16('mid', score['id'])) log.add_child(Node.s8('ng', score['chart'])) log.add_child(Node.s8('col', 0)) log.add_child(Node.s8('mt', 7)) log.add_child(Node.s8('rt', 0)) log.add_child(Node.s8('ct', score['clear_type'])) log.add_child(Node.s16('grd', 0)) log.add_child(Node.s16('ar', score['achievement_rate'])) log.add_child(Node.s16('sc', 
score['score'])) log.add_child(Node.s16('jt_jst', 0)) log.add_child(Node.s16('jt_grt', 0)) log.add_child(Node.s16('jt_gd', 0)) log.add_child(Node.s16('jt_ms', score['miss_count'])) log.add_child(Node.s16('jt_jr', 0)) log.add_child(Node.s16('cmb', score['combo'])) log.add_child(Node.s16('exp', 0)) log.add_child(Node.s32('r_uid', 0)) log.add_child(Node.s32('r_plyid', 0)) log.add_child(Node.s8('r_stg', 0)) log.add_child(Node.s8('r_ct', -1)) log.add_child(Node.s16('r_sc', 0)) log.add_child(Node.s16('r_grd', 0)) log.add_child(Node.s16('r_ar', 0)) log.add_child(Node.s8('r_cpuid', -1)) log.add_child(Node.s32('time', int(time.time()))) log.add_child(Node.s8('decide', 0)) index = index + 1 # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/player/uid") return resp.child_value('player/uid') def verify_lobby_read(self, location: str, extid: int) -> None: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'read') lobby.add_child(Node.s32('uid', extid)) lobby.add_child(Node.u8('m_grade', 255)) lobby.add_child(Node.string('lid', location)) lobby.add_child(Node.s32('max', 128)) lobby.add_child(Node.s32_array('friend', [])) lobby.add_child(Node.u8('var', 5)) call.add_child(lobby) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/lobby/interval") self.assert_path(resp, "response/lobby/interval_p") def verify_lobby_entry(self, location: str, extid: int) -> int: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'entry') e = Node.void('e') lobby.add_child(e) e.add_child(Node.s32('eid', 0)) e.add_child(Node.u16('mid', 79)) e.add_child(Node.u8('ng', 0)) e.add_child(Node.s32('uid', extid)) e.add_child(Node.s32('uattr', 0)) e.add_child(Node.string('pn', self.NAME)) e.add_child(Node.s16('mg', 255)) e.add_child(Node.s32('mopt', 0)) e.add_child(Node.s32('tid', 0)) e.add_child(Node.string('tn', '')) 
e.add_child(Node.s32('topt', 0)) e.add_child(Node.string('lid', location)) e.add_child(Node.string('sn', '')) e.add_child(Node.u8('pref', 51)) e.add_child(Node.s8('stg', 4)) e.add_child(Node.s8('pside', 0)) e.add_child(Node.s16('eatime', 30)) e.add_child(Node.u8_array('ga', [127, 0, 0, 1])) e.add_child(Node.u16('gp', 10007)) e.add_child(Node.u8_array('la', [16, 0, 0, 0])) e.add_child(Node.u8('ver', 5)) lobby.add_child(Node.s32_array('friend', [])) call.add_child(lobby) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/lobby/interval") self.assert_path(resp, "response/lobby/interval_p") self.assert_path(resp, "response/lobby/eid") self.assert_path(resp, "response/lobby/e/eid") self.assert_path(resp, "response/lobby/e/mid") self.assert_path(resp, "response/lobby/e/ng") self.assert_path(resp, "response/lobby/e/uid") self.assert_path(resp, "response/lobby/e/uattr") self.assert_path(resp, "response/lobby/e/pn") self.assert_path(resp, "response/lobby/e/mg") self.assert_path(resp, "response/lobby/e/mopt") self.assert_path(resp, "response/lobby/e/tid") self.assert_path(resp, "response/lobby/e/tn") self.assert_path(resp, "response/lobby/e/topt") self.assert_path(resp, "response/lobby/e/lid") self.assert_path(resp, "response/lobby/e/sn") self.assert_path(resp, "response/lobby/e/pref") self.assert_path(resp, "response/lobby/e/stg") self.assert_path(resp, "response/lobby/e/pside") self.assert_path(resp, "response/lobby/e/eatime") self.assert_path(resp, "response/lobby/e/ga") self.assert_path(resp, "response/lobby/e/gp") self.assert_path(resp, "response/lobby/e/la") self.assert_path(resp, "response/lobby/e/ver") return resp.child_value('lobby/eid') def verify_lobby_delete(self, eid: int) -> None: call = self.call_node() lobby = Node.void('lobby') lobby.set_attribute('method', 'delete') lobby.add_child(Node.s32('eid', eid)) call.add_child(lobby) # Swap with server resp = self.exchange('', call) # Verify that 
response is correct self.assert_path(resp, "response/lobby") def verify_pzlcmt_read(self, extid: int) -> None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'pzlcmt_read') info.add_child(Node.s32('uid', extid)) info.add_child(Node.s32('tid', 0)) info.add_child(Node.s32('time', 0)) info.add_child(Node.s32('limit', 30)) call.add_child(info) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/info/comment/time") self.assert_path(resp, "response/info/c/uid") self.assert_path(resp, "response/info/c/name") self.assert_path(resp, "response/info/c/icon") self.assert_path(resp, "response/info/c/bln") self.assert_path(resp, "response/info/c/tid") self.assert_path(resp, "response/info/c/t_name") self.assert_path(resp, "response/info/c/pref") self.assert_path(resp, "response/info/c/time") self.assert_path(resp, "response/info/c/comment") self.assert_path(resp, "response/info/c/is_tweet") # Verify we posted our comment earlier found = False for child in resp.child('info').children: if child.name != 'c': continue if child.child_value('uid') == extid: name = child.child_value('name') comment = child.child_value('comment') if name != self.NAME: raise Exception('Invalid name \'{}\' returned for comment!'.format(name)) if comment != 'アメ〜〜!': raise Exception('Invalid comment \'{}\' returned for comment!'.format(comment)) found = True if not found: raise Exception('Comment we posted was not found!') def verify_pzlcmt_write(self, extid: int) -> None: call = self.call_node() info = Node.void('info') info.set_attribute('method', 'pzlcmt_write') info.add_child(Node.s32('uid', extid)) info.add_child(Node.string('name', self.NAME)) info.add_child(Node.s16('icon', 0)) info.add_child(Node.s8('bln', 0)) info.add_child(Node.s32('tid', 0)) info.add_child(Node.string('t_name', '')) info.add_child(Node.s8('pref', 51)) info.add_child(Node.s32('time', int(time.time()))) 
info.add_child(Node.string('comment', 'アメ〜〜!')) info.add_child(Node.bool('is_tweet', True)) call.add_child(info) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/info") def verify_jbrbcollabo_save(self, refid: str) -> None: call = self.call_node() jbrbcollabo = Node.void('jbrbcollabo') jbrbcollabo.set_attribute('method', 'save') jbrbcollabo.add_child(Node.string('ref_id', refid)) jbrbcollabo.add_child(Node.u16('cre_count', 0)) call.add_child(jbrbcollabo) # Swap with server resp = self.exchange('', call) # Verify that response is correct self.assert_path(resp, "response/jbrbcollabo") def verify(self, cardid: Optional[str]) -> None: # Verify boot sequence is okay self.verify_services_get( expected_services=[ 'pcbtracker', 'pcbevent', 'local', 'message', 'facility', 'cardmng', 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby', 'ntp', 'keepalive' ] ) paseli_enabled = self.verify_pcbtracker_alive() self.verify_message_get() self.verify_package_list() location = self.verify_facility_get() self.verify_pcbevent_put() self.verify_pcb_boot(location) self.verify_info_common() # Verify card registration and profile lookup if cardid is not None: card = cardid else: card = self.random_card() print("Generated random card ID {} for use.".format(card)) if cardid is None: self.verify_cardmng_inquire(card, msg_type='unregistered', paseli_enabled=paseli_enabled) ref_id = self.verify_cardmng_getrefid(card) if len(ref_id) != 16: raise Exception('Invalid refid \'{}\' returned when registering card'.format(ref_id)) if ref_id != self.verify_cardmng_inquire(card, msg_type='new', paseli_enabled=paseli_enabled): raise Exception('Invalid refid \'{}\' returned when querying card'.format(ref_id)) # Always get a player start, regardless of new profile or not self.verify_player_start(ref_id) self.verify_player_delete(ref_id) self.verify_player_succeed(ref_id) extid = self.verify_player_write( ref_id, location, [{ 
'id': 0, 'chart': 0, 'clear_type': -1, 'achievement_rate': 0, 'score': 0, 'combo': 0, 'miss_count': 0, }] ) else: print("Skipping new card checks for existing card") ref_id = self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled) # Verify pin handling and return card handling self.verify_cardmng_authpass(ref_id, correct=True) self.verify_cardmng_authpass(ref_id, correct=False) if ref_id != self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled): raise Exception('Invalid refid \'{}\' returned when querying card'.format(ref_id)) # Verify lobby functionality self.verify_lobby_read(location, extid) eid = self.verify_lobby_entry(location, extid) self.verify_lobby_delete(eid) # Verify puzzle comment read and write self.verify_pzlcmt_write(extid) self.verify_pzlcmt_read(extid) # Verify Jubeat/ReflecBeat collabo save self.verify_jbrbcollabo_save(ref_id) if cardid is None: # Verify score saving and updating for phase in [1, 2]: if phase == 1: dummyscores = [ # An okay score on a chart { 'id': 1, 'chart': 1, 'clear_type': 2, 'achievement_rate': 7543, 'score': 432, 'combo': 123, 'miss_count': 5, }, # A good score on an easier chart of the same song { 'id': 1, 'chart': 0, 'clear_type': 4, 'achievement_rate': 9876, 'score': 543, 'combo': 543, 'miss_count': 0, }, # A bad score on a hard chart { 'id': 3, 'chart': 2, 'clear_type': 2, 'achievement_rate': 1234, 'score': 123, 'combo': 42, 'miss_count': 54, }, # A terrible score on an easy chart { 'id': 3, 'chart': 0, 'clear_type': 2, 'achievement_rate': 1024, 'score': 50, 'combo': 12, 'miss_count': 90, }, ] if phase == 2: dummyscores = [ # A better score on the same chart { 'id': 1, 'chart': 1, 'clear_type': 3, 'achievement_rate': 8765, 'score': 469, 'combo': 468, 'miss_count': 1, }, # A worse score on another same chart { 'id': 1, 'chart': 0, 'clear_type': 2, 'achievement_rate': 8765, 'score': 432, 'combo': 321, 'miss_count': 15, 'expected_score': 543, 'expected_clear_type': 4, 
'expected_achievement_rate': 9876, 'expected_combo': 543, 'expected_miss_count': 0, }, ] self.verify_player_write(ref_id, location, dummyscores) scores = self.verify_player_read(ref_id, location) for expected in dummyscores: actual = None for received in scores: if received['id'] == expected['id'] and received['chart'] == expected['chart']: actual = received break if actual is None: raise Exception("Didn't find song {} chart {} in response!".format(expected['id'], expected['chart'])) if 'expected_score' in expected: expected_score = expected['expected_score'] else: expected_score = expected['score'] if 'expected_achievement_rate' in expected: expected_achievement_rate = expected['expected_achievement_rate'] else: expected_achievement_rate = expected['achievement_rate'] if 'expected_clear_type' in expected: expected_clear_type = expected['expected_clear_type'] else: expected_clear_type = expected['clear_type'] if 'expected_combo' in expected: expected_combo = expected['expected_combo'] else: expected_combo = expected['combo'] if 'expected_miss_count' in expected: expected_miss_count = expected['expected_miss_count'] else: expected_miss_count = expected['miss_count'] if actual['score'] != expected_score: raise Exception('Expected a score of \'{}\' for song \'{}\' chart \'{}\' but got score \'{}\''.format( expected_score, expected['id'], expected['chart'], actual['score'], )) if actual['achievement_rate'] != expected_achievement_rate: raise Exception('Expected an achievement rate of \'{}\' for song \'{}\' chart \'{}\' but got achievement rate \'{}\''.format( expected_achievement_rate, expected['id'], expected['chart'], actual['achievement_rate'], )) if actual['clear_type'] != expected_clear_type: raise Exception('Expected a clear_type of \'{}\' for song \'{}\' chart \'{}\' but got clear_type \'{}\''.format( expected_clear_type, expected['id'], expected['chart'], actual['clear_type'], )) if actual['combo'] != expected_combo: raise Exception('Expected a combo of \'{}\' 
for song \'{}\' chart \'{}\' but got combo \'{}\''.format( expected_combo, expected['id'], expected['chart'], actual['combo'], )) if actual['miss_count'] != expected_miss_count: raise Exception('Expected a miss count of \'{}\' for song \'{}\' chart \'{}\' but got miss count \'{}\''.format( expected_miss_count, expected['id'], expected['chart'], actual['miss_count'], )) # Sleep so we don't end up putting in score history on the same second time.sleep(1) else: print("Skipping score checks for existing card") # Verify ending game self.verify_player_end(ref_id) # Verify high score tables self.verify_info_ranking() # Verify paseli handling if paseli_enabled: print("PASELI enabled for this PCBID, executing PASELI checks") else: print("PASELI disabled for this PCBID, skipping PASELI checks") return sessid, balance = self.verify_eacoin_checkin(card) if balance == 0: print("Skipping PASELI consume check because card has 0 balance") else: self.verify_eacoin_consume(sessid, balance, random.randint(0, balance)) self.verify_eacoin_checkout(sessid)
flexible
{ "blob_id": "f781377a52400abd617e7f0c5529726120b78476", "index": 3426, "step-1": "<mask token>\n\n\nclass ReflecBeatColette(BaseClient):\n <mask token>\n\n def verify_pcb_boot(self, loc: str) ->None:\n call = self.call_node()\n pcb = Node.void('pcb')\n pcb.set_attribute('method', 'boot')\n pcb.add_child(Node.string('lid', loc))\n call.add_child(pcb)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/pcb/sinfo/nm')\n self.assert_path(resp, 'response/pcb/sinfo/cl_enbl')\n self.assert_path(resp, 'response/pcb/sinfo/cl_h')\n self.assert_path(resp, 'response/pcb/sinfo/cl_m')\n <mask token>\n\n def verify_info_ranking(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'ranking')\n info.add_child(Node.s32('ver', 0))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/ver')\n self.assert_path(resp, 'response/info/ranking/weekly/bt')\n self.assert_path(resp, 'response/info/ranking/weekly/et')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/monthly/bt')\n self.assert_path(resp, 'response/info/ranking/monthly/et')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/total/bt')\n self.assert_path(resp, 'response/info/ranking/total/et')\n self.assert_path(resp, 'response/info/ranking/total/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/total/new/d/cnt')\n <mask token>\n\n def verify_player_delete(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'delete')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_end(self, 
refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'end')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_succeed(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'succeed')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/name')\n self.assert_path(resp, 'response/player/lv')\n self.assert_path(resp, 'response/player/exp')\n self.assert_path(resp, 'response/player/grd')\n self.assert_path(resp, 'response/player/ap')\n self.assert_path(resp, 'response/player/released')\n self.assert_path(resp, 'response/player/mrecord')\n\n def verify_player_read(self, refid: str, location: str) ->List[Dict[str,\n int]]:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'read')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.string('lid', location))\n player.add_child(Node.s16('ver', 5))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/pdata/account/usrid')\n self.assert_path(resp, 'response/player/pdata/account/tpc')\n self.assert_path(resp, 'response/player/pdata/account/dpc')\n self.assert_path(resp, 'response/player/pdata/account/crd')\n self.assert_path(resp, 'response/player/pdata/account/brd')\n self.assert_path(resp, 'response/player/pdata/account/tdc')\n self.assert_path(resp, 'response/player/pdata/account/intrvld')\n self.assert_path(resp, 'response/player/pdata/account/ver')\n self.assert_path(resp, 'response/player/pdata/account/pst')\n self.assert_path(resp, 'response/player/pdata/account/st')\n self.assert_path(resp, 'response/player/pdata/base/name')\n self.assert_path(resp, 'response/player/pdata/base/exp')\n 
self.assert_path(resp, 'response/player/pdata/base/lv')\n self.assert_path(resp, 'response/player/pdata/base/mg')\n self.assert_path(resp, 'response/player/pdata/base/ap')\n self.assert_path(resp, 'response/player/pdata/base/tid')\n self.assert_path(resp, 'response/player/pdata/base/tname')\n self.assert_path(resp, 'response/player/pdata/base/cmnt')\n self.assert_path(resp, 'response/player/pdata/base/uattr')\n self.assert_path(resp, 'response/player/pdata/base/hidden_param')\n self.assert_path(resp, 'response/player/pdata/base/tbs')\n self.assert_path(resp, 'response/player/pdata/base/tbs_r')\n self.assert_path(resp, 'response/player/pdata/rival')\n self.assert_path(resp, 'response/player/pdata/fav_music_slot')\n self.assert_path(resp, 'response/player/pdata/custom')\n self.assert_path(resp, 'response/player/pdata/config')\n self.assert_path(resp, 'response/player/pdata/stamp')\n self.assert_path(resp, 'response/player/pdata/released')\n self.assert_path(resp, 'response/player/pdata/record')\n if resp.child_value('player/pdata/base/name') != self.NAME:\n raise Exception('Invalid name {} returned on profile read!'.\n format(resp.child_value('player/pdata/base/name')))\n scores = []\n for child in resp.child('player/pdata/record').children:\n if child.name != 'rec':\n continue\n score = {'id': child.child_value('mid'), 'chart': child.\n child_value('ntgrd'), 'clear_type': child.child_value('ct'),\n 'achievement_rate': child.child_value('ar'), 'score': child\n .child_value('scr'), 'combo': child.child_value('cmb'),\n 'miss_count': child.child_value('ms')}\n scores.append(score)\n return scores\n\n def verify_player_write(self, refid: str, loc: str, scores: List[Dict[\n str, int]]) ->int:\n call = self.call_node()\n player = Node.void('player')\n call.add_child(player)\n player.set_attribute('method', 'write')\n pdata = Node.void('pdata')\n player.add_child(pdata)\n account = Node.void('account')\n pdata.add_child(account)\n account.add_child(Node.s32('usrid', 0))\n 
account.add_child(Node.s32('plyid', 0))\n account.add_child(Node.s32('tpc', 1))\n account.add_child(Node.s32('dpc', 1))\n account.add_child(Node.s32('crd', 1))\n account.add_child(Node.s32('brd', 1))\n account.add_child(Node.s32('tdc', 1))\n account.add_child(Node.string('rid', refid))\n account.add_child(Node.string('lid', loc))\n account.add_child(Node.u8('mode', 0))\n account.add_child(Node.s16('ver', 5))\n account.add_child(Node.bool('pp', True))\n account.add_child(Node.bool('ps', True))\n account.add_child(Node.s16('pay', 0))\n account.add_child(Node.s16('pay_pc', 0))\n account.add_child(Node.u64('st', int(time.time() * 1000)))\n base = Node.void('base')\n pdata.add_child(base)\n base.add_child(Node.string('name', self.NAME))\n base.add_child(Node.s32('exp', 0))\n base.add_child(Node.s32('lv', 1))\n base.add_child(Node.s32('mg', -1))\n base.add_child(Node.s32('ap', -1))\n base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n base.add_child(Node.bool('is_tut', True))\n stglog = Node.void('stglog')\n pdata.add_child(stglog)\n index = 0\n for score in scores:\n log = Node.void('log')\n stglog.add_child(log)\n log.add_child(Node.s8('stg', index))\n log.add_child(Node.s16('mid', score['id']))\n log.add_child(Node.s8('ng', score['chart']))\n log.add_child(Node.s8('col', 0))\n log.add_child(Node.s8('mt', 7))\n log.add_child(Node.s8('rt', 0))\n log.add_child(Node.s8('ct', score['clear_type']))\n log.add_child(Node.s16('grd', 0))\n log.add_child(Node.s16('ar', score['achievement_rate']))\n log.add_child(Node.s16('sc', score['score']))\n log.add_child(Node.s16('jt_jst', 0))\n log.add_child(Node.s16('jt_grt', 0))\n log.add_child(Node.s16('jt_gd', 0))\n log.add_child(Node.s16('jt_ms', score['miss_count']))\n log.add_child(Node.s16('jt_jr', 0))\n log.add_child(Node.s16('cmb', score['combo']))\n 
log.add_child(Node.s16('exp', 0))\n log.add_child(Node.s32('r_uid', 0))\n log.add_child(Node.s32('r_plyid', 0))\n log.add_child(Node.s8('r_stg', 0))\n log.add_child(Node.s8('r_ct', -1))\n log.add_child(Node.s16('r_sc', 0))\n log.add_child(Node.s16('r_grd', 0))\n log.add_child(Node.s16('r_ar', 0))\n log.add_child(Node.s8('r_cpuid', -1))\n log.add_child(Node.s32('time', int(time.time())))\n log.add_child(Node.s8('decide', 0))\n index = index + 1\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/uid')\n return resp.child_value('player/uid')\n <mask token>\n\n def verify_lobby_entry(self, location: str, extid: int) ->int:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'entry')\n e = Node.void('e')\n lobby.add_child(e)\n e.add_child(Node.s32('eid', 0))\n e.add_child(Node.u16('mid', 79))\n e.add_child(Node.u8('ng', 0))\n e.add_child(Node.s32('uid', extid))\n e.add_child(Node.s32('uattr', 0))\n e.add_child(Node.string('pn', self.NAME))\n e.add_child(Node.s16('mg', 255))\n e.add_child(Node.s32('mopt', 0))\n e.add_child(Node.s32('tid', 0))\n e.add_child(Node.string('tn', ''))\n e.add_child(Node.s32('topt', 0))\n e.add_child(Node.string('lid', location))\n e.add_child(Node.string('sn', ''))\n e.add_child(Node.u8('pref', 51))\n e.add_child(Node.s8('stg', 4))\n e.add_child(Node.s8('pside', 0))\n e.add_child(Node.s16('eatime', 30))\n e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n e.add_child(Node.u16('gp', 10007))\n e.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n e.add_child(Node.u8('ver', 5))\n lobby.add_child(Node.s32_array('friend', []))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby/interval')\n self.assert_path(resp, 'response/lobby/interval_p')\n self.assert_path(resp, 'response/lobby/eid')\n self.assert_path(resp, 'response/lobby/e/eid')\n self.assert_path(resp, 'response/lobby/e/mid')\n self.assert_path(resp, 'response/lobby/e/ng')\n 
self.assert_path(resp, 'response/lobby/e/uid')\n self.assert_path(resp, 'response/lobby/e/uattr')\n self.assert_path(resp, 'response/lobby/e/pn')\n self.assert_path(resp, 'response/lobby/e/mg')\n self.assert_path(resp, 'response/lobby/e/mopt')\n self.assert_path(resp, 'response/lobby/e/tid')\n self.assert_path(resp, 'response/lobby/e/tn')\n self.assert_path(resp, 'response/lobby/e/topt')\n self.assert_path(resp, 'response/lobby/e/lid')\n self.assert_path(resp, 'response/lobby/e/sn')\n self.assert_path(resp, 'response/lobby/e/pref')\n self.assert_path(resp, 'response/lobby/e/stg')\n self.assert_path(resp, 'response/lobby/e/pside')\n self.assert_path(resp, 'response/lobby/e/eatime')\n self.assert_path(resp, 'response/lobby/e/ga')\n self.assert_path(resp, 'response/lobby/e/gp')\n self.assert_path(resp, 'response/lobby/e/la')\n self.assert_path(resp, 'response/lobby/e/ver')\n return resp.child_value('lobby/eid')\n\n def verify_lobby_delete(self, eid: int) ->None:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'delete')\n lobby.add_child(Node.s32('eid', eid))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby')\n\n def verify_pzlcmt_read(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_read')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.s32('time', 0))\n info.add_child(Node.s32('limit', 30))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/comment/time')\n self.assert_path(resp, 'response/info/c/uid')\n self.assert_path(resp, 'response/info/c/name')\n self.assert_path(resp, 'response/info/c/icon')\n self.assert_path(resp, 'response/info/c/bln')\n self.assert_path(resp, 'response/info/c/tid')\n self.assert_path(resp, 'response/info/c/t_name')\n self.assert_path(resp, 'response/info/c/pref')\n self.assert_path(resp, 
'response/info/c/time')\n self.assert_path(resp, 'response/info/c/comment')\n self.assert_path(resp, 'response/info/c/is_tweet')\n found = False\n for child in resp.child('info').children:\n if child.name != 'c':\n continue\n if child.child_value('uid') == extid:\n name = child.child_value('name')\n comment = child.child_value('comment')\n if name != self.NAME:\n raise Exception(\"Invalid name '{}' returned for comment!\"\n .format(name))\n if comment != 'アメ〜〜!':\n raise Exception(\n \"Invalid comment '{}' returned for comment!\".format\n (comment))\n found = True\n if not found:\n raise Exception('Comment we posted was not found!')\n <mask token>\n\n def verify_jbrbcollabo_save(self, refid: str) ->None:\n call = self.call_node()\n jbrbcollabo = Node.void('jbrbcollabo')\n jbrbcollabo.set_attribute('method', 'save')\n jbrbcollabo.add_child(Node.string('ref_id', refid))\n jbrbcollabo.add_child(Node.u16('cre_count', 0))\n call.add_child(jbrbcollabo)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/jbrbcollabo')\n\n def verify(self, cardid: Optional[str]) ->None:\n self.verify_services_get(expected_services=['pcbtracker',\n 'pcbevent', 'local', 'message', 'facility', 'cardmng',\n 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby',\n 'ntp', 'keepalive'])\n paseli_enabled = self.verify_pcbtracker_alive()\n self.verify_message_get()\n self.verify_package_list()\n location = self.verify_facility_get()\n self.verify_pcbevent_put()\n self.verify_pcb_boot(location)\n self.verify_info_common()\n if cardid is not None:\n card = cardid\n else:\n card = self.random_card()\n print('Generated random card ID {} for use.'.format(card))\n if cardid is None:\n self.verify_cardmng_inquire(card, msg_type='unregistered',\n paseli_enabled=paseli_enabled)\n ref_id = self.verify_cardmng_getrefid(card)\n if len(ref_id) != 16:\n raise Exception(\n \"Invalid refid '{}' returned when registering card\".\n format(ref_id))\n if ref_id != 
self.verify_cardmng_inquire(card, msg_type='new',\n paseli_enabled=paseli_enabled):\n raise Exception(\n \"Invalid refid '{}' returned when querying card\".format\n (ref_id))\n self.verify_player_start(ref_id)\n self.verify_player_delete(ref_id)\n self.verify_player_succeed(ref_id)\n extid = self.verify_player_write(ref_id, location, [{'id': 0,\n 'chart': 0, 'clear_type': -1, 'achievement_rate': 0,\n 'score': 0, 'combo': 0, 'miss_count': 0}])\n else:\n print('Skipping new card checks for existing card')\n ref_id = self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled)\n self.verify_cardmng_authpass(ref_id, correct=True)\n self.verify_cardmng_authpass(ref_id, correct=False)\n if ref_id != self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled):\n raise Exception(\"Invalid refid '{}' returned when querying card\"\n .format(ref_id))\n self.verify_lobby_read(location, extid)\n eid = self.verify_lobby_entry(location, extid)\n self.verify_lobby_delete(eid)\n self.verify_pzlcmt_write(extid)\n self.verify_pzlcmt_read(extid)\n self.verify_jbrbcollabo_save(ref_id)\n if cardid is None:\n for phase in [1, 2]:\n if phase == 1:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2,\n 'achievement_rate': 7543, 'score': 432, 'combo': \n 123, 'miss_count': 5}, {'id': 1, 'chart': 0,\n 'clear_type': 4, 'achievement_rate': 9876, 'score':\n 543, 'combo': 543, 'miss_count': 0}, {'id': 3,\n 'chart': 2, 'clear_type': 2, 'achievement_rate': \n 1234, 'score': 123, 'combo': 42, 'miss_count': 54},\n {'id': 3, 'chart': 0, 'clear_type': 2,\n 'achievement_rate': 1024, 'score': 50, 'combo': 12,\n 'miss_count': 90}]\n if phase == 2:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 3,\n 'achievement_rate': 8765, 'score': 469, 'combo': \n 468, 'miss_count': 1}, {'id': 1, 'chart': 0,\n 'clear_type': 2, 'achievement_rate': 8765, 'score':\n 432, 'combo': 321, 'miss_count': 15,\n 'expected_score': 543, 'expected_clear_type': 4,\n 
'expected_achievement_rate': 9876, 'expected_combo':\n 543, 'expected_miss_count': 0}]\n self.verify_player_write(ref_id, location, dummyscores)\n scores = self.verify_player_read(ref_id, location)\n for expected in dummyscores:\n actual = None\n for received in scores:\n if received['id'] == expected['id'] and received[\n 'chart'] == expected['chart']:\n actual = received\n break\n if actual is None:\n raise Exception(\n \"Didn't find song {} chart {} in response!\".\n format(expected['id'], expected['chart']))\n if 'expected_score' in expected:\n expected_score = expected['expected_score']\n else:\n expected_score = expected['score']\n if 'expected_achievement_rate' in expected:\n expected_achievement_rate = expected[\n 'expected_achievement_rate']\n else:\n expected_achievement_rate = expected['achievement_rate'\n ]\n if 'expected_clear_type' in expected:\n expected_clear_type = expected['expected_clear_type']\n else:\n expected_clear_type = expected['clear_type']\n if 'expected_combo' in expected:\n expected_combo = expected['expected_combo']\n else:\n expected_combo = expected['combo']\n if 'expected_miss_count' in expected:\n expected_miss_count = expected['expected_miss_count']\n else:\n expected_miss_count = expected['miss_count']\n if actual['score'] != expected_score:\n raise Exception(\n \"Expected a score of '{}' for song '{}' chart '{}' but got score '{}'\"\n .format(expected_score, expected['id'],\n expected['chart'], actual['score']))\n if actual['achievement_rate'] != expected_achievement_rate:\n raise Exception(\n \"Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'\"\n .format(expected_achievement_rate, expected[\n 'id'], expected['chart'], actual[\n 'achievement_rate']))\n if actual['clear_type'] != expected_clear_type:\n raise Exception(\n \"Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'\"\n .format(expected_clear_type, expected['id'],\n expected['chart'], 
actual['clear_type']))\n if actual['combo'] != expected_combo:\n raise Exception(\n \"Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'\"\n .format(expected_combo, expected['id'],\n expected['chart'], actual['combo']))\n if actual['miss_count'] != expected_miss_count:\n raise Exception(\n \"Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'\"\n .format(expected_miss_count, expected['id'],\n expected['chart'], actual['miss_count']))\n time.sleep(1)\n else:\n print('Skipping score checks for existing card')\n self.verify_player_end(ref_id)\n self.verify_info_ranking()\n if paseli_enabled:\n print('PASELI enabled for this PCBID, executing PASELI checks')\n else:\n print('PASELI disabled for this PCBID, skipping PASELI checks')\n return\n sessid, balance = self.verify_eacoin_checkin(card)\n if balance == 0:\n print('Skipping PASELI consume check because card has 0 balance')\n else:\n self.verify_eacoin_consume(sessid, balance, random.randint(0,\n balance))\n self.verify_eacoin_checkout(sessid)\n", "step-2": "<mask token>\n\n\nclass ReflecBeatColette(BaseClient):\n <mask token>\n\n def verify_pcb_boot(self, loc: str) ->None:\n call = self.call_node()\n pcb = Node.void('pcb')\n pcb.set_attribute('method', 'boot')\n pcb.add_child(Node.string('lid', loc))\n call.add_child(pcb)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/pcb/sinfo/nm')\n self.assert_path(resp, 'response/pcb/sinfo/cl_enbl')\n self.assert_path(resp, 'response/pcb/sinfo/cl_h')\n self.assert_path(resp, 'response/pcb/sinfo/cl_m')\n\n def verify_info_common(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'common')\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/event_ctrl')\n self.assert_path(resp, 'response/info/item_lock_ctrl')\n\n def verify_info_ranking(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n 
info.set_attribute('method', 'ranking')\n info.add_child(Node.s32('ver', 0))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/ver')\n self.assert_path(resp, 'response/info/ranking/weekly/bt')\n self.assert_path(resp, 'response/info/ranking/weekly/et')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/monthly/bt')\n self.assert_path(resp, 'response/info/ranking/monthly/et')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/total/bt')\n self.assert_path(resp, 'response/info/ranking/total/et')\n self.assert_path(resp, 'response/info/ranking/total/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/total/new/d/cnt')\n <mask token>\n\n def verify_player_delete(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'delete')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_end(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'end')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_succeed(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'succeed')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/name')\n self.assert_path(resp, 'response/player/lv')\n self.assert_path(resp, 'response/player/exp')\n self.assert_path(resp, 
'response/player/grd')\n self.assert_path(resp, 'response/player/ap')\n self.assert_path(resp, 'response/player/released')\n self.assert_path(resp, 'response/player/mrecord')\n\n def verify_player_read(self, refid: str, location: str) ->List[Dict[str,\n int]]:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'read')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.string('lid', location))\n player.add_child(Node.s16('ver', 5))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/pdata/account/usrid')\n self.assert_path(resp, 'response/player/pdata/account/tpc')\n self.assert_path(resp, 'response/player/pdata/account/dpc')\n self.assert_path(resp, 'response/player/pdata/account/crd')\n self.assert_path(resp, 'response/player/pdata/account/brd')\n self.assert_path(resp, 'response/player/pdata/account/tdc')\n self.assert_path(resp, 'response/player/pdata/account/intrvld')\n self.assert_path(resp, 'response/player/pdata/account/ver')\n self.assert_path(resp, 'response/player/pdata/account/pst')\n self.assert_path(resp, 'response/player/pdata/account/st')\n self.assert_path(resp, 'response/player/pdata/base/name')\n self.assert_path(resp, 'response/player/pdata/base/exp')\n self.assert_path(resp, 'response/player/pdata/base/lv')\n self.assert_path(resp, 'response/player/pdata/base/mg')\n self.assert_path(resp, 'response/player/pdata/base/ap')\n self.assert_path(resp, 'response/player/pdata/base/tid')\n self.assert_path(resp, 'response/player/pdata/base/tname')\n self.assert_path(resp, 'response/player/pdata/base/cmnt')\n self.assert_path(resp, 'response/player/pdata/base/uattr')\n self.assert_path(resp, 'response/player/pdata/base/hidden_param')\n self.assert_path(resp, 'response/player/pdata/base/tbs')\n self.assert_path(resp, 'response/player/pdata/base/tbs_r')\n self.assert_path(resp, 'response/player/pdata/rival')\n self.assert_path(resp, 
'response/player/pdata/fav_music_slot')\n self.assert_path(resp, 'response/player/pdata/custom')\n self.assert_path(resp, 'response/player/pdata/config')\n self.assert_path(resp, 'response/player/pdata/stamp')\n self.assert_path(resp, 'response/player/pdata/released')\n self.assert_path(resp, 'response/player/pdata/record')\n if resp.child_value('player/pdata/base/name') != self.NAME:\n raise Exception('Invalid name {} returned on profile read!'.\n format(resp.child_value('player/pdata/base/name')))\n scores = []\n for child in resp.child('player/pdata/record').children:\n if child.name != 'rec':\n continue\n score = {'id': child.child_value('mid'), 'chart': child.\n child_value('ntgrd'), 'clear_type': child.child_value('ct'),\n 'achievement_rate': child.child_value('ar'), 'score': child\n .child_value('scr'), 'combo': child.child_value('cmb'),\n 'miss_count': child.child_value('ms')}\n scores.append(score)\n return scores\n\n def verify_player_write(self, refid: str, loc: str, scores: List[Dict[\n str, int]]) ->int:\n call = self.call_node()\n player = Node.void('player')\n call.add_child(player)\n player.set_attribute('method', 'write')\n pdata = Node.void('pdata')\n player.add_child(pdata)\n account = Node.void('account')\n pdata.add_child(account)\n account.add_child(Node.s32('usrid', 0))\n account.add_child(Node.s32('plyid', 0))\n account.add_child(Node.s32('tpc', 1))\n account.add_child(Node.s32('dpc', 1))\n account.add_child(Node.s32('crd', 1))\n account.add_child(Node.s32('brd', 1))\n account.add_child(Node.s32('tdc', 1))\n account.add_child(Node.string('rid', refid))\n account.add_child(Node.string('lid', loc))\n account.add_child(Node.u8('mode', 0))\n account.add_child(Node.s16('ver', 5))\n account.add_child(Node.bool('pp', True))\n account.add_child(Node.bool('ps', True))\n account.add_child(Node.s16('pay', 0))\n account.add_child(Node.s16('pay_pc', 0))\n account.add_child(Node.u64('st', int(time.time() * 1000)))\n base = Node.void('base')\n 
pdata.add_child(base)\n base.add_child(Node.string('name', self.NAME))\n base.add_child(Node.s32('exp', 0))\n base.add_child(Node.s32('lv', 1))\n base.add_child(Node.s32('mg', -1))\n base.add_child(Node.s32('ap', -1))\n base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n base.add_child(Node.bool('is_tut', True))\n stglog = Node.void('stglog')\n pdata.add_child(stglog)\n index = 0\n for score in scores:\n log = Node.void('log')\n stglog.add_child(log)\n log.add_child(Node.s8('stg', index))\n log.add_child(Node.s16('mid', score['id']))\n log.add_child(Node.s8('ng', score['chart']))\n log.add_child(Node.s8('col', 0))\n log.add_child(Node.s8('mt', 7))\n log.add_child(Node.s8('rt', 0))\n log.add_child(Node.s8('ct', score['clear_type']))\n log.add_child(Node.s16('grd', 0))\n log.add_child(Node.s16('ar', score['achievement_rate']))\n log.add_child(Node.s16('sc', score['score']))\n log.add_child(Node.s16('jt_jst', 0))\n log.add_child(Node.s16('jt_grt', 0))\n log.add_child(Node.s16('jt_gd', 0))\n log.add_child(Node.s16('jt_ms', score['miss_count']))\n log.add_child(Node.s16('jt_jr', 0))\n log.add_child(Node.s16('cmb', score['combo']))\n log.add_child(Node.s16('exp', 0))\n log.add_child(Node.s32('r_uid', 0))\n log.add_child(Node.s32('r_plyid', 0))\n log.add_child(Node.s8('r_stg', 0))\n log.add_child(Node.s8('r_ct', -1))\n log.add_child(Node.s16('r_sc', 0))\n log.add_child(Node.s16('r_grd', 0))\n log.add_child(Node.s16('r_ar', 0))\n log.add_child(Node.s8('r_cpuid', -1))\n log.add_child(Node.s32('time', int(time.time())))\n log.add_child(Node.s8('decide', 0))\n index = index + 1\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/uid')\n return resp.child_value('player/uid')\n <mask token>\n\n def verify_lobby_entry(self, location: str, extid: int) ->int:\n call = self.call_node()\n lobby = 
Node.void('lobby')\n lobby.set_attribute('method', 'entry')\n e = Node.void('e')\n lobby.add_child(e)\n e.add_child(Node.s32('eid', 0))\n e.add_child(Node.u16('mid', 79))\n e.add_child(Node.u8('ng', 0))\n e.add_child(Node.s32('uid', extid))\n e.add_child(Node.s32('uattr', 0))\n e.add_child(Node.string('pn', self.NAME))\n e.add_child(Node.s16('mg', 255))\n e.add_child(Node.s32('mopt', 0))\n e.add_child(Node.s32('tid', 0))\n e.add_child(Node.string('tn', ''))\n e.add_child(Node.s32('topt', 0))\n e.add_child(Node.string('lid', location))\n e.add_child(Node.string('sn', ''))\n e.add_child(Node.u8('pref', 51))\n e.add_child(Node.s8('stg', 4))\n e.add_child(Node.s8('pside', 0))\n e.add_child(Node.s16('eatime', 30))\n e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n e.add_child(Node.u16('gp', 10007))\n e.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n e.add_child(Node.u8('ver', 5))\n lobby.add_child(Node.s32_array('friend', []))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby/interval')\n self.assert_path(resp, 'response/lobby/interval_p')\n self.assert_path(resp, 'response/lobby/eid')\n self.assert_path(resp, 'response/lobby/e/eid')\n self.assert_path(resp, 'response/lobby/e/mid')\n self.assert_path(resp, 'response/lobby/e/ng')\n self.assert_path(resp, 'response/lobby/e/uid')\n self.assert_path(resp, 'response/lobby/e/uattr')\n self.assert_path(resp, 'response/lobby/e/pn')\n self.assert_path(resp, 'response/lobby/e/mg')\n self.assert_path(resp, 'response/lobby/e/mopt')\n self.assert_path(resp, 'response/lobby/e/tid')\n self.assert_path(resp, 'response/lobby/e/tn')\n self.assert_path(resp, 'response/lobby/e/topt')\n self.assert_path(resp, 'response/lobby/e/lid')\n self.assert_path(resp, 'response/lobby/e/sn')\n self.assert_path(resp, 'response/lobby/e/pref')\n self.assert_path(resp, 'response/lobby/e/stg')\n self.assert_path(resp, 'response/lobby/e/pside')\n self.assert_path(resp, 'response/lobby/e/eatime')\n 
self.assert_path(resp, 'response/lobby/e/ga')\n self.assert_path(resp, 'response/lobby/e/gp')\n self.assert_path(resp, 'response/lobby/e/la')\n self.assert_path(resp, 'response/lobby/e/ver')\n return resp.child_value('lobby/eid')\n\n def verify_lobby_delete(self, eid: int) ->None:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'delete')\n lobby.add_child(Node.s32('eid', eid))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby')\n\n def verify_pzlcmt_read(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_read')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.s32('time', 0))\n info.add_child(Node.s32('limit', 30))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/comment/time')\n self.assert_path(resp, 'response/info/c/uid')\n self.assert_path(resp, 'response/info/c/name')\n self.assert_path(resp, 'response/info/c/icon')\n self.assert_path(resp, 'response/info/c/bln')\n self.assert_path(resp, 'response/info/c/tid')\n self.assert_path(resp, 'response/info/c/t_name')\n self.assert_path(resp, 'response/info/c/pref')\n self.assert_path(resp, 'response/info/c/time')\n self.assert_path(resp, 'response/info/c/comment')\n self.assert_path(resp, 'response/info/c/is_tweet')\n found = False\n for child in resp.child('info').children:\n if child.name != 'c':\n continue\n if child.child_value('uid') == extid:\n name = child.child_value('name')\n comment = child.child_value('comment')\n if name != self.NAME:\n raise Exception(\"Invalid name '{}' returned for comment!\"\n .format(name))\n if comment != 'アメ〜〜!':\n raise Exception(\n \"Invalid comment '{}' returned for comment!\".format\n (comment))\n found = True\n if not found:\n raise Exception('Comment we posted was not found!')\n <mask token>\n\n def verify_jbrbcollabo_save(self, 
refid: str) ->None:\n call = self.call_node()\n jbrbcollabo = Node.void('jbrbcollabo')\n jbrbcollabo.set_attribute('method', 'save')\n jbrbcollabo.add_child(Node.string('ref_id', refid))\n jbrbcollabo.add_child(Node.u16('cre_count', 0))\n call.add_child(jbrbcollabo)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/jbrbcollabo')\n\n def verify(self, cardid: Optional[str]) ->None:\n self.verify_services_get(expected_services=['pcbtracker',\n 'pcbevent', 'local', 'message', 'facility', 'cardmng',\n 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby',\n 'ntp', 'keepalive'])\n paseli_enabled = self.verify_pcbtracker_alive()\n self.verify_message_get()\n self.verify_package_list()\n location = self.verify_facility_get()\n self.verify_pcbevent_put()\n self.verify_pcb_boot(location)\n self.verify_info_common()\n if cardid is not None:\n card = cardid\n else:\n card = self.random_card()\n print('Generated random card ID {} for use.'.format(card))\n if cardid is None:\n self.verify_cardmng_inquire(card, msg_type='unregistered',\n paseli_enabled=paseli_enabled)\n ref_id = self.verify_cardmng_getrefid(card)\n if len(ref_id) != 16:\n raise Exception(\n \"Invalid refid '{}' returned when registering card\".\n format(ref_id))\n if ref_id != self.verify_cardmng_inquire(card, msg_type='new',\n paseli_enabled=paseli_enabled):\n raise Exception(\n \"Invalid refid '{}' returned when querying card\".format\n (ref_id))\n self.verify_player_start(ref_id)\n self.verify_player_delete(ref_id)\n self.verify_player_succeed(ref_id)\n extid = self.verify_player_write(ref_id, location, [{'id': 0,\n 'chart': 0, 'clear_type': -1, 'achievement_rate': 0,\n 'score': 0, 'combo': 0, 'miss_count': 0}])\n else:\n print('Skipping new card checks for existing card')\n ref_id = self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled)\n self.verify_cardmng_authpass(ref_id, correct=True)\n self.verify_cardmng_authpass(ref_id, correct=False)\n if 
ref_id != self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled):\n raise Exception(\"Invalid refid '{}' returned when querying card\"\n .format(ref_id))\n self.verify_lobby_read(location, extid)\n eid = self.verify_lobby_entry(location, extid)\n self.verify_lobby_delete(eid)\n self.verify_pzlcmt_write(extid)\n self.verify_pzlcmt_read(extid)\n self.verify_jbrbcollabo_save(ref_id)\n if cardid is None:\n for phase in [1, 2]:\n if phase == 1:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2,\n 'achievement_rate': 7543, 'score': 432, 'combo': \n 123, 'miss_count': 5}, {'id': 1, 'chart': 0,\n 'clear_type': 4, 'achievement_rate': 9876, 'score':\n 543, 'combo': 543, 'miss_count': 0}, {'id': 3,\n 'chart': 2, 'clear_type': 2, 'achievement_rate': \n 1234, 'score': 123, 'combo': 42, 'miss_count': 54},\n {'id': 3, 'chart': 0, 'clear_type': 2,\n 'achievement_rate': 1024, 'score': 50, 'combo': 12,\n 'miss_count': 90}]\n if phase == 2:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 3,\n 'achievement_rate': 8765, 'score': 469, 'combo': \n 468, 'miss_count': 1}, {'id': 1, 'chart': 0,\n 'clear_type': 2, 'achievement_rate': 8765, 'score':\n 432, 'combo': 321, 'miss_count': 15,\n 'expected_score': 543, 'expected_clear_type': 4,\n 'expected_achievement_rate': 9876, 'expected_combo':\n 543, 'expected_miss_count': 0}]\n self.verify_player_write(ref_id, location, dummyscores)\n scores = self.verify_player_read(ref_id, location)\n for expected in dummyscores:\n actual = None\n for received in scores:\n if received['id'] == expected['id'] and received[\n 'chart'] == expected['chart']:\n actual = received\n break\n if actual is None:\n raise Exception(\n \"Didn't find song {} chart {} in response!\".\n format(expected['id'], expected['chart']))\n if 'expected_score' in expected:\n expected_score = expected['expected_score']\n else:\n expected_score = expected['score']\n if 'expected_achievement_rate' in expected:\n expected_achievement_rate = 
expected[\n 'expected_achievement_rate']\n else:\n expected_achievement_rate = expected['achievement_rate'\n ]\n if 'expected_clear_type' in expected:\n expected_clear_type = expected['expected_clear_type']\n else:\n expected_clear_type = expected['clear_type']\n if 'expected_combo' in expected:\n expected_combo = expected['expected_combo']\n else:\n expected_combo = expected['combo']\n if 'expected_miss_count' in expected:\n expected_miss_count = expected['expected_miss_count']\n else:\n expected_miss_count = expected['miss_count']\n if actual['score'] != expected_score:\n raise Exception(\n \"Expected a score of '{}' for song '{}' chart '{}' but got score '{}'\"\n .format(expected_score, expected['id'],\n expected['chart'], actual['score']))\n if actual['achievement_rate'] != expected_achievement_rate:\n raise Exception(\n \"Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'\"\n .format(expected_achievement_rate, expected[\n 'id'], expected['chart'], actual[\n 'achievement_rate']))\n if actual['clear_type'] != expected_clear_type:\n raise Exception(\n \"Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'\"\n .format(expected_clear_type, expected['id'],\n expected['chart'], actual['clear_type']))\n if actual['combo'] != expected_combo:\n raise Exception(\n \"Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'\"\n .format(expected_combo, expected['id'],\n expected['chart'], actual['combo']))\n if actual['miss_count'] != expected_miss_count:\n raise Exception(\n \"Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'\"\n .format(expected_miss_count, expected['id'],\n expected['chart'], actual['miss_count']))\n time.sleep(1)\n else:\n print('Skipping score checks for existing card')\n self.verify_player_end(ref_id)\n self.verify_info_ranking()\n if paseli_enabled:\n print('PASELI enabled for this PCBID, executing PASELI checks')\n else:\n print('PASELI 
disabled for this PCBID, skipping PASELI checks')\n return\n sessid, balance = self.verify_eacoin_checkin(card)\n if balance == 0:\n print('Skipping PASELI consume check because card has 0 balance')\n else:\n self.verify_eacoin_consume(sessid, balance, random.randint(0,\n balance))\n self.verify_eacoin_checkout(sessid)\n", "step-3": "<mask token>\n\n\nclass ReflecBeatColette(BaseClient):\n <mask token>\n\n def verify_pcb_boot(self, loc: str) ->None:\n call = self.call_node()\n pcb = Node.void('pcb')\n pcb.set_attribute('method', 'boot')\n pcb.add_child(Node.string('lid', loc))\n call.add_child(pcb)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/pcb/sinfo/nm')\n self.assert_path(resp, 'response/pcb/sinfo/cl_enbl')\n self.assert_path(resp, 'response/pcb/sinfo/cl_h')\n self.assert_path(resp, 'response/pcb/sinfo/cl_m')\n\n def verify_info_common(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'common')\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/event_ctrl')\n self.assert_path(resp, 'response/info/item_lock_ctrl')\n\n def verify_info_ranking(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'ranking')\n info.add_child(Node.s32('ver', 0))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/ver')\n self.assert_path(resp, 'response/info/ranking/weekly/bt')\n self.assert_path(resp, 'response/info/ranking/weekly/et')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/monthly/bt')\n self.assert_path(resp, 'response/info/ranking/monthly/et')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/total/bt')\n 
self.assert_path(resp, 'response/info/ranking/total/et')\n self.assert_path(resp, 'response/info/ranking/total/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/total/new/d/cnt')\n\n def verify_player_start(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'start')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n player.add_child(Node.u16('gp', 10573))\n player.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/plyid')\n self.assert_path(resp, 'response/player/start_time')\n self.assert_path(resp, 'response/player/event_ctrl')\n self.assert_path(resp, 'response/player/item_lock_ctrl')\n self.assert_path(resp, 'response/player/lincle_link_4')\n self.assert_path(resp, 'response/player/jbrbcollabo')\n self.assert_path(resp, 'response/player/tricolettepark')\n\n def verify_player_delete(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'delete')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_end(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'end')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_succeed(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'succeed')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/name')\n self.assert_path(resp, 'response/player/lv')\n self.assert_path(resp, 'response/player/exp')\n 
self.assert_path(resp, 'response/player/grd')\n self.assert_path(resp, 'response/player/ap')\n self.assert_path(resp, 'response/player/released')\n self.assert_path(resp, 'response/player/mrecord')\n\n def verify_player_read(self, refid: str, location: str) ->List[Dict[str,\n int]]:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'read')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.string('lid', location))\n player.add_child(Node.s16('ver', 5))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/pdata/account/usrid')\n self.assert_path(resp, 'response/player/pdata/account/tpc')\n self.assert_path(resp, 'response/player/pdata/account/dpc')\n self.assert_path(resp, 'response/player/pdata/account/crd')\n self.assert_path(resp, 'response/player/pdata/account/brd')\n self.assert_path(resp, 'response/player/pdata/account/tdc')\n self.assert_path(resp, 'response/player/pdata/account/intrvld')\n self.assert_path(resp, 'response/player/pdata/account/ver')\n self.assert_path(resp, 'response/player/pdata/account/pst')\n self.assert_path(resp, 'response/player/pdata/account/st')\n self.assert_path(resp, 'response/player/pdata/base/name')\n self.assert_path(resp, 'response/player/pdata/base/exp')\n self.assert_path(resp, 'response/player/pdata/base/lv')\n self.assert_path(resp, 'response/player/pdata/base/mg')\n self.assert_path(resp, 'response/player/pdata/base/ap')\n self.assert_path(resp, 'response/player/pdata/base/tid')\n self.assert_path(resp, 'response/player/pdata/base/tname')\n self.assert_path(resp, 'response/player/pdata/base/cmnt')\n self.assert_path(resp, 'response/player/pdata/base/uattr')\n self.assert_path(resp, 'response/player/pdata/base/hidden_param')\n self.assert_path(resp, 'response/player/pdata/base/tbs')\n self.assert_path(resp, 'response/player/pdata/base/tbs_r')\n self.assert_path(resp, 'response/player/pdata/rival')\n 
self.assert_path(resp, 'response/player/pdata/fav_music_slot')\n self.assert_path(resp, 'response/player/pdata/custom')\n self.assert_path(resp, 'response/player/pdata/config')\n self.assert_path(resp, 'response/player/pdata/stamp')\n self.assert_path(resp, 'response/player/pdata/released')\n self.assert_path(resp, 'response/player/pdata/record')\n if resp.child_value('player/pdata/base/name') != self.NAME:\n raise Exception('Invalid name {} returned on profile read!'.\n format(resp.child_value('player/pdata/base/name')))\n scores = []\n for child in resp.child('player/pdata/record').children:\n if child.name != 'rec':\n continue\n score = {'id': child.child_value('mid'), 'chart': child.\n child_value('ntgrd'), 'clear_type': child.child_value('ct'),\n 'achievement_rate': child.child_value('ar'), 'score': child\n .child_value('scr'), 'combo': child.child_value('cmb'),\n 'miss_count': child.child_value('ms')}\n scores.append(score)\n return scores\n\n def verify_player_write(self, refid: str, loc: str, scores: List[Dict[\n str, int]]) ->int:\n call = self.call_node()\n player = Node.void('player')\n call.add_child(player)\n player.set_attribute('method', 'write')\n pdata = Node.void('pdata')\n player.add_child(pdata)\n account = Node.void('account')\n pdata.add_child(account)\n account.add_child(Node.s32('usrid', 0))\n account.add_child(Node.s32('plyid', 0))\n account.add_child(Node.s32('tpc', 1))\n account.add_child(Node.s32('dpc', 1))\n account.add_child(Node.s32('crd', 1))\n account.add_child(Node.s32('brd', 1))\n account.add_child(Node.s32('tdc', 1))\n account.add_child(Node.string('rid', refid))\n account.add_child(Node.string('lid', loc))\n account.add_child(Node.u8('mode', 0))\n account.add_child(Node.s16('ver', 5))\n account.add_child(Node.bool('pp', True))\n account.add_child(Node.bool('ps', True))\n account.add_child(Node.s16('pay', 0))\n account.add_child(Node.s16('pay_pc', 0))\n account.add_child(Node.u64('st', int(time.time() * 1000)))\n base = 
Node.void('base')\n pdata.add_child(base)\n base.add_child(Node.string('name', self.NAME))\n base.add_child(Node.s32('exp', 0))\n base.add_child(Node.s32('lv', 1))\n base.add_child(Node.s32('mg', -1))\n base.add_child(Node.s32('ap', -1))\n base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n base.add_child(Node.bool('is_tut', True))\n stglog = Node.void('stglog')\n pdata.add_child(stglog)\n index = 0\n for score in scores:\n log = Node.void('log')\n stglog.add_child(log)\n log.add_child(Node.s8('stg', index))\n log.add_child(Node.s16('mid', score['id']))\n log.add_child(Node.s8('ng', score['chart']))\n log.add_child(Node.s8('col', 0))\n log.add_child(Node.s8('mt', 7))\n log.add_child(Node.s8('rt', 0))\n log.add_child(Node.s8('ct', score['clear_type']))\n log.add_child(Node.s16('grd', 0))\n log.add_child(Node.s16('ar', score['achievement_rate']))\n log.add_child(Node.s16('sc', score['score']))\n log.add_child(Node.s16('jt_jst', 0))\n log.add_child(Node.s16('jt_grt', 0))\n log.add_child(Node.s16('jt_gd', 0))\n log.add_child(Node.s16('jt_ms', score['miss_count']))\n log.add_child(Node.s16('jt_jr', 0))\n log.add_child(Node.s16('cmb', score['combo']))\n log.add_child(Node.s16('exp', 0))\n log.add_child(Node.s32('r_uid', 0))\n log.add_child(Node.s32('r_plyid', 0))\n log.add_child(Node.s8('r_stg', 0))\n log.add_child(Node.s8('r_ct', -1))\n log.add_child(Node.s16('r_sc', 0))\n log.add_child(Node.s16('r_grd', 0))\n log.add_child(Node.s16('r_ar', 0))\n log.add_child(Node.s8('r_cpuid', -1))\n log.add_child(Node.s32('time', int(time.time())))\n log.add_child(Node.s8('decide', 0))\n index = index + 1\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/uid')\n return resp.child_value('player/uid')\n <mask token>\n\n def verify_lobby_entry(self, location: str, extid: int) ->int:\n call = self.call_node()\n 
lobby = Node.void('lobby')\n lobby.set_attribute('method', 'entry')\n e = Node.void('e')\n lobby.add_child(e)\n e.add_child(Node.s32('eid', 0))\n e.add_child(Node.u16('mid', 79))\n e.add_child(Node.u8('ng', 0))\n e.add_child(Node.s32('uid', extid))\n e.add_child(Node.s32('uattr', 0))\n e.add_child(Node.string('pn', self.NAME))\n e.add_child(Node.s16('mg', 255))\n e.add_child(Node.s32('mopt', 0))\n e.add_child(Node.s32('tid', 0))\n e.add_child(Node.string('tn', ''))\n e.add_child(Node.s32('topt', 0))\n e.add_child(Node.string('lid', location))\n e.add_child(Node.string('sn', ''))\n e.add_child(Node.u8('pref', 51))\n e.add_child(Node.s8('stg', 4))\n e.add_child(Node.s8('pside', 0))\n e.add_child(Node.s16('eatime', 30))\n e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n e.add_child(Node.u16('gp', 10007))\n e.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n e.add_child(Node.u8('ver', 5))\n lobby.add_child(Node.s32_array('friend', []))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby/interval')\n self.assert_path(resp, 'response/lobby/interval_p')\n self.assert_path(resp, 'response/lobby/eid')\n self.assert_path(resp, 'response/lobby/e/eid')\n self.assert_path(resp, 'response/lobby/e/mid')\n self.assert_path(resp, 'response/lobby/e/ng')\n self.assert_path(resp, 'response/lobby/e/uid')\n self.assert_path(resp, 'response/lobby/e/uattr')\n self.assert_path(resp, 'response/lobby/e/pn')\n self.assert_path(resp, 'response/lobby/e/mg')\n self.assert_path(resp, 'response/lobby/e/mopt')\n self.assert_path(resp, 'response/lobby/e/tid')\n self.assert_path(resp, 'response/lobby/e/tn')\n self.assert_path(resp, 'response/lobby/e/topt')\n self.assert_path(resp, 'response/lobby/e/lid')\n self.assert_path(resp, 'response/lobby/e/sn')\n self.assert_path(resp, 'response/lobby/e/pref')\n self.assert_path(resp, 'response/lobby/e/stg')\n self.assert_path(resp, 'response/lobby/e/pside')\n self.assert_path(resp, 'response/lobby/e/eatime')\n 
self.assert_path(resp, 'response/lobby/e/ga')\n self.assert_path(resp, 'response/lobby/e/gp')\n self.assert_path(resp, 'response/lobby/e/la')\n self.assert_path(resp, 'response/lobby/e/ver')\n return resp.child_value('lobby/eid')\n\n def verify_lobby_delete(self, eid: int) ->None:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'delete')\n lobby.add_child(Node.s32('eid', eid))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby')\n\n def verify_pzlcmt_read(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_read')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.s32('time', 0))\n info.add_child(Node.s32('limit', 30))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/comment/time')\n self.assert_path(resp, 'response/info/c/uid')\n self.assert_path(resp, 'response/info/c/name')\n self.assert_path(resp, 'response/info/c/icon')\n self.assert_path(resp, 'response/info/c/bln')\n self.assert_path(resp, 'response/info/c/tid')\n self.assert_path(resp, 'response/info/c/t_name')\n self.assert_path(resp, 'response/info/c/pref')\n self.assert_path(resp, 'response/info/c/time')\n self.assert_path(resp, 'response/info/c/comment')\n self.assert_path(resp, 'response/info/c/is_tweet')\n found = False\n for child in resp.child('info').children:\n if child.name != 'c':\n continue\n if child.child_value('uid') == extid:\n name = child.child_value('name')\n comment = child.child_value('comment')\n if name != self.NAME:\n raise Exception(\"Invalid name '{}' returned for comment!\"\n .format(name))\n if comment != 'アメ〜〜!':\n raise Exception(\n \"Invalid comment '{}' returned for comment!\".format\n (comment))\n found = True\n if not found:\n raise Exception('Comment we posted was not found!')\n\n def verify_pzlcmt_write(self, extid: int) 
->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_write')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.string('name', self.NAME))\n info.add_child(Node.s16('icon', 0))\n info.add_child(Node.s8('bln', 0))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.string('t_name', ''))\n info.add_child(Node.s8('pref', 51))\n info.add_child(Node.s32('time', int(time.time())))\n info.add_child(Node.string('comment', 'アメ〜〜!'))\n info.add_child(Node.bool('is_tweet', True))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info')\n\n def verify_jbrbcollabo_save(self, refid: str) ->None:\n call = self.call_node()\n jbrbcollabo = Node.void('jbrbcollabo')\n jbrbcollabo.set_attribute('method', 'save')\n jbrbcollabo.add_child(Node.string('ref_id', refid))\n jbrbcollabo.add_child(Node.u16('cre_count', 0))\n call.add_child(jbrbcollabo)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/jbrbcollabo')\n\n def verify(self, cardid: Optional[str]) ->None:\n self.verify_services_get(expected_services=['pcbtracker',\n 'pcbevent', 'local', 'message', 'facility', 'cardmng',\n 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby',\n 'ntp', 'keepalive'])\n paseli_enabled = self.verify_pcbtracker_alive()\n self.verify_message_get()\n self.verify_package_list()\n location = self.verify_facility_get()\n self.verify_pcbevent_put()\n self.verify_pcb_boot(location)\n self.verify_info_common()\n if cardid is not None:\n card = cardid\n else:\n card = self.random_card()\n print('Generated random card ID {} for use.'.format(card))\n if cardid is None:\n self.verify_cardmng_inquire(card, msg_type='unregistered',\n paseli_enabled=paseli_enabled)\n ref_id = self.verify_cardmng_getrefid(card)\n if len(ref_id) != 16:\n raise Exception(\n \"Invalid refid '{}' returned when registering card\".\n format(ref_id))\n if ref_id != self.verify_cardmng_inquire(card, 
msg_type='new',\n paseli_enabled=paseli_enabled):\n raise Exception(\n \"Invalid refid '{}' returned when querying card\".format\n (ref_id))\n self.verify_player_start(ref_id)\n self.verify_player_delete(ref_id)\n self.verify_player_succeed(ref_id)\n extid = self.verify_player_write(ref_id, location, [{'id': 0,\n 'chart': 0, 'clear_type': -1, 'achievement_rate': 0,\n 'score': 0, 'combo': 0, 'miss_count': 0}])\n else:\n print('Skipping new card checks for existing card')\n ref_id = self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled)\n self.verify_cardmng_authpass(ref_id, correct=True)\n self.verify_cardmng_authpass(ref_id, correct=False)\n if ref_id != self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled):\n raise Exception(\"Invalid refid '{}' returned when querying card\"\n .format(ref_id))\n self.verify_lobby_read(location, extid)\n eid = self.verify_lobby_entry(location, extid)\n self.verify_lobby_delete(eid)\n self.verify_pzlcmt_write(extid)\n self.verify_pzlcmt_read(extid)\n self.verify_jbrbcollabo_save(ref_id)\n if cardid is None:\n for phase in [1, 2]:\n if phase == 1:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2,\n 'achievement_rate': 7543, 'score': 432, 'combo': \n 123, 'miss_count': 5}, {'id': 1, 'chart': 0,\n 'clear_type': 4, 'achievement_rate': 9876, 'score':\n 543, 'combo': 543, 'miss_count': 0}, {'id': 3,\n 'chart': 2, 'clear_type': 2, 'achievement_rate': \n 1234, 'score': 123, 'combo': 42, 'miss_count': 54},\n {'id': 3, 'chart': 0, 'clear_type': 2,\n 'achievement_rate': 1024, 'score': 50, 'combo': 12,\n 'miss_count': 90}]\n if phase == 2:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 3,\n 'achievement_rate': 8765, 'score': 469, 'combo': \n 468, 'miss_count': 1}, {'id': 1, 'chart': 0,\n 'clear_type': 2, 'achievement_rate': 8765, 'score':\n 432, 'combo': 321, 'miss_count': 15,\n 'expected_score': 543, 'expected_clear_type': 4,\n 'expected_achievement_rate': 9876, 
'expected_combo':\n 543, 'expected_miss_count': 0}]\n self.verify_player_write(ref_id, location, dummyscores)\n scores = self.verify_player_read(ref_id, location)\n for expected in dummyscores:\n actual = None\n for received in scores:\n if received['id'] == expected['id'] and received[\n 'chart'] == expected['chart']:\n actual = received\n break\n if actual is None:\n raise Exception(\n \"Didn't find song {} chart {} in response!\".\n format(expected['id'], expected['chart']))\n if 'expected_score' in expected:\n expected_score = expected['expected_score']\n else:\n expected_score = expected['score']\n if 'expected_achievement_rate' in expected:\n expected_achievement_rate = expected[\n 'expected_achievement_rate']\n else:\n expected_achievement_rate = expected['achievement_rate'\n ]\n if 'expected_clear_type' in expected:\n expected_clear_type = expected['expected_clear_type']\n else:\n expected_clear_type = expected['clear_type']\n if 'expected_combo' in expected:\n expected_combo = expected['expected_combo']\n else:\n expected_combo = expected['combo']\n if 'expected_miss_count' in expected:\n expected_miss_count = expected['expected_miss_count']\n else:\n expected_miss_count = expected['miss_count']\n if actual['score'] != expected_score:\n raise Exception(\n \"Expected a score of '{}' for song '{}' chart '{}' but got score '{}'\"\n .format(expected_score, expected['id'],\n expected['chart'], actual['score']))\n if actual['achievement_rate'] != expected_achievement_rate:\n raise Exception(\n \"Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'\"\n .format(expected_achievement_rate, expected[\n 'id'], expected['chart'], actual[\n 'achievement_rate']))\n if actual['clear_type'] != expected_clear_type:\n raise Exception(\n \"Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'\"\n .format(expected_clear_type, expected['id'],\n expected['chart'], actual['clear_type']))\n if actual['combo'] != 
expected_combo:\n raise Exception(\n \"Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'\"\n .format(expected_combo, expected['id'],\n expected['chart'], actual['combo']))\n if actual['miss_count'] != expected_miss_count:\n raise Exception(\n \"Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'\"\n .format(expected_miss_count, expected['id'],\n expected['chart'], actual['miss_count']))\n time.sleep(1)\n else:\n print('Skipping score checks for existing card')\n self.verify_player_end(ref_id)\n self.verify_info_ranking()\n if paseli_enabled:\n print('PASELI enabled for this PCBID, executing PASELI checks')\n else:\n print('PASELI disabled for this PCBID, skipping PASELI checks')\n return\n sessid, balance = self.verify_eacoin_checkin(card)\n if balance == 0:\n print('Skipping PASELI consume check because card has 0 balance')\n else:\n self.verify_eacoin_consume(sessid, balance, random.randint(0,\n balance))\n self.verify_eacoin_checkout(sessid)\n", "step-4": "<mask token>\n\n\nclass ReflecBeatColette(BaseClient):\n NAME = 'TEST'\n\n def verify_pcb_boot(self, loc: str) ->None:\n call = self.call_node()\n pcb = Node.void('pcb')\n pcb.set_attribute('method', 'boot')\n pcb.add_child(Node.string('lid', loc))\n call.add_child(pcb)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/pcb/sinfo/nm')\n self.assert_path(resp, 'response/pcb/sinfo/cl_enbl')\n self.assert_path(resp, 'response/pcb/sinfo/cl_h')\n self.assert_path(resp, 'response/pcb/sinfo/cl_m')\n\n def verify_info_common(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'common')\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/event_ctrl')\n self.assert_path(resp, 'response/info/item_lock_ctrl')\n\n def verify_info_ranking(self) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'ranking')\n 
info.add_child(Node.s32('ver', 0))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/ver')\n self.assert_path(resp, 'response/info/ranking/weekly/bt')\n self.assert_path(resp, 'response/info/ranking/weekly/et')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/weekly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/monthly/bt')\n self.assert_path(resp, 'response/info/ranking/monthly/et')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/monthly/new/d/cnt')\n self.assert_path(resp, 'response/info/ranking/total/bt')\n self.assert_path(resp, 'response/info/ranking/total/et')\n self.assert_path(resp, 'response/info/ranking/total/new/d/mid')\n self.assert_path(resp, 'response/info/ranking/total/new/d/cnt')\n\n def verify_player_start(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'start')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n player.add_child(Node.u16('gp', 10573))\n player.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/plyid')\n self.assert_path(resp, 'response/player/start_time')\n self.assert_path(resp, 'response/player/event_ctrl')\n self.assert_path(resp, 'response/player/item_lock_ctrl')\n self.assert_path(resp, 'response/player/lincle_link_4')\n self.assert_path(resp, 'response/player/jbrbcollabo')\n self.assert_path(resp, 'response/player/tricolettepark')\n\n def verify_player_delete(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'delete')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 
'response/player')\n\n def verify_player_end(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'end')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player')\n\n def verify_player_succeed(self, refid: str) ->None:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'succeed')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/name')\n self.assert_path(resp, 'response/player/lv')\n self.assert_path(resp, 'response/player/exp')\n self.assert_path(resp, 'response/player/grd')\n self.assert_path(resp, 'response/player/ap')\n self.assert_path(resp, 'response/player/released')\n self.assert_path(resp, 'response/player/mrecord')\n\n def verify_player_read(self, refid: str, location: str) ->List[Dict[str,\n int]]:\n call = self.call_node()\n player = Node.void('player')\n player.set_attribute('method', 'read')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.string('lid', location))\n player.add_child(Node.s16('ver', 5))\n call.add_child(player)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/pdata/account/usrid')\n self.assert_path(resp, 'response/player/pdata/account/tpc')\n self.assert_path(resp, 'response/player/pdata/account/dpc')\n self.assert_path(resp, 'response/player/pdata/account/crd')\n self.assert_path(resp, 'response/player/pdata/account/brd')\n self.assert_path(resp, 'response/player/pdata/account/tdc')\n self.assert_path(resp, 'response/player/pdata/account/intrvld')\n self.assert_path(resp, 'response/player/pdata/account/ver')\n self.assert_path(resp, 'response/player/pdata/account/pst')\n self.assert_path(resp, 'response/player/pdata/account/st')\n self.assert_path(resp, 'response/player/pdata/base/name')\n 
self.assert_path(resp, 'response/player/pdata/base/exp')\n self.assert_path(resp, 'response/player/pdata/base/lv')\n self.assert_path(resp, 'response/player/pdata/base/mg')\n self.assert_path(resp, 'response/player/pdata/base/ap')\n self.assert_path(resp, 'response/player/pdata/base/tid')\n self.assert_path(resp, 'response/player/pdata/base/tname')\n self.assert_path(resp, 'response/player/pdata/base/cmnt')\n self.assert_path(resp, 'response/player/pdata/base/uattr')\n self.assert_path(resp, 'response/player/pdata/base/hidden_param')\n self.assert_path(resp, 'response/player/pdata/base/tbs')\n self.assert_path(resp, 'response/player/pdata/base/tbs_r')\n self.assert_path(resp, 'response/player/pdata/rival')\n self.assert_path(resp, 'response/player/pdata/fav_music_slot')\n self.assert_path(resp, 'response/player/pdata/custom')\n self.assert_path(resp, 'response/player/pdata/config')\n self.assert_path(resp, 'response/player/pdata/stamp')\n self.assert_path(resp, 'response/player/pdata/released')\n self.assert_path(resp, 'response/player/pdata/record')\n if resp.child_value('player/pdata/base/name') != self.NAME:\n raise Exception('Invalid name {} returned on profile read!'.\n format(resp.child_value('player/pdata/base/name')))\n scores = []\n for child in resp.child('player/pdata/record').children:\n if child.name != 'rec':\n continue\n score = {'id': child.child_value('mid'), 'chart': child.\n child_value('ntgrd'), 'clear_type': child.child_value('ct'),\n 'achievement_rate': child.child_value('ar'), 'score': child\n .child_value('scr'), 'combo': child.child_value('cmb'),\n 'miss_count': child.child_value('ms')}\n scores.append(score)\n return scores\n\n def verify_player_write(self, refid: str, loc: str, scores: List[Dict[\n str, int]]) ->int:\n call = self.call_node()\n player = Node.void('player')\n call.add_child(player)\n player.set_attribute('method', 'write')\n pdata = Node.void('pdata')\n player.add_child(pdata)\n account = Node.void('account')\n 
pdata.add_child(account)\n account.add_child(Node.s32('usrid', 0))\n account.add_child(Node.s32('plyid', 0))\n account.add_child(Node.s32('tpc', 1))\n account.add_child(Node.s32('dpc', 1))\n account.add_child(Node.s32('crd', 1))\n account.add_child(Node.s32('brd', 1))\n account.add_child(Node.s32('tdc', 1))\n account.add_child(Node.string('rid', refid))\n account.add_child(Node.string('lid', loc))\n account.add_child(Node.u8('mode', 0))\n account.add_child(Node.s16('ver', 5))\n account.add_child(Node.bool('pp', True))\n account.add_child(Node.bool('ps', True))\n account.add_child(Node.s16('pay', 0))\n account.add_child(Node.s16('pay_pc', 0))\n account.add_child(Node.u64('st', int(time.time() * 1000)))\n base = Node.void('base')\n pdata.add_child(base)\n base.add_child(Node.string('name', self.NAME))\n base.add_child(Node.s32('exp', 0))\n base.add_child(Node.s32('lv', 1))\n base.add_child(Node.s32('mg', -1))\n base.add_child(Node.s32('ap', -1))\n base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n base.add_child(Node.bool('is_tut', True))\n stglog = Node.void('stglog')\n pdata.add_child(stglog)\n index = 0\n for score in scores:\n log = Node.void('log')\n stglog.add_child(log)\n log.add_child(Node.s8('stg', index))\n log.add_child(Node.s16('mid', score['id']))\n log.add_child(Node.s8('ng', score['chart']))\n log.add_child(Node.s8('col', 0))\n log.add_child(Node.s8('mt', 7))\n log.add_child(Node.s8('rt', 0))\n log.add_child(Node.s8('ct', score['clear_type']))\n log.add_child(Node.s16('grd', 0))\n log.add_child(Node.s16('ar', score['achievement_rate']))\n log.add_child(Node.s16('sc', score['score']))\n log.add_child(Node.s16('jt_jst', 0))\n log.add_child(Node.s16('jt_grt', 0))\n log.add_child(Node.s16('jt_gd', 0))\n log.add_child(Node.s16('jt_ms', score['miss_count']))\n log.add_child(Node.s16('jt_jr', 0))\n 
log.add_child(Node.s16('cmb', score['combo']))\n log.add_child(Node.s16('exp', 0))\n log.add_child(Node.s32('r_uid', 0))\n log.add_child(Node.s32('r_plyid', 0))\n log.add_child(Node.s8('r_stg', 0))\n log.add_child(Node.s8('r_ct', -1))\n log.add_child(Node.s16('r_sc', 0))\n log.add_child(Node.s16('r_grd', 0))\n log.add_child(Node.s16('r_ar', 0))\n log.add_child(Node.s8('r_cpuid', -1))\n log.add_child(Node.s32('time', int(time.time())))\n log.add_child(Node.s8('decide', 0))\n index = index + 1\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/player/uid')\n return resp.child_value('player/uid')\n\n def verify_lobby_read(self, location: str, extid: int) ->None:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'read')\n lobby.add_child(Node.s32('uid', extid))\n lobby.add_child(Node.u8('m_grade', 255))\n lobby.add_child(Node.string('lid', location))\n lobby.add_child(Node.s32('max', 128))\n lobby.add_child(Node.s32_array('friend', []))\n lobby.add_child(Node.u8('var', 5))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby/interval')\n self.assert_path(resp, 'response/lobby/interval_p')\n\n def verify_lobby_entry(self, location: str, extid: int) ->int:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'entry')\n e = Node.void('e')\n lobby.add_child(e)\n e.add_child(Node.s32('eid', 0))\n e.add_child(Node.u16('mid', 79))\n e.add_child(Node.u8('ng', 0))\n e.add_child(Node.s32('uid', extid))\n e.add_child(Node.s32('uattr', 0))\n e.add_child(Node.string('pn', self.NAME))\n e.add_child(Node.s16('mg', 255))\n e.add_child(Node.s32('mopt', 0))\n e.add_child(Node.s32('tid', 0))\n e.add_child(Node.string('tn', ''))\n e.add_child(Node.s32('topt', 0))\n e.add_child(Node.string('lid', location))\n e.add_child(Node.string('sn', ''))\n e.add_child(Node.u8('pref', 51))\n e.add_child(Node.s8('stg', 4))\n e.add_child(Node.s8('pside', 0))\n 
e.add_child(Node.s16('eatime', 30))\n e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n e.add_child(Node.u16('gp', 10007))\n e.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n e.add_child(Node.u8('ver', 5))\n lobby.add_child(Node.s32_array('friend', []))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby/interval')\n self.assert_path(resp, 'response/lobby/interval_p')\n self.assert_path(resp, 'response/lobby/eid')\n self.assert_path(resp, 'response/lobby/e/eid')\n self.assert_path(resp, 'response/lobby/e/mid')\n self.assert_path(resp, 'response/lobby/e/ng')\n self.assert_path(resp, 'response/lobby/e/uid')\n self.assert_path(resp, 'response/lobby/e/uattr')\n self.assert_path(resp, 'response/lobby/e/pn')\n self.assert_path(resp, 'response/lobby/e/mg')\n self.assert_path(resp, 'response/lobby/e/mopt')\n self.assert_path(resp, 'response/lobby/e/tid')\n self.assert_path(resp, 'response/lobby/e/tn')\n self.assert_path(resp, 'response/lobby/e/topt')\n self.assert_path(resp, 'response/lobby/e/lid')\n self.assert_path(resp, 'response/lobby/e/sn')\n self.assert_path(resp, 'response/lobby/e/pref')\n self.assert_path(resp, 'response/lobby/e/stg')\n self.assert_path(resp, 'response/lobby/e/pside')\n self.assert_path(resp, 'response/lobby/e/eatime')\n self.assert_path(resp, 'response/lobby/e/ga')\n self.assert_path(resp, 'response/lobby/e/gp')\n self.assert_path(resp, 'response/lobby/e/la')\n self.assert_path(resp, 'response/lobby/e/ver')\n return resp.child_value('lobby/eid')\n\n def verify_lobby_delete(self, eid: int) ->None:\n call = self.call_node()\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'delete')\n lobby.add_child(Node.s32('eid', eid))\n call.add_child(lobby)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/lobby')\n\n def verify_pzlcmt_read(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_read')\n 
info.add_child(Node.s32('uid', extid))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.s32('time', 0))\n info.add_child(Node.s32('limit', 30))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info/comment/time')\n self.assert_path(resp, 'response/info/c/uid')\n self.assert_path(resp, 'response/info/c/name')\n self.assert_path(resp, 'response/info/c/icon')\n self.assert_path(resp, 'response/info/c/bln')\n self.assert_path(resp, 'response/info/c/tid')\n self.assert_path(resp, 'response/info/c/t_name')\n self.assert_path(resp, 'response/info/c/pref')\n self.assert_path(resp, 'response/info/c/time')\n self.assert_path(resp, 'response/info/c/comment')\n self.assert_path(resp, 'response/info/c/is_tweet')\n found = False\n for child in resp.child('info').children:\n if child.name != 'c':\n continue\n if child.child_value('uid') == extid:\n name = child.child_value('name')\n comment = child.child_value('comment')\n if name != self.NAME:\n raise Exception(\"Invalid name '{}' returned for comment!\"\n .format(name))\n if comment != 'アメ〜〜!':\n raise Exception(\n \"Invalid comment '{}' returned for comment!\".format\n (comment))\n found = True\n if not found:\n raise Exception('Comment we posted was not found!')\n\n def verify_pzlcmt_write(self, extid: int) ->None:\n call = self.call_node()\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_write')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.string('name', self.NAME))\n info.add_child(Node.s16('icon', 0))\n info.add_child(Node.s8('bln', 0))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.string('t_name', ''))\n info.add_child(Node.s8('pref', 51))\n info.add_child(Node.s32('time', int(time.time())))\n info.add_child(Node.string('comment', 'アメ〜〜!'))\n info.add_child(Node.bool('is_tweet', True))\n call.add_child(info)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/info')\n\n def verify_jbrbcollabo_save(self, 
refid: str) ->None:\n call = self.call_node()\n jbrbcollabo = Node.void('jbrbcollabo')\n jbrbcollabo.set_attribute('method', 'save')\n jbrbcollabo.add_child(Node.string('ref_id', refid))\n jbrbcollabo.add_child(Node.u16('cre_count', 0))\n call.add_child(jbrbcollabo)\n resp = self.exchange('', call)\n self.assert_path(resp, 'response/jbrbcollabo')\n\n def verify(self, cardid: Optional[str]) ->None:\n self.verify_services_get(expected_services=['pcbtracker',\n 'pcbevent', 'local', 'message', 'facility', 'cardmng',\n 'package', 'posevent', 'pkglist', 'dlstatus', 'eacoin', 'lobby',\n 'ntp', 'keepalive'])\n paseli_enabled = self.verify_pcbtracker_alive()\n self.verify_message_get()\n self.verify_package_list()\n location = self.verify_facility_get()\n self.verify_pcbevent_put()\n self.verify_pcb_boot(location)\n self.verify_info_common()\n if cardid is not None:\n card = cardid\n else:\n card = self.random_card()\n print('Generated random card ID {} for use.'.format(card))\n if cardid is None:\n self.verify_cardmng_inquire(card, msg_type='unregistered',\n paseli_enabled=paseli_enabled)\n ref_id = self.verify_cardmng_getrefid(card)\n if len(ref_id) != 16:\n raise Exception(\n \"Invalid refid '{}' returned when registering card\".\n format(ref_id))\n if ref_id != self.verify_cardmng_inquire(card, msg_type='new',\n paseli_enabled=paseli_enabled):\n raise Exception(\n \"Invalid refid '{}' returned when querying card\".format\n (ref_id))\n self.verify_player_start(ref_id)\n self.verify_player_delete(ref_id)\n self.verify_player_succeed(ref_id)\n extid = self.verify_player_write(ref_id, location, [{'id': 0,\n 'chart': 0, 'clear_type': -1, 'achievement_rate': 0,\n 'score': 0, 'combo': 0, 'miss_count': 0}])\n else:\n print('Skipping new card checks for existing card')\n ref_id = self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled)\n self.verify_cardmng_authpass(ref_id, correct=True)\n self.verify_cardmng_authpass(ref_id, correct=False)\n if 
ref_id != self.verify_cardmng_inquire(card, msg_type='query',\n paseli_enabled=paseli_enabled):\n raise Exception(\"Invalid refid '{}' returned when querying card\"\n .format(ref_id))\n self.verify_lobby_read(location, extid)\n eid = self.verify_lobby_entry(location, extid)\n self.verify_lobby_delete(eid)\n self.verify_pzlcmt_write(extid)\n self.verify_pzlcmt_read(extid)\n self.verify_jbrbcollabo_save(ref_id)\n if cardid is None:\n for phase in [1, 2]:\n if phase == 1:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 2,\n 'achievement_rate': 7543, 'score': 432, 'combo': \n 123, 'miss_count': 5}, {'id': 1, 'chart': 0,\n 'clear_type': 4, 'achievement_rate': 9876, 'score':\n 543, 'combo': 543, 'miss_count': 0}, {'id': 3,\n 'chart': 2, 'clear_type': 2, 'achievement_rate': \n 1234, 'score': 123, 'combo': 42, 'miss_count': 54},\n {'id': 3, 'chart': 0, 'clear_type': 2,\n 'achievement_rate': 1024, 'score': 50, 'combo': 12,\n 'miss_count': 90}]\n if phase == 2:\n dummyscores = [{'id': 1, 'chart': 1, 'clear_type': 3,\n 'achievement_rate': 8765, 'score': 469, 'combo': \n 468, 'miss_count': 1}, {'id': 1, 'chart': 0,\n 'clear_type': 2, 'achievement_rate': 8765, 'score':\n 432, 'combo': 321, 'miss_count': 15,\n 'expected_score': 543, 'expected_clear_type': 4,\n 'expected_achievement_rate': 9876, 'expected_combo':\n 543, 'expected_miss_count': 0}]\n self.verify_player_write(ref_id, location, dummyscores)\n scores = self.verify_player_read(ref_id, location)\n for expected in dummyscores:\n actual = None\n for received in scores:\n if received['id'] == expected['id'] and received[\n 'chart'] == expected['chart']:\n actual = received\n break\n if actual is None:\n raise Exception(\n \"Didn't find song {} chart {} in response!\".\n format(expected['id'], expected['chart']))\n if 'expected_score' in expected:\n expected_score = expected['expected_score']\n else:\n expected_score = expected['score']\n if 'expected_achievement_rate' in expected:\n expected_achievement_rate = 
expected[\n 'expected_achievement_rate']\n else:\n expected_achievement_rate = expected['achievement_rate'\n ]\n if 'expected_clear_type' in expected:\n expected_clear_type = expected['expected_clear_type']\n else:\n expected_clear_type = expected['clear_type']\n if 'expected_combo' in expected:\n expected_combo = expected['expected_combo']\n else:\n expected_combo = expected['combo']\n if 'expected_miss_count' in expected:\n expected_miss_count = expected['expected_miss_count']\n else:\n expected_miss_count = expected['miss_count']\n if actual['score'] != expected_score:\n raise Exception(\n \"Expected a score of '{}' for song '{}' chart '{}' but got score '{}'\"\n .format(expected_score, expected['id'],\n expected['chart'], actual['score']))\n if actual['achievement_rate'] != expected_achievement_rate:\n raise Exception(\n \"Expected an achievement rate of '{}' for song '{}' chart '{}' but got achievement rate '{}'\"\n .format(expected_achievement_rate, expected[\n 'id'], expected['chart'], actual[\n 'achievement_rate']))\n if actual['clear_type'] != expected_clear_type:\n raise Exception(\n \"Expected a clear_type of '{}' for song '{}' chart '{}' but got clear_type '{}'\"\n .format(expected_clear_type, expected['id'],\n expected['chart'], actual['clear_type']))\n if actual['combo'] != expected_combo:\n raise Exception(\n \"Expected a combo of '{}' for song '{}' chart '{}' but got combo '{}'\"\n .format(expected_combo, expected['id'],\n expected['chart'], actual['combo']))\n if actual['miss_count'] != expected_miss_count:\n raise Exception(\n \"Expected a miss count of '{}' for song '{}' chart '{}' but got miss count '{}'\"\n .format(expected_miss_count, expected['id'],\n expected['chart'], actual['miss_count']))\n time.sleep(1)\n else:\n print('Skipping score checks for existing card')\n self.verify_player_end(ref_id)\n self.verify_info_ranking()\n if paseli_enabled:\n print('PASELI enabled for this PCBID, executing PASELI checks')\n else:\n print('PASELI 
disabled for this PCBID, skipping PASELI checks')\n return\n sessid, balance = self.verify_eacoin_checkin(card)\n if balance == 0:\n print('Skipping PASELI consume check because card has 0 balance')\n else:\n self.verify_eacoin_consume(sessid, balance, random.randint(0,\n balance))\n self.verify_eacoin_checkout(sessid)\n", "step-5": "import random\nimport time\nfrom typing import Dict, List, Optional\n\nfrom bemani.client.base import BaseClient\nfrom bemani.protocol import Node\n\n\nclass ReflecBeatColette(BaseClient):\n NAME = 'TEST'\n\n def verify_pcb_boot(self, loc: str) -> None:\n call = self.call_node()\n\n pcb = Node.void('pcb')\n pcb.set_attribute('method', 'boot')\n pcb.add_child(Node.string('lid', loc))\n call.add_child(pcb)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/pcb/sinfo/nm\")\n self.assert_path(resp, \"response/pcb/sinfo/cl_enbl\")\n self.assert_path(resp, \"response/pcb/sinfo/cl_h\")\n self.assert_path(resp, \"response/pcb/sinfo/cl_m\")\n\n def verify_info_common(self) -> None:\n call = self.call_node()\n\n info = Node.void('info')\n info.set_attribute('method', 'common')\n call.add_child(info)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/info/event_ctrl\")\n self.assert_path(resp, \"response/info/item_lock_ctrl\")\n\n def verify_info_ranking(self) -> None:\n call = self.call_node()\n\n info = Node.void('info')\n info.set_attribute('method', 'ranking')\n info.add_child(Node.s32('ver', 0))\n call.add_child(info)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/info/ver\")\n self.assert_path(resp, \"response/info/ranking/weekly/bt\")\n self.assert_path(resp, \"response/info/ranking/weekly/et\")\n self.assert_path(resp, \"response/info/ranking/weekly/new/d/mid\")\n self.assert_path(resp, 
\"response/info/ranking/weekly/new/d/cnt\")\n self.assert_path(resp, \"response/info/ranking/monthly/bt\")\n self.assert_path(resp, \"response/info/ranking/monthly/et\")\n self.assert_path(resp, \"response/info/ranking/monthly/new/d/mid\")\n self.assert_path(resp, \"response/info/ranking/monthly/new/d/cnt\")\n self.assert_path(resp, \"response/info/ranking/total/bt\")\n self.assert_path(resp, \"response/info/ranking/total/et\")\n self.assert_path(resp, \"response/info/ranking/total/new/d/mid\")\n self.assert_path(resp, \"response/info/ranking/total/new/d/cnt\")\n\n def verify_player_start(self, refid: str) -> None:\n call = self.call_node()\n\n player = Node.void('player')\n player.set_attribute('method', 'start')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n player.add_child(Node.u16('gp', 10573))\n player.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n call.add_child(player)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player/plyid\")\n self.assert_path(resp, \"response/player/start_time\")\n self.assert_path(resp, \"response/player/event_ctrl\")\n self.assert_path(resp, \"response/player/item_lock_ctrl\")\n self.assert_path(resp, \"response/player/lincle_link_4\")\n self.assert_path(resp, \"response/player/jbrbcollabo\")\n self.assert_path(resp, \"response/player/tricolettepark\")\n\n def verify_player_delete(self, refid: str) -> None:\n call = self.call_node()\n\n player = Node.void('player')\n player.set_attribute('method', 'delete')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player\")\n\n def verify_player_end(self, refid: str) -> None:\n call = self.call_node()\n\n player = Node.void('player')\n player.set_attribute('method', 'end')\n 
player.add_child(Node.string('rid', refid))\n call.add_child(player)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player\")\n\n def verify_player_succeed(self, refid: str) -> None:\n call = self.call_node()\n\n player = Node.void('player')\n player.set_attribute('method', 'succeed')\n player.add_child(Node.string('rid', refid))\n call.add_child(player)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player/name\")\n self.assert_path(resp, \"response/player/lv\")\n self.assert_path(resp, \"response/player/exp\")\n self.assert_path(resp, \"response/player/grd\")\n self.assert_path(resp, \"response/player/ap\")\n self.assert_path(resp, \"response/player/released\")\n self.assert_path(resp, \"response/player/mrecord\")\n\n def verify_player_read(self, refid: str, location: str) -> List[Dict[str, int]]:\n call = self.call_node()\n\n player = Node.void('player')\n player.set_attribute('method', 'read')\n player.add_child(Node.string('rid', refid))\n player.add_child(Node.string('lid', location))\n player.add_child(Node.s16('ver', 5))\n call.add_child(player)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player/pdata/account/usrid\")\n self.assert_path(resp, \"response/player/pdata/account/tpc\")\n self.assert_path(resp, \"response/player/pdata/account/dpc\")\n self.assert_path(resp, \"response/player/pdata/account/crd\")\n self.assert_path(resp, \"response/player/pdata/account/brd\")\n self.assert_path(resp, \"response/player/pdata/account/tdc\")\n self.assert_path(resp, \"response/player/pdata/account/intrvld\")\n self.assert_path(resp, \"response/player/pdata/account/ver\")\n self.assert_path(resp, \"response/player/pdata/account/pst\")\n self.assert_path(resp, \"response/player/pdata/account/st\")\n self.assert_path(resp, 
\"response/player/pdata/base/name\")\n self.assert_path(resp, \"response/player/pdata/base/exp\")\n self.assert_path(resp, \"response/player/pdata/base/lv\")\n self.assert_path(resp, \"response/player/pdata/base/mg\")\n self.assert_path(resp, \"response/player/pdata/base/ap\")\n self.assert_path(resp, \"response/player/pdata/base/tid\")\n self.assert_path(resp, \"response/player/pdata/base/tname\")\n self.assert_path(resp, \"response/player/pdata/base/cmnt\")\n self.assert_path(resp, \"response/player/pdata/base/uattr\")\n self.assert_path(resp, \"response/player/pdata/base/hidden_param\")\n self.assert_path(resp, \"response/player/pdata/base/tbs\")\n self.assert_path(resp, \"response/player/pdata/base/tbs_r\")\n self.assert_path(resp, \"response/player/pdata/rival\")\n self.assert_path(resp, \"response/player/pdata/fav_music_slot\")\n self.assert_path(resp, \"response/player/pdata/custom\")\n self.assert_path(resp, \"response/player/pdata/config\")\n self.assert_path(resp, \"response/player/pdata/stamp\")\n self.assert_path(resp, \"response/player/pdata/released\")\n self.assert_path(resp, \"response/player/pdata/record\")\n\n if resp.child_value('player/pdata/base/name') != self.NAME:\n raise Exception('Invalid name {} returned on profile read!'.format(resp.child_value('player/pdata/base/name')))\n\n scores = []\n for child in resp.child('player/pdata/record').children:\n if child.name != 'rec':\n continue\n\n score = {\n 'id': child.child_value('mid'),\n 'chart': child.child_value('ntgrd'),\n 'clear_type': child.child_value('ct'),\n 'achievement_rate': child.child_value('ar'),\n 'score': child.child_value('scr'),\n 'combo': child.child_value('cmb'),\n 'miss_count': child.child_value('ms'),\n }\n scores.append(score)\n return scores\n\n def verify_player_write(self, refid: str, loc: str, scores: List[Dict[str, int]]) -> int:\n call = self.call_node()\n\n player = Node.void('player')\n call.add_child(player)\n player.set_attribute('method', 'write')\n pdata = 
Node.void('pdata')\n player.add_child(pdata)\n account = Node.void('account')\n pdata.add_child(account)\n account.add_child(Node.s32('usrid', 0))\n account.add_child(Node.s32('plyid', 0))\n account.add_child(Node.s32('tpc', 1))\n account.add_child(Node.s32('dpc', 1))\n account.add_child(Node.s32('crd', 1))\n account.add_child(Node.s32('brd', 1))\n account.add_child(Node.s32('tdc', 1))\n account.add_child(Node.string('rid', refid))\n account.add_child(Node.string('lid', loc))\n account.add_child(Node.u8('mode', 0))\n account.add_child(Node.s16('ver', 5))\n account.add_child(Node.bool('pp', True))\n account.add_child(Node.bool('ps', True))\n account.add_child(Node.s16('pay', 0))\n account.add_child(Node.s16('pay_pc', 0))\n account.add_child(Node.u64('st', int(time.time() * 1000)))\n base = Node.void('base')\n pdata.add_child(base)\n base.add_child(Node.string('name', self.NAME))\n base.add_child(Node.s32('exp', 0))\n base.add_child(Node.s32('lv', 1))\n base.add_child(Node.s32('mg', -1))\n base.add_child(Node.s32('ap', -1))\n base.add_child(Node.s32_array('hidden_param', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]))\n base.add_child(Node.bool('is_tut', True))\n stglog = Node.void('stglog')\n pdata.add_child(stglog)\n index = 0\n for score in scores:\n log = Node.void('log')\n stglog.add_child(log)\n log.add_child(Node.s8('stg', index))\n log.add_child(Node.s16('mid', score['id']))\n log.add_child(Node.s8('ng', score['chart']))\n log.add_child(Node.s8('col', 0))\n log.add_child(Node.s8('mt', 7))\n log.add_child(Node.s8('rt', 0))\n log.add_child(Node.s8('ct', score['clear_type']))\n log.add_child(Node.s16('grd', 0))\n log.add_child(Node.s16('ar', score['achievement_rate']))\n log.add_child(Node.s16('sc', score['score']))\n log.add_child(Node.s16('jt_jst', 0))\n log.add_child(Node.s16('jt_grt', 0))\n log.add_child(Node.s16('jt_gd', 0))\n 
log.add_child(Node.s16('jt_ms', score['miss_count']))\n log.add_child(Node.s16('jt_jr', 0))\n log.add_child(Node.s16('cmb', score['combo']))\n log.add_child(Node.s16('exp', 0))\n log.add_child(Node.s32('r_uid', 0))\n log.add_child(Node.s32('r_plyid', 0))\n log.add_child(Node.s8('r_stg', 0))\n log.add_child(Node.s8('r_ct', -1))\n log.add_child(Node.s16('r_sc', 0))\n log.add_child(Node.s16('r_grd', 0))\n log.add_child(Node.s16('r_ar', 0))\n log.add_child(Node.s8('r_cpuid', -1))\n log.add_child(Node.s32('time', int(time.time())))\n log.add_child(Node.s8('decide', 0))\n index = index + 1\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/player/uid\")\n return resp.child_value('player/uid')\n\n def verify_lobby_read(self, location: str, extid: int) -> None:\n call = self.call_node()\n\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'read')\n lobby.add_child(Node.s32('uid', extid))\n lobby.add_child(Node.u8('m_grade', 255))\n lobby.add_child(Node.string('lid', location))\n lobby.add_child(Node.s32('max', 128))\n lobby.add_child(Node.s32_array('friend', []))\n lobby.add_child(Node.u8('var', 5))\n call.add_child(lobby)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/lobby/interval\")\n self.assert_path(resp, \"response/lobby/interval_p\")\n\n def verify_lobby_entry(self, location: str, extid: int) -> int:\n call = self.call_node()\n\n lobby = Node.void('lobby')\n lobby.set_attribute('method', 'entry')\n e = Node.void('e')\n lobby.add_child(e)\n e.add_child(Node.s32('eid', 0))\n e.add_child(Node.u16('mid', 79))\n e.add_child(Node.u8('ng', 0))\n e.add_child(Node.s32('uid', extid))\n e.add_child(Node.s32('uattr', 0))\n e.add_child(Node.string('pn', self.NAME))\n e.add_child(Node.s16('mg', 255))\n e.add_child(Node.s32('mopt', 0))\n e.add_child(Node.s32('tid', 0))\n e.add_child(Node.string('tn', ''))\n 
e.add_child(Node.s32('topt', 0))\n e.add_child(Node.string('lid', location))\n e.add_child(Node.string('sn', ''))\n e.add_child(Node.u8('pref', 51))\n e.add_child(Node.s8('stg', 4))\n e.add_child(Node.s8('pside', 0))\n e.add_child(Node.s16('eatime', 30))\n e.add_child(Node.u8_array('ga', [127, 0, 0, 1]))\n e.add_child(Node.u16('gp', 10007))\n e.add_child(Node.u8_array('la', [16, 0, 0, 0]))\n e.add_child(Node.u8('ver', 5))\n lobby.add_child(Node.s32_array('friend', []))\n call.add_child(lobby)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/lobby/interval\")\n self.assert_path(resp, \"response/lobby/interval_p\")\n self.assert_path(resp, \"response/lobby/eid\")\n self.assert_path(resp, \"response/lobby/e/eid\")\n self.assert_path(resp, \"response/lobby/e/mid\")\n self.assert_path(resp, \"response/lobby/e/ng\")\n self.assert_path(resp, \"response/lobby/e/uid\")\n self.assert_path(resp, \"response/lobby/e/uattr\")\n self.assert_path(resp, \"response/lobby/e/pn\")\n self.assert_path(resp, \"response/lobby/e/mg\")\n self.assert_path(resp, \"response/lobby/e/mopt\")\n self.assert_path(resp, \"response/lobby/e/tid\")\n self.assert_path(resp, \"response/lobby/e/tn\")\n self.assert_path(resp, \"response/lobby/e/topt\")\n self.assert_path(resp, \"response/lobby/e/lid\")\n self.assert_path(resp, \"response/lobby/e/sn\")\n self.assert_path(resp, \"response/lobby/e/pref\")\n self.assert_path(resp, \"response/lobby/e/stg\")\n self.assert_path(resp, \"response/lobby/e/pside\")\n self.assert_path(resp, \"response/lobby/e/eatime\")\n self.assert_path(resp, \"response/lobby/e/ga\")\n self.assert_path(resp, \"response/lobby/e/gp\")\n self.assert_path(resp, \"response/lobby/e/la\")\n self.assert_path(resp, \"response/lobby/e/ver\")\n return resp.child_value('lobby/eid')\n\n def verify_lobby_delete(self, eid: int) -> None:\n call = self.call_node()\n\n lobby = Node.void('lobby')\n 
lobby.set_attribute('method', 'delete')\n lobby.add_child(Node.s32('eid', eid))\n call.add_child(lobby)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/lobby\")\n\n def verify_pzlcmt_read(self, extid: int) -> None:\n call = self.call_node()\n\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_read')\n info.add_child(Node.s32('uid', extid))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.s32('time', 0))\n info.add_child(Node.s32('limit', 30))\n call.add_child(info)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/info/comment/time\")\n self.assert_path(resp, \"response/info/c/uid\")\n self.assert_path(resp, \"response/info/c/name\")\n self.assert_path(resp, \"response/info/c/icon\")\n self.assert_path(resp, \"response/info/c/bln\")\n self.assert_path(resp, \"response/info/c/tid\")\n self.assert_path(resp, \"response/info/c/t_name\")\n self.assert_path(resp, \"response/info/c/pref\")\n self.assert_path(resp, \"response/info/c/time\")\n self.assert_path(resp, \"response/info/c/comment\")\n self.assert_path(resp, \"response/info/c/is_tweet\")\n\n # Verify we posted our comment earlier\n found = False\n for child in resp.child('info').children:\n if child.name != 'c':\n continue\n if child.child_value('uid') == extid:\n name = child.child_value('name')\n comment = child.child_value('comment')\n if name != self.NAME:\n raise Exception('Invalid name \\'{}\\' returned for comment!'.format(name))\n if comment != 'アメ〜〜!':\n raise Exception('Invalid comment \\'{}\\' returned for comment!'.format(comment))\n found = True\n\n if not found:\n raise Exception('Comment we posted was not found!')\n\n def verify_pzlcmt_write(self, extid: int) -> None:\n call = self.call_node()\n\n info = Node.void('info')\n info.set_attribute('method', 'pzlcmt_write')\n info.add_child(Node.s32('uid', extid))\n 
info.add_child(Node.string('name', self.NAME))\n info.add_child(Node.s16('icon', 0))\n info.add_child(Node.s8('bln', 0))\n info.add_child(Node.s32('tid', 0))\n info.add_child(Node.string('t_name', ''))\n info.add_child(Node.s8('pref', 51))\n info.add_child(Node.s32('time', int(time.time())))\n info.add_child(Node.string('comment', 'アメ〜〜!'))\n info.add_child(Node.bool('is_tweet', True))\n call.add_child(info)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/info\")\n\n def verify_jbrbcollabo_save(self, refid: str) -> None:\n call = self.call_node()\n\n jbrbcollabo = Node.void('jbrbcollabo')\n jbrbcollabo.set_attribute('method', 'save')\n jbrbcollabo.add_child(Node.string('ref_id', refid))\n jbrbcollabo.add_child(Node.u16('cre_count', 0))\n call.add_child(jbrbcollabo)\n\n # Swap with server\n resp = self.exchange('', call)\n\n # Verify that response is correct\n self.assert_path(resp, \"response/jbrbcollabo\")\n\n def verify(self, cardid: Optional[str]) -> None:\n # Verify boot sequence is okay\n self.verify_services_get(\n expected_services=[\n 'pcbtracker',\n 'pcbevent',\n 'local',\n 'message',\n 'facility',\n 'cardmng',\n 'package',\n 'posevent',\n 'pkglist',\n 'dlstatus',\n 'eacoin',\n 'lobby',\n 'ntp',\n 'keepalive'\n ]\n )\n paseli_enabled = self.verify_pcbtracker_alive()\n self.verify_message_get()\n self.verify_package_list()\n location = self.verify_facility_get()\n self.verify_pcbevent_put()\n self.verify_pcb_boot(location)\n self.verify_info_common()\n\n # Verify card registration and profile lookup\n if cardid is not None:\n card = cardid\n else:\n card = self.random_card()\n print(\"Generated random card ID {} for use.\".format(card))\n\n if cardid is None:\n self.verify_cardmng_inquire(card, msg_type='unregistered', paseli_enabled=paseli_enabled)\n ref_id = self.verify_cardmng_getrefid(card)\n if len(ref_id) != 16:\n raise Exception('Invalid refid \\'{}\\' returned when 
registering card'.format(ref_id))\n if ref_id != self.verify_cardmng_inquire(card, msg_type='new', paseli_enabled=paseli_enabled):\n raise Exception('Invalid refid \\'{}\\' returned when querying card'.format(ref_id))\n # Always get a player start, regardless of new profile or not\n self.verify_player_start(ref_id)\n self.verify_player_delete(ref_id)\n self.verify_player_succeed(ref_id)\n extid = self.verify_player_write(\n ref_id,\n location,\n [{\n 'id': 0,\n 'chart': 0,\n 'clear_type': -1,\n 'achievement_rate': 0,\n 'score': 0,\n 'combo': 0,\n 'miss_count': 0,\n }]\n )\n else:\n print(\"Skipping new card checks for existing card\")\n ref_id = self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled)\n\n # Verify pin handling and return card handling\n self.verify_cardmng_authpass(ref_id, correct=True)\n self.verify_cardmng_authpass(ref_id, correct=False)\n if ref_id != self.verify_cardmng_inquire(card, msg_type='query', paseli_enabled=paseli_enabled):\n raise Exception('Invalid refid \\'{}\\' returned when querying card'.format(ref_id))\n\n # Verify lobby functionality\n self.verify_lobby_read(location, extid)\n eid = self.verify_lobby_entry(location, extid)\n self.verify_lobby_delete(eid)\n\n # Verify puzzle comment read and write\n self.verify_pzlcmt_write(extid)\n self.verify_pzlcmt_read(extid)\n\n # Verify Jubeat/ReflecBeat collabo save\n self.verify_jbrbcollabo_save(ref_id)\n\n if cardid is None:\n # Verify score saving and updating\n for phase in [1, 2]:\n if phase == 1:\n dummyscores = [\n # An okay score on a chart\n {\n 'id': 1,\n 'chart': 1,\n 'clear_type': 2,\n 'achievement_rate': 7543,\n 'score': 432,\n 'combo': 123,\n 'miss_count': 5,\n },\n # A good score on an easier chart of the same song\n {\n 'id': 1,\n 'chart': 0,\n 'clear_type': 4,\n 'achievement_rate': 9876,\n 'score': 543,\n 'combo': 543,\n 'miss_count': 0,\n },\n # A bad score on a hard chart\n {\n 'id': 3,\n 'chart': 2,\n 'clear_type': 2,\n 'achievement_rate': 
1234,\n 'score': 123,\n 'combo': 42,\n 'miss_count': 54,\n },\n # A terrible score on an easy chart\n {\n 'id': 3,\n 'chart': 0,\n 'clear_type': 2,\n 'achievement_rate': 1024,\n 'score': 50,\n 'combo': 12,\n 'miss_count': 90,\n },\n ]\n if phase == 2:\n dummyscores = [\n # A better score on the same chart\n {\n 'id': 1,\n 'chart': 1,\n 'clear_type': 3,\n 'achievement_rate': 8765,\n 'score': 469,\n 'combo': 468,\n 'miss_count': 1,\n },\n # A worse score on another same chart\n {\n 'id': 1,\n 'chart': 0,\n 'clear_type': 2,\n 'achievement_rate': 8765,\n 'score': 432,\n 'combo': 321,\n 'miss_count': 15,\n 'expected_score': 543,\n 'expected_clear_type': 4,\n 'expected_achievement_rate': 9876,\n 'expected_combo': 543,\n 'expected_miss_count': 0,\n },\n ]\n self.verify_player_write(ref_id, location, dummyscores)\n\n scores = self.verify_player_read(ref_id, location)\n for expected in dummyscores:\n actual = None\n for received in scores:\n if received['id'] == expected['id'] and received['chart'] == expected['chart']:\n actual = received\n break\n\n if actual is None:\n raise Exception(\"Didn't find song {} chart {} in response!\".format(expected['id'], expected['chart']))\n\n if 'expected_score' in expected:\n expected_score = expected['expected_score']\n else:\n expected_score = expected['score']\n if 'expected_achievement_rate' in expected:\n expected_achievement_rate = expected['expected_achievement_rate']\n else:\n expected_achievement_rate = expected['achievement_rate']\n if 'expected_clear_type' in expected:\n expected_clear_type = expected['expected_clear_type']\n else:\n expected_clear_type = expected['clear_type']\n if 'expected_combo' in expected:\n expected_combo = expected['expected_combo']\n else:\n expected_combo = expected['combo']\n if 'expected_miss_count' in expected:\n expected_miss_count = expected['expected_miss_count']\n else:\n expected_miss_count = expected['miss_count']\n\n if actual['score'] != expected_score:\n raise Exception('Expected a score 
of \\'{}\\' for song \\'{}\\' chart \\'{}\\' but got score \\'{}\\''.format(\n expected_score, expected['id'], expected['chart'], actual['score'],\n ))\n if actual['achievement_rate'] != expected_achievement_rate:\n raise Exception('Expected an achievement rate of \\'{}\\' for song \\'{}\\' chart \\'{}\\' but got achievement rate \\'{}\\''.format(\n expected_achievement_rate, expected['id'], expected['chart'], actual['achievement_rate'],\n ))\n if actual['clear_type'] != expected_clear_type:\n raise Exception('Expected a clear_type of \\'{}\\' for song \\'{}\\' chart \\'{}\\' but got clear_type \\'{}\\''.format(\n expected_clear_type, expected['id'], expected['chart'], actual['clear_type'],\n ))\n if actual['combo'] != expected_combo:\n raise Exception('Expected a combo of \\'{}\\' for song \\'{}\\' chart \\'{}\\' but got combo \\'{}\\''.format(\n expected_combo, expected['id'], expected['chart'], actual['combo'],\n ))\n if actual['miss_count'] != expected_miss_count:\n raise Exception('Expected a miss count of \\'{}\\' for song \\'{}\\' chart \\'{}\\' but got miss count \\'{}\\''.format(\n expected_miss_count, expected['id'], expected['chart'], actual['miss_count'],\n ))\n\n # Sleep so we don't end up putting in score history on the same second\n time.sleep(1)\n\n else:\n print(\"Skipping score checks for existing card\")\n\n # Verify ending game\n self.verify_player_end(ref_id)\n\n # Verify high score tables\n self.verify_info_ranking()\n\n # Verify paseli handling\n if paseli_enabled:\n print(\"PASELI enabled for this PCBID, executing PASELI checks\")\n else:\n print(\"PASELI disabled for this PCBID, skipping PASELI checks\")\n return\n\n sessid, balance = self.verify_eacoin_checkin(card)\n if balance == 0:\n print(\"Skipping PASELI consume check because card has 0 balance\")\n else:\n self.verify_eacoin_consume(sessid, balance, random.randint(0, balance))\n self.verify_eacoin_checkout(sessid)\n", "step-ids": [ 13, 14, 16, 18, 20 ] }
[ 13, 14, 16, 18, 20 ]
import pygame from pygame.sprite import Sprite import spritesheet class Bunker(Sprite): def __init__(self, ai_settings, bunker_x, bunker_y, screen, images): """Initialize the ship and set its starting position""" super(Bunker, self).__init__() self.screen = screen self.images = images self.image = self.images[18] self.rect = self.image.get_rect() self.screen_rect = screen.get_rect() # Start each new bunker at the bottom of the screen self.rect.centerx = bunker_x self.rect.bottom = bunker_y # Store a decimal value for the ship's center. #self.center = float(self.rect.centerx) self.bunker_health = 5 def update(self): """Track the HP of the bunker""" if self.bunker_health == 0: self.kill() def blitme(self): """Draw the ship at its current location""" self.screen.blit(self.image, self.rect)
normal
{ "blob_id": "d088aadc4d88267b908c4f6de2928c812ef36739", "index": 1603, "step-1": "<mask token>\n\n\nclass Bunker(Sprite):\n <mask token>\n <mask token>\n\n def blitme(self):\n \"\"\"Draw the ship at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n", "step-2": "<mask token>\n\n\nclass Bunker(Sprite):\n\n def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n super(Bunker, self).__init__()\n self.screen = screen\n self.images = images\n self.image = self.images[18]\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n self.rect.centerx = bunker_x\n self.rect.bottom = bunker_y\n self.bunker_health = 5\n <mask token>\n\n def blitme(self):\n \"\"\"Draw the ship at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n", "step-3": "<mask token>\n\n\nclass Bunker(Sprite):\n\n def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n super(Bunker, self).__init__()\n self.screen = screen\n self.images = images\n self.image = self.images[18]\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n self.rect.centerx = bunker_x\n self.rect.bottom = bunker_y\n self.bunker_health = 5\n\n def update(self):\n \"\"\"Track the HP of the bunker\"\"\"\n if self.bunker_health == 0:\n self.kill()\n\n def blitme(self):\n \"\"\"Draw the ship at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n", "step-4": "import pygame\nfrom pygame.sprite import Sprite\nimport spritesheet\n\n\nclass Bunker(Sprite):\n\n def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n super(Bunker, self).__init__()\n self.screen = screen\n self.images = images\n self.image = self.images[18]\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n 
self.rect.centerx = bunker_x\n self.rect.bottom = bunker_y\n self.bunker_health = 5\n\n def update(self):\n \"\"\"Track the HP of the bunker\"\"\"\n if self.bunker_health == 0:\n self.kill()\n\n def blitme(self):\n \"\"\"Draw the ship at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n", "step-5": "import pygame\nfrom pygame.sprite import Sprite\nimport spritesheet\n\nclass Bunker(Sprite):\n\n def __init__(self, ai_settings, bunker_x, bunker_y, screen, images):\n \"\"\"Initialize the ship and set its starting position\"\"\"\n super(Bunker, self).__init__()\n self.screen = screen\n self.images = images\n\n self.image = self.images[18]\n\n self.rect = self.image.get_rect()\n self.screen_rect = screen.get_rect()\n\n # Start each new bunker at the bottom of the screen\n self.rect.centerx = bunker_x\n self.rect.bottom = bunker_y\n\n # Store a decimal value for the ship's center.\n #self.center = float(self.rect.centerx)\n\n self.bunker_health = 5\n\n def update(self):\n \"\"\"Track the HP of the bunker\"\"\"\n if self.bunker_health == 0:\n self.kill()\n\n def blitme(self):\n \"\"\"Draw the ship at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
class Odwroc: <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> class Odwroc: def __init__(self, dane): self.dane = dane self.indeks = len(dane) <|reserved_special_token_0|> def __next__(self): if self.indeks == 0: raise StopIteration self.indeks -= 1 return self.dane[self.indeks] <|reserved_special_token_0|> <|reserved_special_token_1|> class Odwroc: def __init__(self, dane): self.dane = dane self.indeks = len(dane) def __iter__(self): return self def __next__(self): if self.indeks == 0: raise StopIteration self.indeks -= 1 return self.dane[self.indeks] <|reserved_special_token_0|> <|reserved_special_token_1|> class Odwroc: def __init__(self, dane): self.dane = dane self.indeks = len(dane) def __iter__(self): return self def __next__(self): if self.indeks == 0: raise StopIteration self.indeks -= 1 return self.dane[self.indeks] for i in Odwroc('Martusia'): print(i, end='') <|reserved_special_token_1|> class Odwroc(): def __init__(self,dane): self.dane = dane self.indeks = len(dane) def __iter__(self): return self def __next__(self): if self.indeks == 0: raise StopIteration self.indeks -= 1 return self.dane[self.indeks] for i in Odwroc('Martusia'): print(i,end = '')
flexible
{ "blob_id": "763c0baf919b48ff135f7aa18974da5b85ee40f5", "index": 1133, "step-1": "class Odwroc:\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "class Odwroc:\n\n def __init__(self, dane):\n self.dane = dane\n self.indeks = len(dane)\n <mask token>\n\n def __next__(self):\n if self.indeks == 0:\n raise StopIteration\n self.indeks -= 1\n return self.dane[self.indeks]\n\n\n<mask token>\n", "step-3": "class Odwroc:\n\n def __init__(self, dane):\n self.dane = dane\n self.indeks = len(dane)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.indeks == 0:\n raise StopIteration\n self.indeks -= 1\n return self.dane[self.indeks]\n\n\n<mask token>\n", "step-4": "class Odwroc:\n\n def __init__(self, dane):\n self.dane = dane\n self.indeks = len(dane)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.indeks == 0:\n raise StopIteration\n self.indeks -= 1\n return self.dane[self.indeks]\n\n\nfor i in Odwroc('Martusia'):\n print(i, end='')\n", "step-5": "class Odwroc():\n def __init__(self,dane):\n self.dane = dane\n self.indeks = len(dane)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.indeks == 0:\n raise StopIteration\n\n self.indeks -= 1\n return self.dane[self.indeks]\n\nfor i in Odwroc('Martusia'):\n print(i,end = '')\n\n", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
#!/usr/bin/env python # -*- coding: utf-8 -*- # File: wenshu/actions.py # Author: Carolusian <https://github.com/carolusian> # Date: 22.09.2018 # Last Modified Date: 22.09.2018 # # Copyright 2018 Carolusian import time import itertools import re import requests import json import os from random import randint from selenium import webdriver from selenium.webdriver.common.by import By from selenium.common.exceptions import ElementNotInteractableException from .exceptions import UnsupportedPlatformException from .config import get_logger, DOC_LINK_BASE from .utils import retry logger = get_logger(__name__) def sleep(min_seconds=1, max_seconds=10): """Allow a browser instance to wait for a few seconds before do something""" time.sleep(randint(min_seconds, max_seconds)) def click(elem): try: elem.click() except ElementNotInteractableException: pass def open_website(url): """ Open website of target url """ browser = webdriver.Firefox() browser.get(url) return browser def is_finished(browser): finish_text = '无符合条件的数据...' 
sleep_secs = 15 time.sleep(sleep_secs) result_list = browser.find_element_by_id('resultList') # Refresh if no result found if finish_text in result_list.text: logger.info('Try refresh to reload content') browser.refresh() time.sleep(sleep_secs) # If still not result found, finish downloading result_list = browser.find_element_by_id('resultList') if finish_text in result_list.text: return True return False def download_docs(browser, save_dir='./', click_next_page=False): if click_next_page: next_page = browser.find_elements(By.XPATH, '//*[@id="pageNumber"]/a[contains(text(), "下一页")]') next_page[0].click() if is_finished(browser): logger.info('Finished downloading documents in this page.') return link_xpath = '//*[@class="dataItem"]' keywords_elems = browser.find_elements(By.XPATH, '//*[@class="contentCondtion"]') subfolder = '-'.join([el.text for el in keywords_elems]) elems = browser.find_elements(By.XPATH, link_xpath) for el in elems: save_doc(browser, el, os.path.join(save_dir, subfolder)) time.sleep(1) # Goto next page after this page is download download_docs(browser, save_dir, click_next_page=True) @retry(times=5, delay=5, allowed_exceptions=IndexError) def save_doc(browser, doc_elem, save_dir): doc_key = doc_elem.get_attribute('key') doc_title = doc_elem.get_attribute('title') logger.info('Found document %s.' 
% doc_title) unzipped_id = browser.execute_script('return unzip("%s")' % doc_key) doc_id = browser.execute_script('return com.str.Decrypt("%s")' % unzipped_id) doc_link = DOC_LINK_BASE % doc_id headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'} p = re.compile('(var jsonHtmlData = ")(.+)(\\"}";)') resp = requests.get(doc_link, headers=headers) resp_text = resp.text resp_obj = p.findall(resp_text)[0][1].replace('\\', '') + '"}' resp_obj = json.loads(resp_obj) os.makedirs(save_dir, exist_ok=True) with open(os.path.join(save_dir, resp_obj['Title'] + '.html'), 'w') as f: f.write(resp_obj['Html']) logger.info('Downloaded %s.' % resp_obj['Title'])
normal
{ "blob_id": "01de85b0d480c105c8cc1a8154c3de936ab3226d", "index": 9143, "step-1": "<mask token>\n\n\ndef sleep(min_seconds=1, max_seconds=10):\n \"\"\"Allow a browser instance to wait for a few seconds before do something\"\"\"\n time.sleep(randint(min_seconds, max_seconds))\n\n\ndef click(elem):\n try:\n elem.click()\n except ElementNotInteractableException:\n pass\n\n\ndef open_website(url):\n \"\"\"\n Open website of target url\n \"\"\"\n browser = webdriver.Firefox()\n browser.get(url)\n return browser\n\n\ndef is_finished(browser):\n finish_text = '无符合条件的数据...'\n sleep_secs = 15\n time.sleep(sleep_secs)\n result_list = browser.find_element_by_id('resultList')\n if finish_text in result_list.text:\n logger.info('Try refresh to reload content')\n browser.refresh()\n time.sleep(sleep_secs)\n result_list = browser.find_element_by_id('resultList')\n if finish_text in result_list.text:\n return True\n return False\n\n\n<mask token>\n\n\n@retry(times=5, delay=5, allowed_exceptions=IndexError)\ndef save_doc(browser, doc_elem, save_dir):\n doc_key = doc_elem.get_attribute('key')\n doc_title = doc_elem.get_attribute('title')\n logger.info('Found document %s.' % doc_title)\n unzipped_id = browser.execute_script('return unzip(\"%s\")' % doc_key)\n doc_id = browser.execute_script('return com.str.Decrypt(\"%s\")' %\n unzipped_id)\n doc_link = DOC_LINK_BASE % doc_id\n headers = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'\n }\n p = re.compile('(var jsonHtmlData = \")(.+)(\\\\\"}\";)')\n resp = requests.get(doc_link, headers=headers)\n resp_text = resp.text\n resp_obj = p.findall(resp_text)[0][1].replace('\\\\', '') + '\"}'\n resp_obj = json.loads(resp_obj)\n os.makedirs(save_dir, exist_ok=True)\n with open(os.path.join(save_dir, resp_obj['Title'] + '.html'), 'w') as f:\n f.write(resp_obj['Html'])\n logger.info('Downloaded %s.' 
% resp_obj['Title'])\n", "step-2": "<mask token>\n\n\ndef sleep(min_seconds=1, max_seconds=10):\n \"\"\"Allow a browser instance to wait for a few seconds before do something\"\"\"\n time.sleep(randint(min_seconds, max_seconds))\n\n\ndef click(elem):\n try:\n elem.click()\n except ElementNotInteractableException:\n pass\n\n\ndef open_website(url):\n \"\"\"\n Open website of target url\n \"\"\"\n browser = webdriver.Firefox()\n browser.get(url)\n return browser\n\n\ndef is_finished(browser):\n finish_text = '无符合条件的数据...'\n sleep_secs = 15\n time.sleep(sleep_secs)\n result_list = browser.find_element_by_id('resultList')\n if finish_text in result_list.text:\n logger.info('Try refresh to reload content')\n browser.refresh()\n time.sleep(sleep_secs)\n result_list = browser.find_element_by_id('resultList')\n if finish_text in result_list.text:\n return True\n return False\n\n\ndef download_docs(browser, save_dir='./', click_next_page=False):\n if click_next_page:\n next_page = browser.find_elements(By.XPATH,\n '//*[@id=\"pageNumber\"]/a[contains(text(), \"下一页\")]')\n next_page[0].click()\n if is_finished(browser):\n logger.info('Finished downloading documents in this page.')\n return\n link_xpath = '//*[@class=\"dataItem\"]'\n keywords_elems = browser.find_elements(By.XPATH,\n '//*[@class=\"contentCondtion\"]')\n subfolder = '-'.join([el.text for el in keywords_elems])\n elems = browser.find_elements(By.XPATH, link_xpath)\n for el in elems:\n save_doc(browser, el, os.path.join(save_dir, subfolder))\n time.sleep(1)\n download_docs(browser, save_dir, click_next_page=True)\n\n\n@retry(times=5, delay=5, allowed_exceptions=IndexError)\ndef save_doc(browser, doc_elem, save_dir):\n doc_key = doc_elem.get_attribute('key')\n doc_title = doc_elem.get_attribute('title')\n logger.info('Found document %s.' 
% doc_title)\n unzipped_id = browser.execute_script('return unzip(\"%s\")' % doc_key)\n doc_id = browser.execute_script('return com.str.Decrypt(\"%s\")' %\n unzipped_id)\n doc_link = DOC_LINK_BASE % doc_id\n headers = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'\n }\n p = re.compile('(var jsonHtmlData = \")(.+)(\\\\\"}\";)')\n resp = requests.get(doc_link, headers=headers)\n resp_text = resp.text\n resp_obj = p.findall(resp_text)[0][1].replace('\\\\', '') + '\"}'\n resp_obj = json.loads(resp_obj)\n os.makedirs(save_dir, exist_ok=True)\n with open(os.path.join(save_dir, resp_obj['Title'] + '.html'), 'w') as f:\n f.write(resp_obj['Html'])\n logger.info('Downloaded %s.' % resp_obj['Title'])\n", "step-3": "<mask token>\nlogger = get_logger(__name__)\n\n\ndef sleep(min_seconds=1, max_seconds=10):\n \"\"\"Allow a browser instance to wait for a few seconds before do something\"\"\"\n time.sleep(randint(min_seconds, max_seconds))\n\n\ndef click(elem):\n try:\n elem.click()\n except ElementNotInteractableException:\n pass\n\n\ndef open_website(url):\n \"\"\"\n Open website of target url\n \"\"\"\n browser = webdriver.Firefox()\n browser.get(url)\n return browser\n\n\ndef is_finished(browser):\n finish_text = '无符合条件的数据...'\n sleep_secs = 15\n time.sleep(sleep_secs)\n result_list = browser.find_element_by_id('resultList')\n if finish_text in result_list.text:\n logger.info('Try refresh to reload content')\n browser.refresh()\n time.sleep(sleep_secs)\n result_list = browser.find_element_by_id('resultList')\n if finish_text in result_list.text:\n return True\n return False\n\n\ndef download_docs(browser, save_dir='./', click_next_page=False):\n if click_next_page:\n next_page = browser.find_elements(By.XPATH,\n '//*[@id=\"pageNumber\"]/a[contains(text(), \"下一页\")]')\n next_page[0].click()\n if is_finished(browser):\n logger.info('Finished downloading documents in this page.')\n 
return\n link_xpath = '//*[@class=\"dataItem\"]'\n keywords_elems = browser.find_elements(By.XPATH,\n '//*[@class=\"contentCondtion\"]')\n subfolder = '-'.join([el.text for el in keywords_elems])\n elems = browser.find_elements(By.XPATH, link_xpath)\n for el in elems:\n save_doc(browser, el, os.path.join(save_dir, subfolder))\n time.sleep(1)\n download_docs(browser, save_dir, click_next_page=True)\n\n\n@retry(times=5, delay=5, allowed_exceptions=IndexError)\ndef save_doc(browser, doc_elem, save_dir):\n doc_key = doc_elem.get_attribute('key')\n doc_title = doc_elem.get_attribute('title')\n logger.info('Found document %s.' % doc_title)\n unzipped_id = browser.execute_script('return unzip(\"%s\")' % doc_key)\n doc_id = browser.execute_script('return com.str.Decrypt(\"%s\")' %\n unzipped_id)\n doc_link = DOC_LINK_BASE % doc_id\n headers = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'\n }\n p = re.compile('(var jsonHtmlData = \")(.+)(\\\\\"}\";)')\n resp = requests.get(doc_link, headers=headers)\n resp_text = resp.text\n resp_obj = p.findall(resp_text)[0][1].replace('\\\\', '') + '\"}'\n resp_obj = json.loads(resp_obj)\n os.makedirs(save_dir, exist_ok=True)\n with open(os.path.join(save_dir, resp_obj['Title'] + '.html'), 'w') as f:\n f.write(resp_obj['Html'])\n logger.info('Downloaded %s.' 
% resp_obj['Title'])\n", "step-4": "import time\nimport itertools\nimport re\nimport requests\nimport json\nimport os\nfrom random import randint\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import ElementNotInteractableException\nfrom .exceptions import UnsupportedPlatformException\nfrom .config import get_logger, DOC_LINK_BASE\nfrom .utils import retry\nlogger = get_logger(__name__)\n\n\ndef sleep(min_seconds=1, max_seconds=10):\n \"\"\"Allow a browser instance to wait for a few seconds before do something\"\"\"\n time.sleep(randint(min_seconds, max_seconds))\n\n\ndef click(elem):\n try:\n elem.click()\n except ElementNotInteractableException:\n pass\n\n\ndef open_website(url):\n \"\"\"\n Open website of target url\n \"\"\"\n browser = webdriver.Firefox()\n browser.get(url)\n return browser\n\n\ndef is_finished(browser):\n finish_text = '无符合条件的数据...'\n sleep_secs = 15\n time.sleep(sleep_secs)\n result_list = browser.find_element_by_id('resultList')\n if finish_text in result_list.text:\n logger.info('Try refresh to reload content')\n browser.refresh()\n time.sleep(sleep_secs)\n result_list = browser.find_element_by_id('resultList')\n if finish_text in result_list.text:\n return True\n return False\n\n\ndef download_docs(browser, save_dir='./', click_next_page=False):\n if click_next_page:\n next_page = browser.find_elements(By.XPATH,\n '//*[@id=\"pageNumber\"]/a[contains(text(), \"下一页\")]')\n next_page[0].click()\n if is_finished(browser):\n logger.info('Finished downloading documents in this page.')\n return\n link_xpath = '//*[@class=\"dataItem\"]'\n keywords_elems = browser.find_elements(By.XPATH,\n '//*[@class=\"contentCondtion\"]')\n subfolder = '-'.join([el.text for el in keywords_elems])\n elems = browser.find_elements(By.XPATH, link_xpath)\n for el in elems:\n save_doc(browser, el, os.path.join(save_dir, subfolder))\n time.sleep(1)\n download_docs(browser, save_dir, 
click_next_page=True)\n\n\n@retry(times=5, delay=5, allowed_exceptions=IndexError)\ndef save_doc(browser, doc_elem, save_dir):\n doc_key = doc_elem.get_attribute('key')\n doc_title = doc_elem.get_attribute('title')\n logger.info('Found document %s.' % doc_title)\n unzipped_id = browser.execute_script('return unzip(\"%s\")' % doc_key)\n doc_id = browser.execute_script('return com.str.Decrypt(\"%s\")' %\n unzipped_id)\n doc_link = DOC_LINK_BASE % doc_id\n headers = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'\n }\n p = re.compile('(var jsonHtmlData = \")(.+)(\\\\\"}\";)')\n resp = requests.get(doc_link, headers=headers)\n resp_text = resp.text\n resp_obj = p.findall(resp_text)[0][1].replace('\\\\', '') + '\"}'\n resp_obj = json.loads(resp_obj)\n os.makedirs(save_dir, exist_ok=True)\n with open(os.path.join(save_dir, resp_obj['Title'] + '.html'), 'w') as f:\n f.write(resp_obj['Html'])\n logger.info('Downloaded %s.' 
% resp_obj['Title'])\n", "step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# File: wenshu/actions.py\n# Author: Carolusian <https://github.com/carolusian>\n# Date: 22.09.2018\n# Last Modified Date: 22.09.2018\n#\n# Copyright 2018 Carolusian\n\nimport time\nimport itertools\nimport re\nimport requests\nimport json\nimport os\nfrom random import randint\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import ElementNotInteractableException\nfrom .exceptions import UnsupportedPlatformException\n\nfrom .config import get_logger, DOC_LINK_BASE\nfrom .utils import retry\n\n\nlogger = get_logger(__name__)\n\n\ndef sleep(min_seconds=1, max_seconds=10):\n \"\"\"Allow a browser instance to wait for a few seconds before do something\"\"\"\n time.sleep(randint(min_seconds, max_seconds))\n\n\ndef click(elem):\n try:\n elem.click()\n except ElementNotInteractableException:\n pass\n\n\ndef open_website(url):\n \"\"\"\n Open website of target url\n \"\"\"\n browser = webdriver.Firefox()\n browser.get(url)\n return browser\n\n\ndef is_finished(browser):\n finish_text = '无符合条件的数据...'\n sleep_secs = 15\n time.sleep(sleep_secs)\n result_list = browser.find_element_by_id('resultList')\n # Refresh if no result found\n if finish_text in result_list.text:\n logger.info('Try refresh to reload content')\n browser.refresh()\n time.sleep(sleep_secs)\n\n # If still not result found, finish downloading\n result_list = browser.find_element_by_id('resultList')\n if finish_text in result_list.text:\n return True\n return False\n\n\ndef download_docs(browser, save_dir='./', click_next_page=False): \n if click_next_page:\n next_page = browser.find_elements(By.XPATH, '//*[@id=\"pageNumber\"]/a[contains(text(), \"下一页\")]')\n next_page[0].click()\n if is_finished(browser):\n logger.info('Finished downloading documents in this page.')\n return\n\n link_xpath = '//*[@class=\"dataItem\"]'\n keywords_elems = 
browser.find_elements(By.XPATH, '//*[@class=\"contentCondtion\"]')\n subfolder = '-'.join([el.text for el in keywords_elems])\n elems = browser.find_elements(By.XPATH, link_xpath)\n for el in elems:\n save_doc(browser, el, os.path.join(save_dir, subfolder))\n time.sleep(1)\n\n # Goto next page after this page is download\n download_docs(browser, save_dir, click_next_page=True)\n\n\n@retry(times=5, delay=5, allowed_exceptions=IndexError)\ndef save_doc(browser, doc_elem, save_dir):\n doc_key = doc_elem.get_attribute('key')\n doc_title = doc_elem.get_attribute('title')\n logger.info('Found document %s.' % doc_title)\n\n unzipped_id = browser.execute_script('return unzip(\"%s\")' % doc_key)\n doc_id = browser.execute_script('return com.str.Decrypt(\"%s\")' % unzipped_id)\n doc_link = DOC_LINK_BASE % doc_id\n\n headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n p = re.compile('(var jsonHtmlData = \")(.+)(\\\\\"}\";)')\n \n resp = requests.get(doc_link, headers=headers)\n resp_text = resp.text\n\n resp_obj = p.findall(resp_text)[0][1].replace('\\\\', '') + '\"}'\n resp_obj = json.loads(resp_obj)\n\n os.makedirs(save_dir, exist_ok=True)\n with open(os.path.join(save_dir, resp_obj['Title'] + '.html'), 'w') as f:\n f.write(resp_obj['Html'])\n logger.info('Downloaded %s.' % resp_obj['Title'])\n\n\n \n\n\n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = 'wenchao.hao' """ data.guid package. """ from .guid import Guid
normal
{ "blob_id": "88a379747f955b0410ab2bb33c1165034c701673", "index": 8597, "step-1": "<mask token>\n", "step-2": "__author__ = 'wenchao.hao'\n<mask token>\n", "step-3": "__author__ = 'wenchao.hao'\n<mask token>\nfrom .guid import Guid\n", "step-4": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n__author__ = 'wenchao.hao'\n\n\"\"\"\ndata.guid package.\n\"\"\"\n\nfrom .guid import Guid\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
def myswap(a, b): temp = a a = b b = temp if a < b: print(a, b) else: print(b, a) a, b = map(int, input().split()) myswap(a, b)
normal
{ "blob_id": "e6efd2de5f92d66f1b734a2173fc8681af3c4cc8", "index": 8040, "step-1": "<mask token>\n", "step-2": "def myswap(a, b):\n temp = a\n a = b\n b = temp\n if a < b:\n print(a, b)\n else:\n print(b, a)\n\n\n<mask token>\n", "step-3": "def myswap(a, b):\n temp = a\n a = b\n b = temp\n if a < b:\n print(a, b)\n else:\n print(b, a)\n\n\n<mask token>\nmyswap(a, b)\n", "step-4": "def myswap(a, b):\n temp = a\n a = b\n b = temp\n if a < b:\n print(a, b)\n else:\n print(b, a)\n\n\na, b = map(int, input().split())\nmyswap(a, b)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import numpy as np import ipywidgets as widgets from ipywidgets import interact, interactive, fixed, Layout, VBox, HBox import matplotlib.pyplot as plt import matplotlib as mpl import matplotlib.tri as tri import matplotlib.colors as colors from matplotlib.colors import LinearSegmentedColormap import scipy.stats as sps import matplotlib.ticker as mtick mpl.rcParams['pdf.fonttype'] = 42 mpl.rcParams['ps.fonttype'] = 42 #### FORC plotting #### def forc(X): #unpack data Xi = X['Xi'] Yi = X['Yi'] Zi = X['Zi'] SEi = X['SEi'] Pi = X['Pi'] Hc1 = X['Hc1'] Hc2 = X['Hc2'] Hb1 = X['Hb1'] Hb2 = X['Hb2'] #Set up widgets for interactive plot style = {'description_width': 'initial'} #general style settings #DEFINE INTERACTIVE WIDGETS #should a colorbar be included colorbar_widge = widgets.Checkbox(value=False, description = 'Show final FORC plot',style=style) pval_widge = widgets.Checkbox(value=False, description = 'Show 0.05 significance contour',style=style) colormin_widge = widgets.FloatSlider( value=0.0, min=0.00, max=0.999, step=0.001, description='Rescale colormap minimum', disabled=False, continuous_update=False, orientation='horizontal', readout=False, readout_format='.2f', style=style ) colormax_widge = widgets.FloatSlider( value=1.0, min=0.001, max=1, step=0.001, description='Rescale colormap maximum', disabled=False, continuous_update=False, orientation='horizontal', readout=False, readout_format='.2f', style=style ) #Frequency for contour lines to be included in plot contour_widge = widgets.Select( options=[['Select contour frequency',-1], ['Every level',1], ['Every 2nd level',2], ['Every 3rd level',3], ['Every 4th level',4], ['Every 5th level',5], ['Every 10th level',10], ['Every 20th level',20], ['Every 50th level',50], ], value=-1, rows=1, description='Plot contours',style=style) contourpts_widge = widgets.FloatSlider(value=1.0,min=0.5,max=3.0,step=0.5, description = 'Contour line width [pts]',style=style) #check box for plot download download_widge = 
widgets.Checkbox(value=False, description = 'Download plot',style=style) #How many contour levels should be included level_widge = widgets.Select( options=[['20',20],['30',30],['50',50],['75',75],['100',100],['200',200],['500',500]], value=100, rows=1, description='Number of color levels',style=style) #plot limit widgets if X['unit']=='SI': xmin_widge = widgets.FloatText(value=0,description='Minimum B$_\mathrm{c}$ [Oe]',style=style,step=10) xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum B$_\mathrm{c}$ [Oe]',style=style,step=10) ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum B$_\mathrm{u}$ [Oe]',style=style,step=10) ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum B$_\mathrm{u}$ [Oe]',style=style,step=10) elif X['unit']=='cgs': xmin_widge = widgets.FloatText(value=0,description='Minimum H$_\mathrm{c}$ [Oe]',style=style,step=10) xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum H$_\mathrm{c}$ [Oe]',style=style,step=10) ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum H$_\mathrm{u}$ [Oe]',style=style,step=10) ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum H$_\mathrm{u}$ [Oe]',style=style,step=10) #launch the interactive FORC plot x = interactive(forcplot, Xi=fixed(Xi), #X point grid Yi=fixed(Yi), #Y point grid Zi=fixed(Zi), #interpolated Z values SEi = fixed(SEi), #interpolated standard errors Pi = fixed(Pi), #P values fn=fixed(X['sample']), #File information mass=fixed(X['mass']), #Preprocessing information unit=fixed(X['unit']), colorbar=colorbar_widge, #Include colorbar level=level_widge, #Number of levels to plot contour=contour_widge, #Contour levels to plot contourpts=contourpts_widge, #Contour line width xmin=xmin_widge, #X-minimum xmax=xmax_widge, #X-maximum ymin=ymin_widge, #Y-minimum ymax=ymax_widge, #Y-maximum colormin = colormin_widge, #adjust 
colormap minimum colormax = colormax_widge, #adjust colormap minimum download = download_widge #download plot ) #create tabs tab_nest = widgets.Tab() # tab_nest.children = [tab_visualise] tab_nest.set_title(0, 'FORC PLOTTING') #interact function in isolation tab_nest.children = [VBox(children = x.children)] display(tab_nest) #display(x) #display the interactive plot def forcplot(Xi,Yi,Zi,SEi,Pi,fn,mass,unit,colorbar,level,contour,contourpts,xmin,xmax,ymin,ymax,colormin,colormax,download): fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(1,1,1) if mass.value<0.0: Xi_new = Xi Yi_new = Yi Zi_new = Zi SEi_new = SEi Pi_new = Pi SEi_new[Zi_new==0.0]=0.0 SEi_new[np.isnan(SEi_new)]=0.0 if unit=='SI': xlabel_text = 'B$_\mathrm{c}$ [T]' #label Hc axis [SI units] xlabel_csv = 'Bc [T]' ylabel_text = 'B$_\mathrm{u}$ [T]' #label Hu axis [SI units] ylabel_csv = 'Bu [T]' cbar_text = 'Am$^2$ T$^{-2}$' se_csv = 'rho [Am**2 / T**2]' elif unit=='cgs': xlabel_text = 'H$_\mathrm{c}$ [Oe]' #label Hc axis [SI units] xlabel_csv = 'Hc [Oe]' ylabel_text = 'H$_\mathrm{u}$ [Oe]' #label Hu axis [SI units] ylabel_csv = 'Hu [Oe]' cbar_text = 'emu$ Oe$^{-2}$' se_csv = 'rho [emu / Oe**2]' else: Xi_new = Xi Yi_new = Yi Zi_new = Zi / (mass.value/1000.0) SEi_new = SEi / (mass.value/1000.0) SEi_new[Zi_new==0.0]=0.0 SEi_new[np.isnan(SEi_new)]=0.0 Pi_new = Pi if unit=='SI': Zi_new = Zi / (mass.value/1000.0) SEi_new = SEi / (mass.value/1000.0) xlabel_text = 'B$_\mathrm{c}$ [T]' #label Hc axis [SI units] xlabel_csv = 'Bc [T]' ylabel_text = 'B$_\mathrm{u}$ [T]' #label Hu axis [SI units] ylabel_csv = 'Bu [T]' cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$' se_csv = 'se [Am**2 / T**2 / kg]' elif unit=='cgs': Zi_new = Zi / (mass.value) SEi_new = SEi / (mass.value) xlabel_text = 'H$_\mathrm{c}$ [Oe]' #label Hc axis [SI units] xlabel_csv = 'Hc [Oe]' ylabel_text = 'H$_\mathrm{u}$ [Oe]' #label Hu axis [SI units] ylabel_csv = 'Hu [Oe]' cbar_text = 'emu Oe$^{-2}$ g$^{-1}$' se_csv = 'se [emu/ Oe**2 / g]' 
SEi_new[Zi_new==0.0]=0.0 SEi_new[np.isnan(SEi_new)]=0.0 #define colormaps idx=(Xi_new>=xmin) & (Xi_new<=xmax) & (Yi_new>=ymin) & (Yi_new<=ymax) #find points currently in view cmap,vmin,vmax = FORCinel_colormap(Zi_new[idx]) #cmap, norm = FORCinel_colormap(Zi_new[idx]) Zi_trunc = np.copy(Zi_new) Zi_trunc[np.isnan(Zi_trunc)] = 0.0 Zi_trunc[Zi_trunc<vmin]=vmin vmini = vmin*(1-colormin) vmaxi = vmax*colormax idx = (Zi_trunc>=vmini) & (Zi_trunc<=vmaxi) cmap,vmin,vmax = FORCinel_colormap(Zi_trunc[idx]) CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap = cmap, vmin=vmin, vmax=vmax) if (contour>0) & (contour<level): CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',linewidths=contourpts) #if pval==True: # CS3 = ax.contour(Xi_new, Yi_new, Pi_new, levels=[0.05], colors=['r']) ax.set_xlabel(xlabel_text,fontsize=14) #label Hc axis [SI units] ax.set_ylabel(ylabel_text,fontsize=14) #label Hu axis [SI units] # Set plot Xlimits xlimits = np.sort((xmin,xmax)) ax.set_xlim(xlimits) #Set plot Ylimits ylimits = np.sort((ymin,ymax)) ax.set_ylim(ylimits) #Set ticks and plot aspect ratio ax.tick_params(labelsize=14) ax.set_aspect('equal') #set 1:1 aspect ratio ax.minorticks_on() #add minor ticks #Add colorbar if colorbar == True: cbar = fig.colorbar(CS,fraction=0.04, pad=0.08,format='%.2e') cbar.ax.tick_params(labelsize=14) #cbar.ax.set_title(cbar_text,fontsize=14) cbar.set_label(cbar_text,fontsize=14) #cbar.ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e')) #Activate download to same folder as data file if download==True: outputfile = fn.value+'_FORC.pdf' plt.savefig(outputfile, dpi=300, bbox_inches="tight") ar = np.column_stack((np.reshape(Xi_new,(-1,1)),np.reshape(Yi_new,(-1,1)),np.reshape(Zi_trunc,(-1,1)),np.reshape(SEi,(-1,1)))) outputfile = fn.value+'_XYZ.csv' with open(outputfile, 'w') as fp: fp.write(xlabel_csv+','+ylabel_csv+','+se_csv+','+se_csv + '\n') np.savetxt(fp, ar, '%s', ',') #show the final plot plt.show() def FORCinel_colormap(Z): #setup 
initial colormap assuming that negative range does not require extension cdict = {'red': ((0.0, 127/255, 127/255), (0.1387, 255/255, 255/255), (0.1597, 255/255, 255/255), (0.1807, 255/255, 255/255), (0.3193, 102/255, 102/255), (0.563, 204/255, 204/255), (0.6975, 204/255, 204/255), (0.8319, 153/255, 153/255), (0.9748, 76/255, 76/255), (1.0, 76/255, 76/255)), 'green': ((0.0, 127/255, 127/255), (0.1387, 255/255, 255/255), (0.1597, 255/255, 255/255), (0.1807, 255/255, 255/255), (0.3193, 178/255, 178/255), (0.563, 204/255, 204/255), (0.6975, 76/255, 76/255), (0.8319, 102/255, 102/255), (0.9748, 25/255, 25/255), (1.0, 25/255, 25/255)), 'blue': ((0.0, 255/255, 255/255), (0.1387, 255/255, 255/255), (0.1597, 255/255, 255/255), (0.1807, 255/255, 255/255), (0.3193, 102/255, 102/255), (0.563, 76/255, 76/255), (0.6975, 76/255, 76/255), (0.8319, 153/255, 153/255), (0.9748, 76/255, 76/255), (1.0, 76/255, 76/255))} if np.abs(np.min(Z))<=np.max(Z)*0.19: #negative extension is not required #cmap = LinearSegmentedColormap('forc_cmap', cdict) vmin = -np.max(Z)*0.19 vmax = np.max(Z) else: #negative extension is required vmin=np.min(Z) vmax=np.max(Z) anchors = np.zeros(10) anchors[1]=(-0.025*vmax-vmin)/(vmax-vmin) anchors[2]=(-0.005*vmax-vmin)/(vmax-vmin) anchors[3]=(0.025*vmax-vmin)/(vmax-vmin) anchors[4]=(0.19*vmax-vmin)/(vmax-vmin) anchors[5]=(0.48*vmax-vmin)/(vmax-vmin) anchors[6]=(0.64*vmax-vmin)/(vmax-vmin) anchors[7]=(0.80*vmax-vmin)/(vmax-vmin) anchors[8]=(0.97*vmax-vmin)/(vmax-vmin) anchors[9]=1.0 Rlst = list(cdict['red']) Glst = list(cdict['green']) Blst = list(cdict['blue']) for i in range(9): Rlst[i] = tuple((anchors[i],Rlst[i][1],Rlst[i][2])) Glst[i] = tuple((anchors[i],Glst[i][1],Glst[i][2])) Blst[i] = tuple((anchors[i],Blst[i][1],Blst[i][2])) cdict['red'] = tuple(Rlst) cdict['green'] = tuple(Glst) cdict['blue'] = tuple(Blst) cmap = LinearSegmentedColormap('forc_cmap', cdict) return cmap, vmin, vmax #### Profile Plotting #### #### Profile plotting #### def 
profile_options(X): Hb1 = X['Hb1']-X['Hc2'] Hb2 = X['Hb2'] Hc1 = np.maximum(X['Hc1'],0) Hc2 = X['Hc2'] style = {'description_width': 'initial'} #general style settings HL = widgets.HTML(value='<hr style="height:3px;border:none;color:#333;background-color:#333;" />') P_title = widgets.HTML(value='<h3>Select profile type:</h3>') P_widge = widgets.RadioButtons(options=[('Horizontal profile',0), ('Vertical profile',1)], value=0, style=style) H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>') if X['unit'] == 'SI': x_Hb_widge = widgets.FloatSlider( value=0.0, min=Hb1, max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.3f', layout={'width': '350px'}, style = style ) elif X['unit'] == 'cgs': x_Hb_widge = widgets.FloatSlider( value=0.0, min=Hb1, max=Hb2, step=10, description='H$_u$ [Oe]', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.3f', layout={'width': '350px'}, style = style ) if X['unit'] == 'SI': x_Hc_widge = widgets.FloatRangeSlider( value=[Hc1,Hc2], min=Hc1, max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.3f', layout={'width': '350px'}, style = style ) elif X['unit'] == 'cgs': x_Hc_widge = widgets.FloatRangeSlider( value=[Hc1,Hc2], min=Hc1, max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.3f', layout={'width': '350px'}, style = style ) V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>') if X['unit'] == 'SI': y_Hc_widge = widgets.FloatSlider( value=(Hc1+Hc2)/2.0, min=Hc1, max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.3f', layout={'width': '350px'}, style = style ) elif X['unit'] == 'cgs': y_Hc_widge 
= widgets.FloatSlider( value=(Hc1+Hc2)/2.0, min=Hc1, max=Hc2, step=10, description='H$_c$ [T]', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.3f', layout={'width': '350px'}, style = style ) if X['unit'] == 'SI': y_Hb_widge = widgets.FloatRangeSlider( value=[Hb1,Hb2], min=Hb1, max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.3f', layout={'width': '350px'}, style = style ) elif X['unit'] == 'cgs': y_Hb_widge = widgets.FloatRangeSlider( value=[Hb1,Hb2], min=Hb1, max=Hb2, step=10, description='H$_u$ [T]', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.3f', layout={'width': '350px'}, style = style ) profile_widge = VBox([P_title,P_widge,HL,H_title,x_Hb_widge,x_Hc_widge, \ HL,V_title,y_Hc_widge,y_Hb_widge]) profile_nest = widgets.Tab() profile_nest.children = [profile_widge] profile_nest.set_title(0, 'PLOT PROFILES') display(profile_nest) X['P_widge'] = P_widge X['x_Hb_widge'] = x_Hb_widge X['x_Hc_widge'] = x_Hc_widge X['y_Hc_widge'] = y_Hc_widge X['y_Hb_widge'] = y_Hb_widge return X def profile_plot(X): if X['P_widge'].value==0: X = x_profile(X,X['x_Hc_widge'].value,X['x_Hb_widge'].value) else: X = y_profile(X,X['y_Hc_widge'].value,X['y_Hb_widge'].value) return X def x_profile(X,Hc,Hb): Hc1, Hc2 = Hc[0], Hc[1] dH = X['dH'] NH = int(np.sqrt((Hc2-Hc1)**2)/dH) Hc0 = np.linspace(Hc1,Hc2,NH) Hb0 = np.linspace(Hb,Hb,NH) rho_int = X['Zint'](Hc0,Hb0) coef = sps.norm.ppf(0.025/np.sum(rho_int.mask==False)) CI_int = X['SEint'](Hc0,Hb0)*coef fig = plt.figure(figsize=(5,5)) ax1 = fig.add_subplot(1,1,1) if X['mass'].value>0.0: if X['unit'] == 'SI': ax1.plot(Hc0,rho_int/(X['mass'].value/1000.0),color='k') ax1.fill_between(Hc0, (rho_int-CI_int)/(X['mass'].value/1000.0), (rho_int+CI_int)/(X['mass'].value/1000.0),color='lightgrey') ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$',fontsize=14) elif 
X['unit'] == 'cgs': ax1.plot(Hc0,rho_int/(X['mass'].value),color='k') ax1.fill_between(Hc0, (rho_int-CI_int)/(X['mass'].value), (rho_int+CI_int)/(X['mass'].value),color='lightgrey') ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$',fontsize=14) else: ax1.plot(Hc0,rho_int,color='k') ax1.fill_between(Hc0, (rho_int-CI_int), (rho_int+CI_int),color='lightgrey') if X['unit'] == 'SI': ax1.set_ylabel('Am$^2$ T$^{-2}$',fontsize=14) elif X['unit'] == 'cgs': ax1.set_ylabel('emu Oe$^{-2}$',fontsize=14) ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='14') ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k') if X['unit'] == 'SI': ax1.set_xlabel('B$_\mathrm{c}$ [T]',fontsize=14) elif X['unit'] == 'cgs': ax1.set_xlabel('H$_\mathrm{c}$ [Oe]',fontsize=14) ax1.minorticks_on() ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e')) outputfile = X['sample'].value+'_Hc_PROFILE.pdf' plt.savefig(outputfile, dpi=300, bbox_inches="tight") plt.show return X def y_profile(X,Hc,Hb): Hb1, Hb2 = Hb[0], Hb[1] dH = X['dH'] NH = int(np.sqrt((Hb2-Hb1)**2)/dH) Hc0 = np.linspace(Hc,Hc,NH) Hb0 = np.linspace(Hb1,Hb2,NH) rho_int = X['Zint'](Hc0,Hb0) coef = sps.norm.ppf(0.025/np.sum(rho_int.mask==False)) CI_int = X['SEint'](Hc0,Hb0)*coef fig = plt.figure(figsize=(5,5)) ax1 = fig.add_subplot(1,1,1) if X['mass'].value>0.0: if X['unit'] == 'SI': ax1.plot(Hb0,rho_int/(X['mass'].value/1000.0),color='k') ax1.fill_between(Hb0, (rho_int-CI_int)/(X['mass'].value/1000.0), (rho_int+CI_int)/(X['mass'].value/1000.0),color='lightgrey') ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$',fontsize=14) elif X['unit'] == 'cgs': ax1.plot(Hb0,rho_int/(X['mass'].value),color='k') ax1.fill_between(Hb0, (rho_int-CI_int)/(X['mass'].value), (rho_int+CI_int)/(X['mass'].value),color='lightgrey') ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$',fontsize=14) else: ax1.plot(Hb0,rho_int,color='k') ax1.fill_between(Hb0, (rho_int-CI_int), (rho_int+CI_int),color='lightgrey') 
if X['unit'] == 'SI': ax1.set_ylabel('Am$^2$ T$^{-2}$',fontsize=14) elif X['unit'] == 'cgs': ax1.set_ylabel('emu Oe$^{-2}$',fontsize=14) ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='14') ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k') if X['unit'] == 'SI': ax1.set_xlabel('B$_\mathrm{u}$ [T]',fontsize=14) elif X['unit'] == 'cgs': ax1.set_xlabel('H$_\mathrm{u}$ [Oe]',fontsize=14) ax1.minorticks_on() ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e')) outputfile = X['sample'].value+'_Hu_PROFILE.pdf' plt.savefig(outputfile, dpi=300, bbox_inches="tight") plt.show return X
normal
{ "blob_id": "e5a4ae2ec0fab1ca8cdce229c69725ece2dcc476", "index": 8272, "step-1": "<mask token>\n\n\ndef forc(X):\n Xi = X['Xi']\n Yi = X['Yi']\n Zi = X['Zi']\n SEi = X['SEi']\n Pi = X['Pi']\n Hc1 = X['Hc1']\n Hc2 = X['Hc2']\n Hb1 = X['Hb1']\n Hb2 = X['Hb2']\n style = {'description_width': 'initial'}\n colorbar_widge = widgets.Checkbox(value=False, description=\n 'Show final FORC plot', style=style)\n pval_widge = widgets.Checkbox(value=False, description=\n 'Show 0.05 significance contour', style=style)\n colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,\n step=0.001, description='Rescale colormap minimum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=\n 0.001, description='Rescale colormap maximum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n contour_widge = widgets.Select(options=[['Select contour frequency', -1\n ], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level', \n 3], ['Every 4th level', 4], ['Every 5th level', 5], [\n 'Every 10th level', 10], ['Every 20th level', 20], [\n 'Every 50th level', 50]], value=-1, rows=1, description=\n 'Plot contours', style=style)\n contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,\n step=0.5, description='Contour line width [pts]', style=style)\n download_widge = widgets.Checkbox(value=False, description=\n 'Download plot', style=style)\n level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50\n ], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,\n rows=1, description='Number of color levels', style=style)\n if X['unit'] == 'SI':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n 
description='Maximum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum B$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n elif X['unit'] == 'cgs':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum H$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi\n =fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[\n 'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=\n level_widge, contour=contour_widge, contourpts=contourpts_widge,\n xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,\n colormin=colormin_widge, colormax=colormax_widge, download=\n download_widge)\n tab_nest = widgets.Tab()\n tab_nest.set_title(0, 'FORC PLOTTING')\n tab_nest.children = [VBox(children=x.children)]\n display(tab_nest)\n\n\ndef forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,\n contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(1, 1, 1)\n if mass.value < 0.0:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi\n SEi_new = SEi\n Pi_new = Pi\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n if unit == 'SI':\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu 
[T]'\n cbar_text = 'Am$^2$ T$^{-2}$'\n se_csv = 'rho [Am**2 / T**2]'\n elif unit == 'cgs':\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu$ Oe$^{-2}$'\n se_csv = 'rho [emu / Oe**2]'\n else:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n Pi_new = Pi\n if unit == 'SI':\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'\n se_csv = 'se [Am**2 / T**2 / kg]'\n elif unit == 'cgs':\n Zi_new = Zi / mass.value\n SEi_new = SEi / mass.value\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'\n se_csv = 'se [emu/ Oe**2 / g]'\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=\n ymax)\n cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])\n Zi_trunc = np.copy(Zi_new)\n Zi_trunc[np.isnan(Zi_trunc)] = 0.0\n Zi_trunc[Zi_trunc < vmin] = vmin\n vmini = vmin * (1 - colormin)\n vmaxi = vmax * colormax\n idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)\n cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])\n CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,\n vmax=vmax)\n if (contour > 0) & (contour < level):\n CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',\n linewidths=contourpts)\n ax.set_xlabel(xlabel_text, fontsize=14)\n ax.set_ylabel(ylabel_text, fontsize=14)\n xlimits = np.sort((xmin, xmax))\n ax.set_xlim(xlimits)\n ylimits = np.sort((ymin, ymax))\n ax.set_ylim(ylimits)\n ax.tick_params(labelsize=14)\n 
ax.set_aspect('equal')\n ax.minorticks_on()\n if colorbar == True:\n cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')\n cbar.ax.tick_params(labelsize=14)\n cbar.set_label(cbar_text, fontsize=14)\n if download == True:\n outputfile = fn.value + '_FORC.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(\n Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,\n (-1, 1))))\n outputfile = fn.value + '_XYZ.csv'\n with open(outputfile, 'w') as fp:\n fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +\n se_csv + '\\n')\n np.savetxt(fp, ar, '%s', ',')\n plt.show()\n\n\ndef FORCinel_colormap(Z):\n cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /\n 255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255\n ), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (\n 0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (\n (0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (\n 0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (\n 0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (\n 0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((\n 0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,\n 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /\n 255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /\n 255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),\n (1.0, 76 / 255, 76 / 255))}\n if np.abs(np.min(Z)) <= np.max(Z) * 0.19:\n vmin = -np.max(Z) * 0.19\n vmax = np.max(Z)\n else:\n vmin = np.min(Z)\n vmax = np.max(Z)\n anchors = np.zeros(10)\n anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)\n anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)\n anchors[3] = (0.025 * vmax 
- vmin) / (vmax - vmin)\n anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)\n anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)\n anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)\n anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)\n anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)\n anchors[9] = 1.0\n Rlst = list(cdict['red'])\n Glst = list(cdict['green'])\n Blst = list(cdict['blue'])\n for i in range(9):\n Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))\n Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))\n Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))\n cdict['red'] = tuple(Rlst)\n cdict['green'] = tuple(Glst)\n cdict['blue'] = tuple(Blst)\n cmap = LinearSegmentedColormap('forc_cmap', cdict)\n return cmap, vmin, vmax\n\n\ndef profile_options(X):\n Hb1 = X['Hb1'] - X['Hc2']\n Hb2 = X['Hb2']\n Hc1 = np.maximum(X['Hc1'], 0)\n Hc2 = X['Hc2']\n style = {'description_width': 'initial'}\n HL = widgets.HTML(value=\n '<hr style=\"height:3px;border:none;color:#333;background-color:#333;\" />'\n )\n P_title = widgets.HTML(value='<h3>Select profile type:</h3>')\n P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (\n 'Vertical profile', 1)], value=0, style=style)\n H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')\n if X['unit'] == 'SI':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 10, description='H$_u$ [Oe]', disabled=False, continuous_update\n =False, orientation='horizontal', readout=True, readout_format=\n '.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n 
continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')\n if X['unit'] == 'SI':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=10, description='H$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=10, description='H$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,\n x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])\n profile_nest = widgets.Tab()\n profile_nest.children = [profile_widge]\n profile_nest.set_title(0, 'PLOT PROFILES')\n display(profile_nest)\n X['P_widge'] = P_widge\n X['x_Hb_widge'] = x_Hb_widge\n X['x_Hc_widge'] = x_Hc_widge\n 
X['y_Hc_widge'] = y_Hc_widge\n X['y_Hb_widge'] = y_Hb_widge\n return X\n\n\n<mask token>\n\n\ndef x_profile(X, Hc, Hb):\n Hc1, Hc2 = Hc[0], Hc[1]\n dH = X['dH']\n NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)\n Hc0 = np.linspace(Hc1, Hc2, NH)\n Hb0 = np.linspace(Hb, Hb, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hc0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hc0, rho_int, color='k')\n ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{c}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{c}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hc_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n\n\ndef y_profile(X, Hc, Hb):\n Hb1, Hb2 = Hb[0], Hb[1]\n dH = X['dH']\n NH = 
int(np.sqrt((Hb2 - Hb1) ** 2) / dH)\n Hc0 = np.linspace(Hc, Hc, NH)\n Hb0 = np.linspace(Hb1, Hb2, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hb0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hb0, rho_int, color='k')\n ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{u}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{u}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hu_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n", "step-2": "<mask token>\n\n\ndef forc(X):\n Xi = X['Xi']\n Yi = X['Yi']\n Zi = X['Zi']\n SEi = X['SEi']\n Pi = X['Pi']\n Hc1 = X['Hc1']\n Hc2 = X['Hc2']\n Hb1 = X['Hb1']\n Hb2 = X['Hb2']\n style = {'description_width': 'initial'}\n colorbar_widge = 
widgets.Checkbox(value=False, description=\n 'Show final FORC plot', style=style)\n pval_widge = widgets.Checkbox(value=False, description=\n 'Show 0.05 significance contour', style=style)\n colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,\n step=0.001, description='Rescale colormap minimum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=\n 0.001, description='Rescale colormap maximum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n contour_widge = widgets.Select(options=[['Select contour frequency', -1\n ], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level', \n 3], ['Every 4th level', 4], ['Every 5th level', 5], [\n 'Every 10th level', 10], ['Every 20th level', 20], [\n 'Every 50th level', 50]], value=-1, rows=1, description=\n 'Plot contours', style=style)\n contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,\n step=0.5, description='Contour line width [pts]', style=style)\n download_widge = widgets.Checkbox(value=False, description=\n 'Download plot', style=style)\n level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50\n ], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,\n rows=1, description='Number of color levels', style=style)\n if X['unit'] == 'SI':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum B$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{u}$ [Oe]', 
style=style, step=10)\n elif X['unit'] == 'cgs':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum H$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi\n =fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[\n 'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=\n level_widge, contour=contour_widge, contourpts=contourpts_widge,\n xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,\n colormin=colormin_widge, colormax=colormax_widge, download=\n download_widge)\n tab_nest = widgets.Tab()\n tab_nest.set_title(0, 'FORC PLOTTING')\n tab_nest.children = [VBox(children=x.children)]\n display(tab_nest)\n\n\ndef forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,\n contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(1, 1, 1)\n if mass.value < 0.0:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi\n SEi_new = SEi\n Pi_new = Pi\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n if unit == 'SI':\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$'\n se_csv = 'rho [Am**2 / T**2]'\n elif unit == 'cgs':\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu$ Oe$^{-2}$'\n se_csv = 'rho [emu / Oe**2]'\n else:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = 
Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n Pi_new = Pi\n if unit == 'SI':\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'\n se_csv = 'se [Am**2 / T**2 / kg]'\n elif unit == 'cgs':\n Zi_new = Zi / mass.value\n SEi_new = SEi / mass.value\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'\n se_csv = 'se [emu/ Oe**2 / g]'\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=\n ymax)\n cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])\n Zi_trunc = np.copy(Zi_new)\n Zi_trunc[np.isnan(Zi_trunc)] = 0.0\n Zi_trunc[Zi_trunc < vmin] = vmin\n vmini = vmin * (1 - colormin)\n vmaxi = vmax * colormax\n idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)\n cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])\n CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,\n vmax=vmax)\n if (contour > 0) & (contour < level):\n CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',\n linewidths=contourpts)\n ax.set_xlabel(xlabel_text, fontsize=14)\n ax.set_ylabel(ylabel_text, fontsize=14)\n xlimits = np.sort((xmin, xmax))\n ax.set_xlim(xlimits)\n ylimits = np.sort((ymin, ymax))\n ax.set_ylim(ylimits)\n ax.tick_params(labelsize=14)\n ax.set_aspect('equal')\n ax.minorticks_on()\n if colorbar == True:\n cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')\n cbar.ax.tick_params(labelsize=14)\n cbar.set_label(cbar_text, fontsize=14)\n if download == True:\n outputfile = fn.value + '_FORC.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n ar = 
np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(\n Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,\n (-1, 1))))\n outputfile = fn.value + '_XYZ.csv'\n with open(outputfile, 'w') as fp:\n fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +\n se_csv + '\\n')\n np.savetxt(fp, ar, '%s', ',')\n plt.show()\n\n\ndef FORCinel_colormap(Z):\n cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /\n 255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255\n ), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (\n 0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (\n (0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (\n 0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (\n 0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (\n 0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((\n 0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,\n 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /\n 255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /\n 255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),\n (1.0, 76 / 255, 76 / 255))}\n if np.abs(np.min(Z)) <= np.max(Z) * 0.19:\n vmin = -np.max(Z) * 0.19\n vmax = np.max(Z)\n else:\n vmin = np.min(Z)\n vmax = np.max(Z)\n anchors = np.zeros(10)\n anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)\n anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)\n anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)\n anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)\n anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)\n anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)\n anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)\n anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)\n anchors[9] = 1.0\n Rlst = list(cdict['red'])\n 
Glst = list(cdict['green'])\n Blst = list(cdict['blue'])\n for i in range(9):\n Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))\n Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))\n Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))\n cdict['red'] = tuple(Rlst)\n cdict['green'] = tuple(Glst)\n cdict['blue'] = tuple(Blst)\n cmap = LinearSegmentedColormap('forc_cmap', cdict)\n return cmap, vmin, vmax\n\n\ndef profile_options(X):\n Hb1 = X['Hb1'] - X['Hc2']\n Hb2 = X['Hb2']\n Hc1 = np.maximum(X['Hc1'], 0)\n Hc2 = X['Hc2']\n style = {'description_width': 'initial'}\n HL = widgets.HTML(value=\n '<hr style=\"height:3px;border:none;color:#333;background-color:#333;\" />'\n )\n P_title = widgets.HTML(value='<h3>Select profile type:</h3>')\n P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (\n 'Vertical profile', 1)], value=0, style=style)\n H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')\n if X['unit'] == 'SI':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 10, description='H$_u$ [Oe]', disabled=False, continuous_update\n =False, orientation='horizontal', readout=True, readout_format=\n '.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,\n continuous_update=False, orientation='horizontal', 
readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')\n if X['unit'] == 'SI':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=10, description='H$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=10, description='H$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,\n x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])\n profile_nest = widgets.Tab()\n profile_nest.children = [profile_widge]\n profile_nest.set_title(0, 'PLOT PROFILES')\n display(profile_nest)\n X['P_widge'] = P_widge\n X['x_Hb_widge'] = x_Hb_widge\n X['x_Hc_widge'] = x_Hc_widge\n X['y_Hc_widge'] = y_Hc_widge\n X['y_Hb_widge'] = y_Hb_widge\n return X\n\n\ndef profile_plot(X):\n if X['P_widge'].value == 0:\n X = x_profile(X, X['x_Hc_widge'].value, X['x_Hb_widge'].value)\n else:\n X = y_profile(X, X['y_Hc_widge'].value, X['y_Hb_widge'].value)\n return X\n\n\ndef x_profile(X, Hc, Hb):\n Hc1, Hc2 = Hc[0], Hc[1]\n dH = 
X['dH']\n NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)\n Hc0 = np.linspace(Hc1, Hc2, NH)\n Hb0 = np.linspace(Hb, Hb, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hc0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hc0, rho_int, color='k')\n ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{c}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{c}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hc_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n\n\ndef y_profile(X, Hc, Hb):\n Hb1, Hb2 = Hb[0], Hb[1]\n dH = X['dH']\n NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)\n Hc0 = np.linspace(Hc, Hc, NH)\n Hb0 = np.linspace(Hb1, Hb2, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = 
sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hb0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hb0, rho_int, color='k')\n ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{u}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{u}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hu_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n", "step-3": "<mask token>\nmpl.rcParams['pdf.fonttype'] = 42\nmpl.rcParams['ps.fonttype'] = 42\n\n\ndef forc(X):\n Xi = X['Xi']\n Yi = X['Yi']\n Zi = X['Zi']\n SEi = X['SEi']\n Pi = X['Pi']\n Hc1 = X['Hc1']\n Hc2 = X['Hc2']\n Hb1 = X['Hb1']\n Hb2 = X['Hb2']\n style = {'description_width': 'initial'}\n colorbar_widge = widgets.Checkbox(value=False, description=\n 'Show final FORC plot', 
style=style)\n pval_widge = widgets.Checkbox(value=False, description=\n 'Show 0.05 significance contour', style=style)\n colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,\n step=0.001, description='Rescale colormap minimum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=\n 0.001, description='Rescale colormap maximum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n contour_widge = widgets.Select(options=[['Select contour frequency', -1\n ], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level', \n 3], ['Every 4th level', 4], ['Every 5th level', 5], [\n 'Every 10th level', 10], ['Every 20th level', 20], [\n 'Every 50th level', 50]], value=-1, rows=1, description=\n 'Plot contours', style=style)\n contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,\n step=0.5, description='Contour line width [pts]', style=style)\n download_widge = widgets.Checkbox(value=False, description=\n 'Download plot', style=style)\n level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50\n ], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,\n rows=1, description='Number of color levels', style=style)\n if X['unit'] == 'SI':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum B$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n elif X['unit'] == 'cgs':\n xmin_widge = 
widgets.FloatText(value=0, description=\n 'Minimum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum H$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi\n =fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[\n 'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=\n level_widge, contour=contour_widge, contourpts=contourpts_widge,\n xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,\n colormin=colormin_widge, colormax=colormax_widge, download=\n download_widge)\n tab_nest = widgets.Tab()\n tab_nest.set_title(0, 'FORC PLOTTING')\n tab_nest.children = [VBox(children=x.children)]\n display(tab_nest)\n\n\ndef forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,\n contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(1, 1, 1)\n if mass.value < 0.0:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi\n SEi_new = SEi\n Pi_new = Pi\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n if unit == 'SI':\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$'\n se_csv = 'rho [Am**2 / T**2]'\n elif unit == 'cgs':\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu$ Oe$^{-2}$'\n se_csv = 'rho [emu / Oe**2]'\n else:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 
1000.0)\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n Pi_new = Pi\n if unit == 'SI':\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'\n se_csv = 'se [Am**2 / T**2 / kg]'\n elif unit == 'cgs':\n Zi_new = Zi / mass.value\n SEi_new = SEi / mass.value\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'\n se_csv = 'se [emu/ Oe**2 / g]'\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=\n ymax)\n cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])\n Zi_trunc = np.copy(Zi_new)\n Zi_trunc[np.isnan(Zi_trunc)] = 0.0\n Zi_trunc[Zi_trunc < vmin] = vmin\n vmini = vmin * (1 - colormin)\n vmaxi = vmax * colormax\n idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)\n cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])\n CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,\n vmax=vmax)\n if (contour > 0) & (contour < level):\n CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',\n linewidths=contourpts)\n ax.set_xlabel(xlabel_text, fontsize=14)\n ax.set_ylabel(ylabel_text, fontsize=14)\n xlimits = np.sort((xmin, xmax))\n ax.set_xlim(xlimits)\n ylimits = np.sort((ymin, ymax))\n ax.set_ylim(ylimits)\n ax.tick_params(labelsize=14)\n ax.set_aspect('equal')\n ax.minorticks_on()\n if colorbar == True:\n cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')\n cbar.ax.tick_params(labelsize=14)\n cbar.set_label(cbar_text, fontsize=14)\n if download == True:\n outputfile = fn.value + '_FORC.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(\n Yi_new, (-1, 1)), 
np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,\n (-1, 1))))\n outputfile = fn.value + '_XYZ.csv'\n with open(outputfile, 'w') as fp:\n fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +\n se_csv + '\\n')\n np.savetxt(fp, ar, '%s', ',')\n plt.show()\n\n\ndef FORCinel_colormap(Z):\n cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /\n 255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255\n ), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (\n 0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (\n (0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (\n 0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (\n 0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (\n 0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((\n 0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,\n 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /\n 255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /\n 255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),\n (1.0, 76 / 255, 76 / 255))}\n if np.abs(np.min(Z)) <= np.max(Z) * 0.19:\n vmin = -np.max(Z) * 0.19\n vmax = np.max(Z)\n else:\n vmin = np.min(Z)\n vmax = np.max(Z)\n anchors = np.zeros(10)\n anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)\n anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)\n anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)\n anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)\n anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)\n anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)\n anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)\n anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)\n anchors[9] = 1.0\n Rlst = list(cdict['red'])\n Glst = list(cdict['green'])\n Blst = list(cdict['blue'])\n for i in range(9):\n 
Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))\n Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))\n Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))\n cdict['red'] = tuple(Rlst)\n cdict['green'] = tuple(Glst)\n cdict['blue'] = tuple(Blst)\n cmap = LinearSegmentedColormap('forc_cmap', cdict)\n return cmap, vmin, vmax\n\n\ndef profile_options(X):\n Hb1 = X['Hb1'] - X['Hc2']\n Hb2 = X['Hb2']\n Hc1 = np.maximum(X['Hc1'], 0)\n Hc2 = X['Hc2']\n style = {'description_width': 'initial'}\n HL = widgets.HTML(value=\n '<hr style=\"height:3px;border:none;color:#333;background-color:#333;\" />'\n )\n P_title = widgets.HTML(value='<h3>Select profile type:</h3>')\n P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (\n 'Vertical profile', 1)], value=0, style=style)\n H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')\n if X['unit'] == 'SI':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 10, description='H$_u$ [Oe]', disabled=False, continuous_update\n =False, orientation='horizontal', readout=True, readout_format=\n '.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n 
V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')\n if X['unit'] == 'SI':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=10, description='H$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=10, description='H$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,\n x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])\n profile_nest = widgets.Tab()\n profile_nest.children = [profile_widge]\n profile_nest.set_title(0, 'PLOT PROFILES')\n display(profile_nest)\n X['P_widge'] = P_widge\n X['x_Hb_widge'] = x_Hb_widge\n X['x_Hc_widge'] = x_Hc_widge\n X['y_Hc_widge'] = y_Hc_widge\n X['y_Hb_widge'] = y_Hb_widge\n return X\n\n\ndef profile_plot(X):\n if X['P_widge'].value == 0:\n X = x_profile(X, X['x_Hc_widge'].value, X['x_Hb_widge'].value)\n else:\n X = y_profile(X, X['y_Hc_widge'].value, X['y_Hb_widge'].value)\n return X\n\n\ndef x_profile(X, Hc, Hb):\n Hc1, Hc2 = Hc[0], Hc[1]\n dH = X['dH']\n NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)\n Hc0 = np.linspace(Hc1, Hc2, NH)\n 
Hb0 = np.linspace(Hb, Hb, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hc0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hc0, rho_int, color='k')\n ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{c}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{c}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hc_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n\n\ndef y_profile(X, Hc, Hb):\n Hb1, Hb2 = Hb[0], Hb[1]\n dH = X['dH']\n NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)\n Hc0 = np.linspace(Hc, Hc, NH)\n Hb0 = np.linspace(Hb1, Hb2, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = 
plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hb0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hb0, rho_int, color='k')\n ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{u}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{u}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hu_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n", "step-4": "import numpy as np\nimport ipywidgets as widgets\nfrom ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.tri as tri\nimport matplotlib.colors as colors\nfrom matplotlib.colors import LinearSegmentedColormap\nimport scipy.stats as sps\nimport matplotlib.ticker as mtick\nmpl.rcParams['pdf.fonttype'] = 42\nmpl.rcParams['ps.fonttype'] = 42\n\n\ndef forc(X):\n Xi = X['Xi']\n Yi = 
X['Yi']\n Zi = X['Zi']\n SEi = X['SEi']\n Pi = X['Pi']\n Hc1 = X['Hc1']\n Hc2 = X['Hc2']\n Hb1 = X['Hb1']\n Hb2 = X['Hb2']\n style = {'description_width': 'initial'}\n colorbar_widge = widgets.Checkbox(value=False, description=\n 'Show final FORC plot', style=style)\n pval_widge = widgets.Checkbox(value=False, description=\n 'Show 0.05 significance contour', style=style)\n colormin_widge = widgets.FloatSlider(value=0.0, min=0.0, max=0.999,\n step=0.001, description='Rescale colormap minimum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n colormax_widge = widgets.FloatSlider(value=1.0, min=0.001, max=1, step=\n 0.001, description='Rescale colormap maximum', disabled=False,\n continuous_update=False, orientation='horizontal', readout=False,\n readout_format='.2f', style=style)\n contour_widge = widgets.Select(options=[['Select contour frequency', -1\n ], ['Every level', 1], ['Every 2nd level', 2], ['Every 3rd level', \n 3], ['Every 4th level', 4], ['Every 5th level', 5], [\n 'Every 10th level', 10], ['Every 20th level', 20], [\n 'Every 50th level', 50]], value=-1, rows=1, description=\n 'Plot contours', style=style)\n contourpts_widge = widgets.FloatSlider(value=1.0, min=0.5, max=3.0,\n step=0.5, description='Contour line width [pts]', style=style)\n download_widge = widgets.Checkbox(value=False, description=\n 'Download plot', style=style)\n level_widge = widgets.Select(options=[['20', 20], ['30', 30], ['50', 50\n ], ['75', 75], ['100', 100], ['200', 200], ['500', 500]], value=100,\n rows=1, description='Number of color levels', style=style)\n if X['unit'] == 'SI':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, 
description='Minimum B$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum B$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n elif X['unit'] == 'cgs':\n xmin_widge = widgets.FloatText(value=0, description=\n 'Minimum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n xmax_widge = widgets.FloatText(value=np.round(Hc2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{c}$ [Oe]', style=style, step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1 - Hc2) * 1000) /\n 1000, description='Minimum H$_\\\\mathrm{u}$ [Oe]', style=style,\n step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2 * 1000) / 1000,\n description='Maximum H$_\\\\mathrm{u}$ [Oe]', style=style, step=10)\n x = interactive(forcplot, Xi=fixed(Xi), Yi=fixed(Yi), Zi=fixed(Zi), SEi\n =fixed(SEi), Pi=fixed(Pi), fn=fixed(X['sample']), mass=fixed(X[\n 'mass']), unit=fixed(X['unit']), colorbar=colorbar_widge, level=\n level_widge, contour=contour_widge, contourpts=contourpts_widge,\n xmin=xmin_widge, xmax=xmax_widge, ymin=ymin_widge, ymax=ymax_widge,\n colormin=colormin_widge, colormax=colormax_widge, download=\n download_widge)\n tab_nest = widgets.Tab()\n tab_nest.set_title(0, 'FORC PLOTTING')\n tab_nest.children = [VBox(children=x.children)]\n display(tab_nest)\n\n\ndef forcplot(Xi, Yi, Zi, SEi, Pi, fn, mass, unit, colorbar, level, contour,\n contourpts, xmin, xmax, ymin, ymax, colormin, colormax, download):\n fig = plt.figure(figsize=(6, 6))\n ax = fig.add_subplot(1, 1, 1)\n if mass.value < 0.0:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi\n SEi_new = SEi\n Pi_new = Pi\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n if unit == 'SI':\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$'\n se_csv = 'rho [Am**2 / T**2]'\n elif unit == 'cgs':\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv 
= 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu$ Oe$^{-2}$'\n se_csv = 'rho [emu / Oe**2]'\n else:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n Pi_new = Pi\n if unit == 'SI':\n Zi_new = Zi / (mass.value / 1000.0)\n SEi_new = SEi / (mass.value / 1000.0)\n xlabel_text = 'B$_\\\\mathrm{c}$ [T]'\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\\\mathrm{u}$ [T]'\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'\n se_csv = 'se [Am**2 / T**2 / kg]'\n elif unit == 'cgs':\n Zi_new = Zi / mass.value\n SEi_new = SEi / mass.value\n xlabel_text = 'H$_\\\\mathrm{c}$ [Oe]'\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\\\mathrm{u}$ [Oe]'\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'\n se_csv = 'se [emu/ Oe**2 / g]'\n SEi_new[Zi_new == 0.0] = 0.0\n SEi_new[np.isnan(SEi_new)] = 0.0\n idx = (Xi_new >= xmin) & (Xi_new <= xmax) & (Yi_new >= ymin) & (Yi_new <=\n ymax)\n cmap, vmin, vmax = FORCinel_colormap(Zi_new[idx])\n Zi_trunc = np.copy(Zi_new)\n Zi_trunc[np.isnan(Zi_trunc)] = 0.0\n Zi_trunc[Zi_trunc < vmin] = vmin\n vmini = vmin * (1 - colormin)\n vmaxi = vmax * colormax\n idx = (Zi_trunc >= vmini) & (Zi_trunc <= vmaxi)\n cmap, vmin, vmax = FORCinel_colormap(Zi_trunc[idx])\n CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap=cmap, vmin=vmin,\n vmax=vmax)\n if (contour > 0) & (contour < level):\n CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',\n linewidths=contourpts)\n ax.set_xlabel(xlabel_text, fontsize=14)\n ax.set_ylabel(ylabel_text, fontsize=14)\n xlimits = np.sort((xmin, xmax))\n ax.set_xlim(xlimits)\n ylimits = np.sort((ymin, ymax))\n ax.set_ylim(ylimits)\n ax.tick_params(labelsize=14)\n ax.set_aspect('equal')\n ax.minorticks_on()\n if colorbar == True:\n cbar = fig.colorbar(CS, fraction=0.04, pad=0.08, format='%.2e')\n 
cbar.ax.tick_params(labelsize=14)\n cbar.set_label(cbar_text, fontsize=14)\n if download == True:\n outputfile = fn.value + '_FORC.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n ar = np.column_stack((np.reshape(Xi_new, (-1, 1)), np.reshape(\n Yi_new, (-1, 1)), np.reshape(Zi_trunc, (-1, 1)), np.reshape(SEi,\n (-1, 1))))\n outputfile = fn.value + '_XYZ.csv'\n with open(outputfile, 'w') as fp:\n fp.write(xlabel_csv + ',' + ylabel_csv + ',' + se_csv + ',' +\n se_csv + '\\n')\n np.savetxt(fp, ar, '%s', ',')\n plt.show()\n\n\ndef FORCinel_colormap(Z):\n cdict = {'red': ((0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 /\n 255), (0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255\n ), (0.3193, 102 / 255, 102 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 204 / 255, 204 / 255), (0.8319, 153 / 255, 153 / 255), (\n 0.9748, 76 / 255, 76 / 255), (1.0, 76 / 255, 76 / 255)), 'green': (\n (0.0, 127 / 255, 127 / 255), (0.1387, 255 / 255, 255 / 255), (\n 0.1597, 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (\n 0.3193, 178 / 255, 178 / 255), (0.563, 204 / 255, 204 / 255), (\n 0.6975, 76 / 255, 76 / 255), (0.8319, 102 / 255, 102 / 255), (\n 0.9748, 25 / 255, 25 / 255), (1.0, 25 / 255, 25 / 255)), 'blue': ((\n 0.0, 255 / 255, 255 / 255), (0.1387, 255 / 255, 255 / 255), (0.1597,\n 255 / 255, 255 / 255), (0.1807, 255 / 255, 255 / 255), (0.3193, 102 /\n 255, 102 / 255), (0.563, 76 / 255, 76 / 255), (0.6975, 76 / 255, 76 /\n 255), (0.8319, 153 / 255, 153 / 255), (0.9748, 76 / 255, 76 / 255),\n (1.0, 76 / 255, 76 / 255))}\n if np.abs(np.min(Z)) <= np.max(Z) * 0.19:\n vmin = -np.max(Z) * 0.19\n vmax = np.max(Z)\n else:\n vmin = np.min(Z)\n vmax = np.max(Z)\n anchors = np.zeros(10)\n anchors[1] = (-0.025 * vmax - vmin) / (vmax - vmin)\n anchors[2] = (-0.005 * vmax - vmin) / (vmax - vmin)\n anchors[3] = (0.025 * vmax - vmin) / (vmax - vmin)\n anchors[4] = (0.19 * vmax - vmin) / (vmax - vmin)\n anchors[5] = (0.48 * vmax - vmin) / (vmax - vmin)\n 
anchors[6] = (0.64 * vmax - vmin) / (vmax - vmin)\n anchors[7] = (0.8 * vmax - vmin) / (vmax - vmin)\n anchors[8] = (0.97 * vmax - vmin) / (vmax - vmin)\n anchors[9] = 1.0\n Rlst = list(cdict['red'])\n Glst = list(cdict['green'])\n Blst = list(cdict['blue'])\n for i in range(9):\n Rlst[i] = tuple((anchors[i], Rlst[i][1], Rlst[i][2]))\n Glst[i] = tuple((anchors[i], Glst[i][1], Glst[i][2]))\n Blst[i] = tuple((anchors[i], Blst[i][1], Blst[i][2]))\n cdict['red'] = tuple(Rlst)\n cdict['green'] = tuple(Glst)\n cdict['blue'] = tuple(Blst)\n cmap = LinearSegmentedColormap('forc_cmap', cdict)\n return cmap, vmin, vmax\n\n\ndef profile_options(X):\n Hb1 = X['Hb1'] - X['Hc2']\n Hb2 = X['Hb2']\n Hc1 = np.maximum(X['Hc1'], 0)\n Hc2 = X['Hc2']\n style = {'description_width': 'initial'}\n HL = widgets.HTML(value=\n '<hr style=\"height:3px;border:none;color:#333;background-color:#333;\" />'\n )\n P_title = widgets.HTML(value='<h3>Select profile type:</h3>')\n P_widge = widgets.RadioButtons(options=[('Horizontal profile', 0), (\n 'Vertical profile', 1)], value=0, style=style)\n H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')\n if X['unit'] == 'SI':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n x_Hb_widge = widgets.FloatSlider(value=0.0, min=Hb1, max=Hb2, step=\n 10, description='H$_u$ [Oe]', disabled=False, continuous_update\n =False, orientation='horizontal', readout=True, readout_format=\n '.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif 
X['unit'] == 'cgs':\n x_Hc_widge = widgets.FloatRangeSlider(value=[Hc1, Hc2], min=Hc1,\n max=Hc2, step=10, description='H$_c$ [Oe]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')\n if X['unit'] == 'SI':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=0.001, description='B$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hc_widge = widgets.FloatSlider(value=(Hc1 + Hc2) / 2.0, min=Hc1,\n max=Hc2, step=10, description='H$_c$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n if X['unit'] == 'SI':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=0.001, description='B$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n elif X['unit'] == 'cgs':\n y_Hb_widge = widgets.FloatRangeSlider(value=[Hb1, Hb2], min=Hb1,\n max=Hb2, step=10, description='H$_u$ [T]', disabled=False,\n continuous_update=False, orientation='horizontal', readout=True,\n readout_format='.3f', layout={'width': '350px'}, style=style)\n profile_widge = VBox([P_title, P_widge, HL, H_title, x_Hb_widge,\n x_Hc_widge, HL, V_title, y_Hc_widge, y_Hb_widge])\n profile_nest = widgets.Tab()\n profile_nest.children = [profile_widge]\n profile_nest.set_title(0, 'PLOT PROFILES')\n display(profile_nest)\n X['P_widge'] = P_widge\n X['x_Hb_widge'] = x_Hb_widge\n X['x_Hc_widge'] = x_Hc_widge\n X['y_Hc_widge'] = y_Hc_widge\n X['y_Hb_widge'] = y_Hb_widge\n return X\n\n\ndef profile_plot(X):\n if X['P_widge'].value == 0:\n X = x_profile(X, 
X['x_Hc_widge'].value, X['x_Hb_widge'].value)\n else:\n X = y_profile(X, X['y_Hc_widge'].value, X['y_Hb_widge'].value)\n return X\n\n\ndef x_profile(X, Hc, Hb):\n Hc1, Hc2 = Hc[0], Hc[1]\n dH = X['dH']\n NH = int(np.sqrt((Hc2 - Hc1) ** 2) / dH)\n Hc0 = np.linspace(Hc1, Hc2, NH)\n Hb0 = np.linspace(Hb, Hb, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hc0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hc0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hc0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hc0, rho_int, color='k')\n ax1.fill_between(Hc0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{c}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{c}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hc_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n\n\ndef y_profile(X, Hc, Hb):\n Hb1, Hb2 = 
Hb[0], Hb[1]\n dH = X['dH']\n NH = int(np.sqrt((Hb2 - Hb1) ** 2) / dH)\n Hc0 = np.linspace(Hc, Hc, NH)\n Hb0 = np.linspace(Hb1, Hb2, NH)\n rho_int = X['Zint'](Hc0, Hb0)\n coef = sps.norm.ppf(0.025 / np.sum(rho_int.mask == False))\n CI_int = X['SEint'](Hc0, Hb0) * coef\n fig = plt.figure(figsize=(5, 5))\n ax1 = fig.add_subplot(1, 1, 1)\n if X['mass'].value > 0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hb0, rho_int / (X['mass'].value / 1000.0), color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / (X['mass'].value / \n 1000.0), (rho_int + CI_int) / (X['mass'].value / 1000.0),\n color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hb0, rho_int / X['mass'].value, color='k')\n ax1.fill_between(Hb0, (rho_int - CI_int) / X['mass'].value, (\n rho_int + CI_int) / X['mass'].value, color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$', fontsize=14)\n else:\n ax1.plot(Hb0, rho_int, color='k')\n ax1.fill_between(Hb0, rho_int - CI_int, rho_int + CI_int, color=\n 'lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$', fontsize=14)\n ax1.tick_params(axis='both', which='major', direction='out', length=5,\n width=1, color='k', labelsize='14')\n ax1.tick_params(axis='both', which='minor', direction='out', length=3.5,\n width=1, color='k')\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\\\mathrm{u}$ [T]', fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\\\mathrm{u}$ [Oe]', fontsize=14)\n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n outputfile = X['sample'].value + '_Hu_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches='tight')\n plt.show\n return X\n", "step-5": "import numpy as np\nimport ipywidgets as widgets\nfrom ipywidgets import interact, interactive, fixed, Layout, VBox, HBox\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nimport matplotlib.tri 
as tri\nimport matplotlib.colors as colors\nfrom matplotlib.colors import LinearSegmentedColormap\nimport scipy.stats as sps\nimport matplotlib.ticker as mtick\nmpl.rcParams['pdf.fonttype'] = 42\nmpl.rcParams['ps.fonttype'] = 42\n\n\n#### FORC plotting ####\ndef forc(X):\n\n #unpack data\n Xi = X['Xi']\n Yi = X['Yi']\n Zi = X['Zi']\n SEi = X['SEi']\n Pi = X['Pi']\n Hc1 = X['Hc1']\n Hc2 = X['Hc2']\n Hb1 = X['Hb1']\n Hb2 = X['Hb2']\n\n #Set up widgets for interactive plot\n style = {'description_width': 'initial'} #general style settings\n \n #DEFINE INTERACTIVE WIDGETS\n \n #should a colorbar be included\n colorbar_widge = widgets.Checkbox(value=False, description = 'Show final FORC plot',style=style) \n pval_widge = widgets.Checkbox(value=False, description = 'Show 0.05 significance contour',style=style) \n\n colormin_widge = widgets.FloatSlider(\n value=0.0,\n min=0.00,\n max=0.999,\n step=0.001,\n description='Rescale colormap minimum',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=False,\n readout_format='.2f',\n style=style\n )\n\n colormax_widge = widgets.FloatSlider(\n value=1.0,\n min=0.001,\n max=1,\n step=0.001,\n description='Rescale colormap maximum',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=False,\n readout_format='.2f',\n style=style\n )\n\n #Frequency for contour lines to be included in plot\n contour_widge = widgets.Select(\n options=[['Select contour frequency',-1],\n ['Every level',1],\n ['Every 2nd level',2],\n ['Every 3rd level',3],\n ['Every 4th level',4],\n ['Every 5th level',5],\n ['Every 10th level',10],\n ['Every 20th level',20],\n ['Every 50th level',50],\n ],\n value=-1,\n rows=1,\n description='Plot contours',style=style)\n \n contourpts_widge = widgets.FloatSlider(value=1.0,min=0.5,max=3.0,step=0.5, description = 'Contour line width [pts]',style=style)\n\n #check box for plot download\n download_widge = widgets.Checkbox(value=False, description = 'Download 
plot',style=style) \n \n #How many contour levels should be included\n level_widge = widgets.Select(\n options=[['20',20],['30',30],['50',50],['75',75],['100',100],['200',200],['500',500]],\n value=100,\n rows=1,\n description='Number of color levels',style=style)\n\n #plot limit widgets\n if X['unit']=='SI': \n xmin_widge = widgets.FloatText(value=0,description='Minimum B$_\\mathrm{c}$ [Oe]',style=style,step=10) \n xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum B$_\\mathrm{c}$ [Oe]',style=style,step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum B$_\\mathrm{u}$ [Oe]',style=style,step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum B$_\\mathrm{u}$ [Oe]',style=style,step=10)\n elif X['unit']=='cgs':\n xmin_widge = widgets.FloatText(value=0,description='Minimum H$_\\mathrm{c}$ [Oe]',style=style,step=10) \n xmax_widge = widgets.FloatText(value=np.round(Hc2*1000)/1000,description='Maximum H$_\\mathrm{c}$ [Oe]',style=style,step=10)\n ymin_widge = widgets.FloatText(value=np.round((Hb1-Hc2)*1000)/1000,description='Minimum H$_\\mathrm{u}$ [Oe]',style=style,step=10)\n ymax_widge = widgets.FloatText(value=np.round(Hb2*1000)/1000,description='Maximum H$_\\mathrm{u}$ [Oe]',style=style,step=10) \n\n #launch the interactive FORC plot\n x = interactive(forcplot,\n Xi=fixed(Xi), #X point grid\n Yi=fixed(Yi), #Y point grid\n Zi=fixed(Zi), #interpolated Z values\n SEi = fixed(SEi), #interpolated standard errors\n Pi = fixed(Pi), #P values\n fn=fixed(X['sample']), #File information\n mass=fixed(X['mass']), #Preprocessing information\n unit=fixed(X['unit']),\n colorbar=colorbar_widge, #Include colorbar \n level=level_widge, #Number of levels to plot \n contour=contour_widge, #Contour levels to plot\n contourpts=contourpts_widge, #Contour line width\n xmin=xmin_widge, #X-minimum\n xmax=xmax_widge, #X-maximum\n ymin=ymin_widge, #Y-minimum\n ymax=ymax_widge, 
#Y-maximum\n colormin = colormin_widge, #adjust colormap minimum\n colormax = colormax_widge, #adjust colormap minimum\n download = download_widge #download plot\n )\n \n #create tabs\n tab_nest = widgets.Tab()\n # tab_nest.children = [tab_visualise]\n tab_nest.set_title(0, 'FORC PLOTTING')\n\n #interact function in isolation\n tab_nest.children = [VBox(children = x.children)]\n display(tab_nest)\n \n #display(x) #display the interactive plot\n\ndef forcplot(Xi,Yi,Zi,SEi,Pi,fn,mass,unit,colorbar,level,contour,contourpts,xmin,xmax,ymin,ymax,colormin,colormax,download):\n \n\n fig = plt.figure(figsize=(6,6))\n ax = fig.add_subplot(1,1,1)\n \n if mass.value<0.0: \n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi\n SEi_new = SEi\n Pi_new = Pi\n SEi_new[Zi_new==0.0]=0.0\n SEi_new[np.isnan(SEi_new)]=0.0\n if unit=='SI':\n xlabel_text = 'B$_\\mathrm{c}$ [T]' #label Hc axis [SI units]\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\mathrm{u}$ [T]' #label Hu axis [SI units]\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$'\n se_csv = 'rho [Am**2 / T**2]'\n elif unit=='cgs':\n xlabel_text = 'H$_\\mathrm{c}$ [Oe]' #label Hc axis [SI units]\n xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\mathrm{u}$ [Oe]' #label Hu axis [SI units]\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu$ Oe$^{-2}$'\n se_csv = 'rho [emu / Oe**2]' \n else:\n Xi_new = Xi\n Yi_new = Yi\n Zi_new = Zi / (mass.value/1000.0)\n SEi_new = SEi / (mass.value/1000.0)\n SEi_new[Zi_new==0.0]=0.0\n SEi_new[np.isnan(SEi_new)]=0.0\n Pi_new = Pi\n if unit=='SI':\n Zi_new = Zi / (mass.value/1000.0)\n SEi_new = SEi / (mass.value/1000.0) \n xlabel_text = 'B$_\\mathrm{c}$ [T]' #label Hc axis [SI units]\n xlabel_csv = 'Bc [T]'\n ylabel_text = 'B$_\\mathrm{u}$ [T]' #label Hu axis [SI units]\n ylabel_csv = 'Bu [T]'\n cbar_text = 'Am$^2$ T$^{-2}$ kg$^{-1}$'\n se_csv = 'se [Am**2 / T**2 / kg]'\n elif unit=='cgs':\n Zi_new = Zi / (mass.value)\n SEi_new = SEi / (mass.value) \n xlabel_text = 'H$_\\mathrm{c}$ [Oe]' #label Hc axis [SI units]\n 
xlabel_csv = 'Hc [Oe]'\n ylabel_text = 'H$_\\mathrm{u}$ [Oe]' #label Hu axis [SI units]\n ylabel_csv = 'Hu [Oe]'\n cbar_text = 'emu Oe$^{-2}$ g$^{-1}$'\n se_csv = 'se [emu/ Oe**2 / g]' \n \n SEi_new[Zi_new==0.0]=0.0\n SEi_new[np.isnan(SEi_new)]=0.0 \n\n #define colormaps\n idx=(Xi_new>=xmin) & (Xi_new<=xmax) & (Yi_new>=ymin) & (Yi_new<=ymax) #find points currently in view\n cmap,vmin,vmax = FORCinel_colormap(Zi_new[idx])\n #cmap, norm = FORCinel_colormap(Zi_new[idx])\n\n Zi_trunc = np.copy(Zi_new)\n Zi_trunc[np.isnan(Zi_trunc)] = 0.0\n Zi_trunc[Zi_trunc<vmin]=vmin\n \n vmini = vmin*(1-colormin)\n vmaxi = vmax*colormax\n\n idx = (Zi_trunc>=vmini) & (Zi_trunc<=vmaxi)\n cmap,vmin,vmax = FORCinel_colormap(Zi_trunc[idx])\n\n CS = ax.contourf(Xi_new, Yi_new, Zi_trunc, level, cmap = cmap, vmin=vmin, vmax=vmax)\n \n if (contour>0) & (contour<level):\n CS2 = ax.contour(CS, levels=CS.levels[::contour], colors='k',linewidths=contourpts)\n\n #if pval==True:\n # CS3 = ax.contour(Xi_new, Yi_new, Pi_new, levels=[0.05], colors=['r'])\n\n ax.set_xlabel(xlabel_text,fontsize=14) #label Hc axis [SI units]\n ax.set_ylabel(ylabel_text,fontsize=14) #label Hu axis [SI units] \n\n # Set plot Xlimits\n xlimits = np.sort((xmin,xmax))\n ax.set_xlim(xlimits)\n \n #Set plot Ylimits\n ylimits = np.sort((ymin,ymax))\n ax.set_ylim(ylimits)\n \n #Set ticks and plot aspect ratio\n ax.tick_params(labelsize=14)\n ax.set_aspect('equal') #set 1:1 aspect ratio\n ax.minorticks_on() #add minor ticks\n \n #Add colorbar\n if colorbar == True: \n cbar = fig.colorbar(CS,fraction=0.04, pad=0.08,format='%.2e')\n cbar.ax.tick_params(labelsize=14)\n #cbar.ax.set_title(cbar_text,fontsize=14)\n cbar.set_label(cbar_text,fontsize=14)\n #cbar.ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n\n \n #Activate download to same folder as data file\n if download==True:\n outputfile = fn.value+'_FORC.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches=\"tight\")\n \n ar = 
np.column_stack((np.reshape(Xi_new,(-1,1)),np.reshape(Yi_new,(-1,1)),np.reshape(Zi_trunc,(-1,1)),np.reshape(SEi,(-1,1))))\n outputfile = fn.value+'_XYZ.csv'\n with open(outputfile, 'w') as fp:\n fp.write(xlabel_csv+','+ylabel_csv+','+se_csv+','+se_csv + '\\n')\n np.savetxt(fp, ar, '%s', ',')\n \n #show the final plot\n plt.show()\n\ndef FORCinel_colormap(Z):\n\n #setup initial colormap assuming that negative range does not require extension\n cdict = {'red': ((0.0, 127/255, 127/255),\n (0.1387, 255/255, 255/255),\n (0.1597, 255/255, 255/255),\n (0.1807, 255/255, 255/255),\n (0.3193, 102/255, 102/255),\n (0.563, 204/255, 204/255),\n (0.6975, 204/255, 204/255),\n (0.8319, 153/255, 153/255),\n (0.9748, 76/255, 76/255),\n (1.0, 76/255, 76/255)),\n\n 'green': ((0.0, 127/255, 127/255),\n (0.1387, 255/255, 255/255),\n (0.1597, 255/255, 255/255),\n (0.1807, 255/255, 255/255),\n (0.3193, 178/255, 178/255),\n (0.563, 204/255, 204/255),\n (0.6975, 76/255, 76/255),\n (0.8319, 102/255, 102/255),\n (0.9748, 25/255, 25/255),\n (1.0, 25/255, 25/255)),\n\n 'blue': ((0.0, 255/255, 255/255),\n (0.1387, 255/255, 255/255),\n (0.1597, 255/255, 255/255),\n (0.1807, 255/255, 255/255),\n (0.3193, 102/255, 102/255),\n (0.563, 76/255, 76/255),\n (0.6975, 76/255, 76/255),\n (0.8319, 153/255, 153/255),\n (0.9748, 76/255, 76/255),\n (1.0, 76/255, 76/255))}\n\n if np.abs(np.min(Z))<=np.max(Z)*0.19: #negative extension is not required\n #cmap = LinearSegmentedColormap('forc_cmap', cdict)\n vmin = -np.max(Z)*0.19\n vmax = np.max(Z)\n else: #negative extension is required\n vmin=np.min(Z)\n vmax=np.max(Z) \n \n anchors = np.zeros(10)\n anchors[1]=(-0.025*vmax-vmin)/(vmax-vmin)\n anchors[2]=(-0.005*vmax-vmin)/(vmax-vmin)\n anchors[3]=(0.025*vmax-vmin)/(vmax-vmin)\n anchors[4]=(0.19*vmax-vmin)/(vmax-vmin)\n anchors[5]=(0.48*vmax-vmin)/(vmax-vmin)\n anchors[6]=(0.64*vmax-vmin)/(vmax-vmin)\n anchors[7]=(0.80*vmax-vmin)/(vmax-vmin)\n anchors[8]=(0.97*vmax-vmin)/(vmax-vmin)\n anchors[9]=1.0\n\n Rlst = 
list(cdict['red'])\n Glst = list(cdict['green'])\n Blst = list(cdict['blue'])\n\n for i in range(9):\n Rlst[i] = tuple((anchors[i],Rlst[i][1],Rlst[i][2]))\n Glst[i] = tuple((anchors[i],Glst[i][1],Glst[i][2]))\n Blst[i] = tuple((anchors[i],Blst[i][1],Blst[i][2]))\n \n cdict['red'] = tuple(Rlst)\n cdict['green'] = tuple(Glst)\n cdict['blue'] = tuple(Blst)\n\n cmap = LinearSegmentedColormap('forc_cmap', cdict)\n\n return cmap, vmin, vmax\n\n #### Profile Plotting ####\n\n#### Profile plotting ####\n\ndef profile_options(X):\n Hb1 = X['Hb1']-X['Hc2']\n Hb2 = X['Hb2']\n Hc1 = np.maximum(X['Hc1'],0)\n Hc2 = X['Hc2']\n style = {'description_width': 'initial'} #general style settings\n \n HL = widgets.HTML(value='<hr style=\"height:3px;border:none;color:#333;background-color:#333;\" />')\n \n P_title = widgets.HTML(value='<h3>Select profile type:</h3>')\n P_widge = widgets.RadioButtons(options=[('Horizontal profile',0), ('Vertical profile',1)],\n value=0,\n style=style)\n \n H_title = widgets.HTML(value='<h4>Horizontal profile specification:</h4>')\n\n if X['unit'] == 'SI':\n x_Hb_widge = widgets.FloatSlider(\n value=0.0,\n min=Hb1,\n max=Hb2,\n step=0.001,\n description='B$_u$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n elif X['unit'] == 'cgs':\n x_Hb_widge = widgets.FloatSlider(\n value=0.0,\n min=Hb1,\n max=Hb2,\n step=10,\n description='H$_u$ [Oe]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n \n if X['unit'] == 'SI':\n x_Hc_widge = widgets.FloatRangeSlider(\n value=[Hc1,Hc2],\n min=Hc1,\n max=Hc2,\n step=0.001,\n description='B$_c$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n elif X['unit'] == 'cgs':\n 
x_Hc_widge = widgets.FloatRangeSlider(\n value=[Hc1,Hc2],\n min=Hc1,\n max=Hc2,\n step=10,\n description='H$_c$ [Oe]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n \n V_title = widgets.HTML(value='<h4>Vertical profile specification:</h4>')\n \n if X['unit'] == 'SI':\n y_Hc_widge = widgets.FloatSlider(\n value=(Hc1+Hc2)/2.0,\n min=Hc1,\n max=Hc2,\n step=0.001,\n description='B$_c$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n elif X['unit'] == 'cgs':\n y_Hc_widge = widgets.FloatSlider(\n value=(Hc1+Hc2)/2.0,\n min=Hc1,\n max=Hc2,\n step=10,\n description='H$_c$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n\n if X['unit'] == 'SI':\n y_Hb_widge = widgets.FloatRangeSlider(\n value=[Hb1,Hb2],\n min=Hb1,\n max=Hb2,\n step=0.001,\n description='B$_u$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n\n elif X['unit'] == 'cgs':\n y_Hb_widge = widgets.FloatRangeSlider(\n value=[Hb1,Hb2],\n min=Hb1,\n max=Hb2,\n step=10,\n description='H$_u$ [T]',\n disabled=False,\n continuous_update=False,\n orientation='horizontal',\n readout=True,\n readout_format='.3f',\n layout={'width': '350px'},\n style = style\n )\n \n profile_widge = VBox([P_title,P_widge,HL,H_title,x_Hb_widge,x_Hc_widge, \\\n HL,V_title,y_Hc_widge,y_Hb_widge])\n \n profile_nest = widgets.Tab()\n profile_nest.children = [profile_widge]\n profile_nest.set_title(0, 'PLOT PROFILES')\n display(profile_nest) \n \n X['P_widge'] = P_widge\n X['x_Hb_widge'] = x_Hb_widge\n X['x_Hc_widge'] = x_Hc_widge\n X['y_Hc_widge'] = y_Hc_widge\n 
X['y_Hb_widge'] = y_Hb_widge\n\n return X\n\ndef profile_plot(X):\n\n if X['P_widge'].value==0:\n X = x_profile(X,X['x_Hc_widge'].value,X['x_Hb_widge'].value)\n else:\n X = y_profile(X,X['y_Hc_widge'].value,X['y_Hb_widge'].value)\n \n return X\n\ndef x_profile(X,Hc,Hb):\n\n Hc1, Hc2 = Hc[0], Hc[1]\n\n dH = X['dH']\n NH = int(np.sqrt((Hc2-Hc1)**2)/dH)\n Hc0 = np.linspace(Hc1,Hc2,NH)\n Hb0 = np.linspace(Hb,Hb,NH)\n \n rho_int = X['Zint'](Hc0,Hb0)\n coef = sps.norm.ppf(0.025/np.sum(rho_int.mask==False))\n CI_int = X['SEint'](Hc0,Hb0)*coef\n\n fig = plt.figure(figsize=(5,5))\n ax1 = fig.add_subplot(1,1,1)\n \n if X['mass'].value>0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hc0,rho_int/(X['mass'].value/1000.0),color='k')\n ax1.fill_between(Hc0, (rho_int-CI_int)/(X['mass'].value/1000.0), (rho_int+CI_int)/(X['mass'].value/1000.0),color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hc0,rho_int/(X['mass'].value),color='k')\n ax1.fill_between(Hc0, (rho_int-CI_int)/(X['mass'].value), (rho_int+CI_int)/(X['mass'].value),color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$',fontsize=14)\n else:\n ax1.plot(Hc0,rho_int,color='k')\n ax1.fill_between(Hc0, (rho_int-CI_int), (rho_int+CI_int),color='lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$',fontsize=14)\n\n\n ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='14')\n ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')\n \n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\mathrm{c}$ [T]',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\mathrm{c}$ [Oe]',fontsize=14) \n \n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n\n\n outputfile = X['sample'].value+'_Hc_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches=\"tight\")\n plt.show\n \n 
return X\n\ndef y_profile(X,Hc,Hb):\n\n Hb1, Hb2 = Hb[0], Hb[1]\n\n dH = X['dH']\n NH = int(np.sqrt((Hb2-Hb1)**2)/dH)\n Hc0 = np.linspace(Hc,Hc,NH)\n Hb0 = np.linspace(Hb1,Hb2,NH)\n \n rho_int = X['Zint'](Hc0,Hb0)\n coef = sps.norm.ppf(0.025/np.sum(rho_int.mask==False))\n CI_int = X['SEint'](Hc0,Hb0)*coef\n\n fig = plt.figure(figsize=(5,5))\n ax1 = fig.add_subplot(1,1,1)\n\n if X['mass'].value>0.0:\n if X['unit'] == 'SI':\n ax1.plot(Hb0,rho_int/(X['mass'].value/1000.0),color='k')\n ax1.fill_between(Hb0, (rho_int-CI_int)/(X['mass'].value/1000.0), (rho_int+CI_int)/(X['mass'].value/1000.0),color='lightgrey')\n ax1.set_ylabel('Am$^2$ T$^{-2}$ kg$^{-1}$',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.plot(Hb0,rho_int/(X['mass'].value),color='k')\n ax1.fill_between(Hb0, (rho_int-CI_int)/(X['mass'].value), (rho_int+CI_int)/(X['mass'].value),color='lightgrey')\n ax1.set_ylabel('emu Oe$^{-2}$ g$^{-1}$',fontsize=14)\n else:\n ax1.plot(Hb0,rho_int,color='k')\n ax1.fill_between(Hb0, (rho_int-CI_int), (rho_int+CI_int),color='lightgrey')\n if X['unit'] == 'SI':\n ax1.set_ylabel('Am$^2$ T$^{-2}$',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_ylabel('emu Oe$^{-2}$',fontsize=14) \n \n ax1.tick_params(axis='both',which='major',direction='out',length=5,width=1,color='k',labelsize='14')\n ax1.tick_params(axis='both',which='minor',direction='out',length=3.5,width=1,color='k')\n\n if X['unit'] == 'SI':\n ax1.set_xlabel('B$_\\mathrm{u}$ [T]',fontsize=14)\n elif X['unit'] == 'cgs':\n ax1.set_xlabel('H$_\\mathrm{u}$ [Oe]',fontsize=14) \n \n ax1.minorticks_on()\n ax1.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.2e'))\n\n\n outputfile = X['sample'].value+'_Hu_PROFILE.pdf'\n plt.savefig(outputfile, dpi=300, bbox_inches=\"tight\")\n plt.show\n \n return X", "step-ids": [ 6, 7, 8, 9, 10 ] }
[ 6, 7, 8, 9, 10 ]
<|reserved_special_token_0|> class BaseExecution: <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseExecution: def __init__(self, flag, parser): self.flag = flag self.parser = parser <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class BaseExecution: def __init__(self, flag, parser): self.flag = flag self.parser = parser def execute(self): process = subprocess.Popen(f'df {self.flag}', shell=True, stdout= subprocess.PIPE, stderr=subprocess.PIPE) output, err = process.communicate() return_code = process.returncode parser = self.parser(output, err, return_code) result = parser.parse() return result <|reserved_special_token_1|> import subprocess class BaseExecution: def __init__(self, flag, parser): self.flag = flag self.parser = parser def execute(self): process = subprocess.Popen(f'df {self.flag}', shell=True, stdout= subprocess.PIPE, stderr=subprocess.PIPE) output, err = process.communicate() return_code = process.returncode parser = self.parser(output, err, return_code) result = parser.parse() return result
flexible
{ "blob_id": "d8af43d24a2f2b99bc8b5098f251e017852d6d86", "index": 1085, "step-1": "<mask token>\n\n\nclass BaseExecution:\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n <mask token>\n", "step-3": "<mask token>\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n\n def execute(self):\n process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n output, err = process.communicate()\n return_code = process.returncode\n parser = self.parser(output, err, return_code)\n result = parser.parse()\n return result\n", "step-4": "import subprocess\n\n\nclass BaseExecution:\n\n def __init__(self, flag, parser):\n self.flag = flag\n self.parser = parser\n\n def execute(self):\n process = subprocess.Popen(f'df {self.flag}', shell=True, stdout=\n subprocess.PIPE, stderr=subprocess.PIPE)\n output, err = process.communicate()\n return_code = process.returncode\n parser = self.parser(output, err, return_code)\n result = parser.parse()\n return result\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
import config import psycopg2 from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT def check_db_exists(opt): try: conn = psycopg2.connect(opt) cur = conn.cursor() cur.close() print('Database exists.') return True except: print("Database doesn't exist.") return False def create_db(opt): if check_db_exists(opt): pass else: print("Creating new database.") conn = psycopg2.connect(opt) conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) cur = conn.cursor() cur.execute(f"CREATE DATABASE {config.db_name};") cur.close() def create_tables(opt): if check_db_exists(opt): commands = (""" CREATE TABLE IF NOT EXISTS stock ( id SERIAL PRIMARY KEY, ticker VARCHAR NOT NULL, name VARCHAR NOT NULL, created_date TIMESTAMP NOT NULL, last_updated_date TIMESTAMP NOT NULL ) """, """ CREATE TABLE IF NOT EXISTS price ( id SERIAL PRIMARY KEY, stock_id INTEGER NOT NULL, created_date TIMESTAMP NOT NULL, last_updated_date TIMESTAMP NOT NULL, date_price TIMESTAMP, open_price NUMERIC, high_price NUMERIC, low_price NUMERIC, close_price NUMERIC, volume BIGINT, FOREIGN KEY (stock_id) REFERENCES stock(id)) """, """ CREATE TABLE IF NOT EXISTS fundamentals ( id SERIAL PRIMARY KEY, stock_id INTEGER NOT NULL, created_date TIMESTAMP NOT NULL, last_updated_date TIMESTAMP NOT NULL, longBusinessSummary TEXT, sector VARCHAR, sharesOutstanding BIGINT, marketCap BIGINT, forwardPE REAL, dividendYield REAL, beta REAL, previousClose REAL, averageVolume BIGINT, FOREIGN KEY (stock_id) REFERENCES stock(id)) """, """ CREATE TABLE IF NOT EXISTS news ( id SERIAL PRIMARY KEY, stock_id INTEGER NOT NULL, news_date TIMESTAMP NOT NULL, headline VARCHAR NOT NULL, url VARCHAR NOT NULL, sentiment REAL, FOREIGN KEY (stock_id) REFERENCES stock(id)) """ ) try: for command in commands: print('Building database tables') conn = psycopg2.connect(opt) cur = conn.cursor() cur.execute(command) conn.commit() cur.close() except (Exception, psycopg2.DatabaseError) as e: print(e) cur.close() else: pass def main(): opt = 
f"postgres://{config.username}:{config.password}@{config.host}:{config.port}/{config.cluster}.{config.db_name}?sslmode=verify-full&sslrootcert={config.cert_dir}/cc-ca.crt" create_db(opt) create_tables(opt) if __name__ == "__main__": main()
normal
{ "blob_id": "09792da1c3cc38c7df7def2b487c2078de4e8912", "index": 9514, "step-1": "<mask token>\n\n\ndef check_db_exists(opt):\n try:\n conn = psycopg2.connect(opt)\n cur = conn.cursor()\n cur.close()\n print('Database exists.')\n return True\n except:\n print(\"Database doesn't exist.\")\n return False\n\n\ndef create_db(opt):\n if check_db_exists(opt):\n pass\n else:\n print('Creating new database.')\n conn = psycopg2.connect(opt)\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur = conn.cursor()\n cur.execute(f'CREATE DATABASE {config.db_name};')\n cur.close()\n\n\n<mask token>\n\n\ndef main():\n opt = (\n f'postgres://{config.username}:{config.password}@{config.host}:{config.port}/{config.cluster}.{config.db_name}?sslmode=verify-full&sslrootcert={config.cert_dir}/cc-ca.crt'\n )\n create_db(opt)\n create_tables(opt)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef check_db_exists(opt):\n try:\n conn = psycopg2.connect(opt)\n cur = conn.cursor()\n cur.close()\n print('Database exists.')\n return True\n except:\n print(\"Database doesn't exist.\")\n return False\n\n\ndef create_db(opt):\n if check_db_exists(opt):\n pass\n else:\n print('Creating new database.')\n conn = psycopg2.connect(opt)\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur = conn.cursor()\n cur.execute(f'CREATE DATABASE {config.db_name};')\n cur.close()\n\n\ndef create_tables(opt):\n if check_db_exists(opt):\n commands = \"\"\" CREATE TABLE IF NOT EXISTS stock (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tticker VARCHAR NOT NULL,\n\t\t\tname VARCHAR NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL\n\t\t\t)\n\t\t\t\"\"\", \"\"\" CREATE TABLE IF NOT EXISTS price (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL,\n\t\t\tdate_price TIMESTAMP,\n\t\t\topen_price NUMERIC,\n\t\t\thigh_price NUMERIC,\n\t\t\tlow_price 
NUMERIC,\n\t\t\tclose_price NUMERIC,\n\t\t\tvolume BIGINT,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\", \"\"\" CREATE TABLE IF NOT EXISTS fundamentals (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL,\n\t\t\tlongBusinessSummary TEXT,\n\t\t\tsector VARCHAR,\n\t\t\tsharesOutstanding BIGINT,\n\t\t\tmarketCap BIGINT,\n\t\t\tforwardPE REAL,\n\t\t\tdividendYield REAL,\n\t\t\tbeta REAL,\n\t\t\tpreviousClose REAL,\n\t\t\taverageVolume BIGINT,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\", \"\"\" CREATE TABLE IF NOT EXISTS news (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tnews_date TIMESTAMP NOT NULL,\n\t\t\theadline VARCHAR NOT NULL,\n\t\t\turl VARCHAR NOT NULL,\n\t\t\tsentiment REAL,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\"\n try:\n for command in commands:\n print('Building database tables')\n conn = psycopg2.connect(opt)\n cur = conn.cursor()\n cur.execute(command)\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as e:\n print(e)\n cur.close()\n else:\n pass\n\n\ndef main():\n opt = (\n f'postgres://{config.username}:{config.password}@{config.host}:{config.port}/{config.cluster}.{config.db_name}?sslmode=verify-full&sslrootcert={config.cert_dir}/cc-ca.crt'\n )\n create_db(opt)\n create_tables(opt)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef check_db_exists(opt):\n try:\n conn = psycopg2.connect(opt)\n cur = conn.cursor()\n cur.close()\n print('Database exists.')\n return True\n except:\n print(\"Database doesn't exist.\")\n return False\n\n\ndef create_db(opt):\n if check_db_exists(opt):\n pass\n else:\n print('Creating new database.')\n conn = psycopg2.connect(opt)\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur = conn.cursor()\n cur.execute(f'CREATE DATABASE {config.db_name};')\n cur.close()\n\n\ndef create_tables(opt):\n if 
check_db_exists(opt):\n commands = \"\"\" CREATE TABLE IF NOT EXISTS stock (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tticker VARCHAR NOT NULL,\n\t\t\tname VARCHAR NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL\n\t\t\t)\n\t\t\t\"\"\", \"\"\" CREATE TABLE IF NOT EXISTS price (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL,\n\t\t\tdate_price TIMESTAMP,\n\t\t\topen_price NUMERIC,\n\t\t\thigh_price NUMERIC,\n\t\t\tlow_price NUMERIC,\n\t\t\tclose_price NUMERIC,\n\t\t\tvolume BIGINT,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\", \"\"\" CREATE TABLE IF NOT EXISTS fundamentals (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL,\n\t\t\tlongBusinessSummary TEXT,\n\t\t\tsector VARCHAR,\n\t\t\tsharesOutstanding BIGINT,\n\t\t\tmarketCap BIGINT,\n\t\t\tforwardPE REAL,\n\t\t\tdividendYield REAL,\n\t\t\tbeta REAL,\n\t\t\tpreviousClose REAL,\n\t\t\taverageVolume BIGINT,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\", \"\"\" CREATE TABLE IF NOT EXISTS news (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tnews_date TIMESTAMP NOT NULL,\n\t\t\theadline VARCHAR NOT NULL,\n\t\t\turl VARCHAR NOT NULL,\n\t\t\tsentiment REAL,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\"\n try:\n for command in commands:\n print('Building database tables')\n conn = psycopg2.connect(opt)\n cur = conn.cursor()\n cur.execute(command)\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as e:\n print(e)\n cur.close()\n else:\n pass\n\n\ndef main():\n opt = (\n f'postgres://{config.username}:{config.password}@{config.host}:{config.port}/{config.cluster}.{config.db_name}?sslmode=verify-full&sslrootcert={config.cert_dir}/cc-ca.crt'\n )\n create_db(opt)\n 
create_tables(opt)\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import config\nimport psycopg2\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\n\n\ndef check_db_exists(opt):\n try:\n conn = psycopg2.connect(opt)\n cur = conn.cursor()\n cur.close()\n print('Database exists.')\n return True\n except:\n print(\"Database doesn't exist.\")\n return False\n\n\ndef create_db(opt):\n if check_db_exists(opt):\n pass\n else:\n print('Creating new database.')\n conn = psycopg2.connect(opt)\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cur = conn.cursor()\n cur.execute(f'CREATE DATABASE {config.db_name};')\n cur.close()\n\n\ndef create_tables(opt):\n if check_db_exists(opt):\n commands = \"\"\" CREATE TABLE IF NOT EXISTS stock (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tticker VARCHAR NOT NULL,\n\t\t\tname VARCHAR NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL\n\t\t\t)\n\t\t\t\"\"\", \"\"\" CREATE TABLE IF NOT EXISTS price (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL,\n\t\t\tdate_price TIMESTAMP,\n\t\t\topen_price NUMERIC,\n\t\t\thigh_price NUMERIC,\n\t\t\tlow_price NUMERIC,\n\t\t\tclose_price NUMERIC,\n\t\t\tvolume BIGINT,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\", \"\"\" CREATE TABLE IF NOT EXISTS fundamentals (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL,\n\t\t\tlongBusinessSummary TEXT,\n\t\t\tsector VARCHAR,\n\t\t\tsharesOutstanding BIGINT,\n\t\t\tmarketCap BIGINT,\n\t\t\tforwardPE REAL,\n\t\t\tdividendYield REAL,\n\t\t\tbeta REAL,\n\t\t\tpreviousClose REAL,\n\t\t\taverageVolume BIGINT,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\", \"\"\" CREATE TABLE IF NOT EXISTS news (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT 
NULL,\n\t\t\tnews_date TIMESTAMP NOT NULL,\n\t\t\theadline VARCHAR NOT NULL,\n\t\t\turl VARCHAR NOT NULL,\n\t\t\tsentiment REAL,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\"\n try:\n for command in commands:\n print('Building database tables')\n conn = psycopg2.connect(opt)\n cur = conn.cursor()\n cur.execute(command)\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as e:\n print(e)\n cur.close()\n else:\n pass\n\n\ndef main():\n opt = (\n f'postgres://{config.username}:{config.password}@{config.host}:{config.port}/{config.cluster}.{config.db_name}?sslmode=verify-full&sslrootcert={config.cert_dir}/cc-ca.crt'\n )\n create_db(opt)\n create_tables(opt)\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import config\nimport psycopg2\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\n\ndef check_db_exists(opt):\n\ttry:\n\t\tconn = psycopg2.connect(opt)\n\t\tcur = conn.cursor()\n\t\tcur.close()\n\t\tprint('Database exists.')\n\t\treturn True\n\texcept:\n\t\tprint(\"Database doesn't exist.\")\n\t\treturn False\n\ndef create_db(opt):\n\tif check_db_exists(opt):\n\t\tpass\n\telse:\n\t\tprint(\"Creating new database.\")\n\t\tconn = psycopg2.connect(opt)\n\t\tconn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n\t\tcur = conn.cursor()\n\t\tcur.execute(f\"CREATE DATABASE {config.db_name};\")\n\t\tcur.close()\n\ndef create_tables(opt):\n\tif check_db_exists(opt):\n\t\tcommands = (\"\"\" CREATE TABLE IF NOT EXISTS stock (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tticker VARCHAR NOT NULL,\n\t\t\tname VARCHAR NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL\n\t\t\t)\n\t\t\t\"\"\",\n\t\t\t\"\"\" CREATE TABLE IF NOT EXISTS price (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL,\n\t\t\tdate_price TIMESTAMP,\n\t\t\topen_price NUMERIC,\n\t\t\thigh_price NUMERIC,\n\t\t\tlow_price 
NUMERIC,\n\t\t\tclose_price NUMERIC,\n\t\t\tvolume BIGINT,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\",\n\t\t\t\"\"\" CREATE TABLE IF NOT EXISTS fundamentals (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tcreated_date TIMESTAMP NOT NULL,\n\t\t\tlast_updated_date TIMESTAMP NOT NULL,\n\t\t\tlongBusinessSummary TEXT,\n\t\t\tsector VARCHAR,\n\t\t\tsharesOutstanding BIGINT,\n\t\t\tmarketCap BIGINT,\n\t\t\tforwardPE REAL,\n\t\t\tdividendYield REAL,\n\t\t\tbeta REAL,\n\t\t\tpreviousClose REAL,\n\t\t\taverageVolume BIGINT,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\",\n\t\t\t\"\"\" CREATE TABLE IF NOT EXISTS news (\n\t\t\tid SERIAL PRIMARY KEY,\n\t\t\tstock_id INTEGER NOT NULL,\n\t\t\tnews_date TIMESTAMP NOT NULL,\n\t\t\theadline VARCHAR NOT NULL,\n\t\t\turl VARCHAR NOT NULL,\n\t\t\tsentiment REAL,\n\t\t\tFOREIGN KEY (stock_id) REFERENCES stock(id))\n\t\t\t\"\"\"\n\t\t\t)\n\t\ttry:\n\t\t\tfor command in commands:\n\t\t\t\tprint('Building database tables')\n\t\t\t\tconn = psycopg2.connect(opt)\n\t\t\t\tcur = conn.cursor()\n\t\t\t\tcur.execute(command)\n\t\t\t\tconn.commit()\n\t\t\t\tcur.close()\n\t\texcept (Exception, psycopg2.DatabaseError) as e:\n\t\t\tprint(e)\n\t\t\tcur.close()\n\telse:\n\t\tpass\n\ndef main():\n\topt = f\"postgres://{config.username}:{config.password}@{config.host}:{config.port}/{config.cluster}.{config.db_name}?sslmode=verify-full&sslrootcert={config.cert_dir}/cc-ca.crt\"\n\tcreate_db(opt)\n\tcreate_tables(opt)\n\nif __name__ == \"__main__\":\n main()\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
<|reserved_special_token_0|> class Relation_type(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> def __str__(self): return str(self.name) class Relation(models.Model): id_relation = models.AutoField(primary_key=True) id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name='who1') id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name='who2') description = models.CharField(max_length=100, null=True) id_relation_type = models.ForeignKey(Relation_type, on_delete=models. CASCADE) class Meeting(models.Model): id_meeting = models.AutoField(primary_key=True) start_date = models.DateField(max_length=100) start_time = models.TimeField(max_length=100) description = models.CharField(max_length=100, null=True, default='') duration = models.DurationField(default=0) id_location = models.ForeignKey(Location, on_delete=models.CASCADE) def __str__(self): return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str( self.duration) + ' ' + str(self.description) + ' ' + str(self. 
id_location) class Person_meeting(models.Model): id_person = models.ForeignKey(Person, on_delete=models.CASCADE) id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False) <|reserved_special_token_1|> <|reserved_special_token_0|> class Person(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def __str__(self): return str(self.nickname) + ' ' + self.last_name + '' + self.first_name class Contact_type(models.Model): id_contact_type = models.AutoField(primary_key=True) name = models.CharField(max_length=100) validation_regexp = models.CharField(max_length=100) def __str__(self): return str(self.name) class Contact(models.Model): id_contact = models.AutoField(primary_key=True) id_person = models.ForeignKey(Person, on_delete=models.PROTECT) id_contact_type = models.ForeignKey(Contact_type, on_delete=models. PROTECT, null=True) contact = models.CharField(max_length=100, null=True) def __str__(self): return str(self.id_person) + ' ' + str(self.contact) class Relation_type(models.Model): id_relation = models.AutoField(primary_key=True) name = models.CharField(max_length=100) def __str__(self): return str(self.name) class Relation(models.Model): id_relation = models.AutoField(primary_key=True) id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name='who1') id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name='who2') description = models.CharField(max_length=100, null=True) id_relation_type = models.ForeignKey(Relation_type, on_delete=models. 
CASCADE) class Meeting(models.Model): id_meeting = models.AutoField(primary_key=True) start_date = models.DateField(max_length=100) start_time = models.TimeField(max_length=100) description = models.CharField(max_length=100, null=True, default='') duration = models.DurationField(default=0) id_location = models.ForeignKey(Location, on_delete=models.CASCADE) def __str__(self): return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str( self.duration) + ' ' + str(self.description) + ' ' + str(self. id_location) class Person_meeting(models.Model): id_person = models.ForeignKey(Person, on_delete=models.CASCADE) id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False) <|reserved_special_token_1|> <|reserved_special_token_0|> class Location(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class Person(models.Model): id_person = models.AutoField(primary_key=True) nickname = models.CharField(max_length=100, null=True) first_name = models.CharField(max_length=100, null=True) last_name = models.CharField(max_length=100, null=True) id_location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True, default=52) birth_day = models.DateField(default='1900-01-01') height = models.IntegerField(null=True) GENDER = ('Female', 'Female'), ('Male', 'Male') gender = models.CharField(max_length=20, choices=GENDER, null=True) def __str__(self): return str(self.nickname) + ' ' + self.last_name + '' + self.first_name class Contact_type(models.Model): id_contact_type = models.AutoField(primary_key=True) name = models.CharField(max_length=100) validation_regexp = models.CharField(max_length=100) def __str__(self): return str(self.name) class Contact(models.Model): id_contact = 
models.AutoField(primary_key=True) id_person = models.ForeignKey(Person, on_delete=models.PROTECT) id_contact_type = models.ForeignKey(Contact_type, on_delete=models. PROTECT, null=True) contact = models.CharField(max_length=100, null=True) def __str__(self): return str(self.id_person) + ' ' + str(self.contact) class Relation_type(models.Model): id_relation = models.AutoField(primary_key=True) name = models.CharField(max_length=100) def __str__(self): return str(self.name) class Relation(models.Model): id_relation = models.AutoField(primary_key=True) id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name='who1') id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name='who2') description = models.CharField(max_length=100, null=True) id_relation_type = models.ForeignKey(Relation_type, on_delete=models. CASCADE) class Meeting(models.Model): id_meeting = models.AutoField(primary_key=True) start_date = models.DateField(max_length=100) start_time = models.TimeField(max_length=100) description = models.CharField(max_length=100, null=True, default='') duration = models.DurationField(default=0) id_location = models.ForeignKey(Location, on_delete=models.CASCADE) def __str__(self): return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str( self.duration) + ' ' + str(self.description) + ' ' + str(self. 
id_location) class Person_meeting(models.Model): id_person = models.ForeignKey(Person, on_delete=models.CASCADE) id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False) <|reserved_special_token_1|> <|reserved_special_token_0|> class Location(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def __str__(self): return str(self.name) + ' - ' + str(self.country) + ': ' + str(self .city) class Person(models.Model): id_person = models.AutoField(primary_key=True) nickname = models.CharField(max_length=100, null=True) first_name = models.CharField(max_length=100, null=True) last_name = models.CharField(max_length=100, null=True) id_location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True, default=52) birth_day = models.DateField(default='1900-01-01') height = models.IntegerField(null=True) GENDER = ('Female', 'Female'), ('Male', 'Male') gender = models.CharField(max_length=20, choices=GENDER, null=True) def __str__(self): return str(self.nickname) + ' ' + self.last_name + '' + self.first_name class Contact_type(models.Model): id_contact_type = models.AutoField(primary_key=True) name = models.CharField(max_length=100) validation_regexp = models.CharField(max_length=100) def __str__(self): return str(self.name) class Contact(models.Model): id_contact = models.AutoField(primary_key=True) id_person = models.ForeignKey(Person, on_delete=models.PROTECT) id_contact_type = models.ForeignKey(Contact_type, on_delete=models. 
PROTECT, null=True) contact = models.CharField(max_length=100, null=True) def __str__(self): return str(self.id_person) + ' ' + str(self.contact) class Relation_type(models.Model): id_relation = models.AutoField(primary_key=True) name = models.CharField(max_length=100) def __str__(self): return str(self.name) class Relation(models.Model): id_relation = models.AutoField(primary_key=True) id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name='who1') id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name='who2') description = models.CharField(max_length=100, null=True) id_relation_type = models.ForeignKey(Relation_type, on_delete=models. CASCADE) class Meeting(models.Model): id_meeting = models.AutoField(primary_key=True) start_date = models.DateField(max_length=100) start_time = models.TimeField(max_length=100) description = models.CharField(max_length=100, null=True, default='') duration = models.DurationField(default=0) id_location = models.ForeignKey(Location, on_delete=models.CASCADE) def __str__(self): return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str( self.duration) + ' ' + str(self.description) + ' ' + str(self. 
id_location) class Person_meeting(models.Model): id_person = models.ForeignKey(Person, on_delete=models.CASCADE) id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False) <|reserved_special_token_1|> from django.db import models class Location(models.Model): id_location = models.AutoField(primary_key=True) city = models.CharField(max_length=100, null=True) street_name = models.CharField(max_length=100, null=True) street_number = models.IntegerField(null=True) zip = models.IntegerField(null=True) country = models.CharField(max_length=100, null=True) name = models.CharField(max_length=100, null=True) latitude = models.DecimalField(max_digits=6, decimal_places=3, null=True) longitude = models.DecimalField(max_digits=6, decimal_places=3, null=True) def __str__(self): # print('Name', type(self.name), '\nCountry', type(self.country), '\nCity', self.city) return str(self.name) + ' - ' + str(self.country) + ': ' + str(self.city) class Person(models.Model): id_person = models.AutoField(primary_key=True) nickname = models.CharField(max_length=100, null=True) first_name = models.CharField(max_length=100, null=True) last_name = models.CharField(max_length=100, null=True) id_location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True, default=52) birth_day = models.DateField(default='1900-01-01') height = models.IntegerField(null=True) GENDER = ( ('Female', 'Female'), ('Male', 'Male'), ) gender = models.CharField(max_length=20, choices=GENDER, null=True) def __str__(self): return str(self.nickname) + ' ' + self.last_name + '' + self.first_name class Contact_type(models.Model): id_contact_type = models.AutoField(primary_key=True) name = models.CharField(max_length=100) validation_regexp = models.CharField(max_length=100) def __str__(self): return str(self.name) class Contact(models.Model): id_contact = models.AutoField(primary_key=True) id_person = models.ForeignKey(Person, on_delete=models.PROTECT) id_contact_type = 
models.ForeignKey(Contact_type, on_delete=models.PROTECT, null=True) contact = models.CharField(max_length=100, null=True) def __str__(self): return str(self.id_person) + ' ' + str(self.contact) class Relation_type(models.Model): id_relation = models.AutoField(primary_key=True) name = models.CharField(max_length=100) def __str__(self): return str(self.name) class Relation(models.Model): id_relation = models.AutoField(primary_key=True) id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name="who1") id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name="who2") description = models.CharField(max_length=100, null=True) id_relation_type = models.ForeignKey(Relation_type, on_delete=models.CASCADE) class Meeting(models.Model): id_meeting = models.AutoField(primary_key=True) start_date = models.DateField(max_length=100) start_time = models.TimeField(max_length=100) description = models.CharField(max_length=100, null=True, default='') duration = models.DurationField(default=0) id_location = models.ForeignKey(Location, on_delete=models.CASCADE) def __str__(self): return str(self.start_time) + " - " + str(self.start_date) + " " + str(self.duration) + " " + str( self.description) + " " + str(self.id_location) class Person_meeting(models.Model): id_person = models.ForeignKey(Person, on_delete=models.CASCADE) id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False)
flexible
{ "blob_id": "914f477518918619e0e42184bd03c2a7ed16bb01", "index": 86, "step-1": "<mask token>\n\n\nclass Relation_type(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "step-2": "<mask token>\n\n\nclass Person(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = 
models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "step-3": "<mask token>\n\n\nclass Location(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = 
models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE,\n null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = ('Female', 'Female'), ('Male', 'Male')\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = 
models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "step-4": "<mask token>\n\n\nclass Location(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return str(self.name) + ' - ' + str(self.country) + ': ' + str(self\n .city)\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE,\n null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = ('Female', 'Female'), ('Male', 'Male')\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.\n PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n 
return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who1')\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT,\n related_name='who2')\n description = models.CharField(max_length=100, null=True)\n id_relation_type = models.ForeignKey(Relation_type, on_delete=models.\n CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + ' - ' + str(self.start_date) + ' ' + str(\n self.duration) + ' ' + str(self.description) + ' ' + str(self.\n id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE,\n unique=False)\n", "step-5": "from django.db import models\n\n\nclass Location(models.Model):\n id_location = models.AutoField(primary_key=True)\n city = models.CharField(max_length=100, null=True)\n street_name = models.CharField(max_length=100, null=True)\n street_number = models.IntegerField(null=True)\n zip = models.IntegerField(null=True)\n country = models.CharField(max_length=100, null=True)\n name = models.CharField(max_length=100, null=True)\n latitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)\n longitude = models.DecimalField(max_digits=6, decimal_places=3, null=True)\n\n def __str__(self):\n # 
print('Name', type(self.name), '\\nCountry', type(self.country), '\\nCity', self.city)\n return str(self.name) + ' - ' + str(self.country) + ': ' + str(self.city)\n\n\nclass Person(models.Model):\n id_person = models.AutoField(primary_key=True)\n nickname = models.CharField(max_length=100, null=True)\n first_name = models.CharField(max_length=100, null=True)\n last_name = models.CharField(max_length=100, null=True)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True, default=52)\n birth_day = models.DateField(default='1900-01-01')\n height = models.IntegerField(null=True)\n GENDER = (\n ('Female', 'Female'),\n ('Male', 'Male'),\n )\n gender = models.CharField(max_length=20, choices=GENDER, null=True)\n\n def __str__(self):\n return str(self.nickname) + ' ' + self.last_name + '' + self.first_name\n\n\nclass Contact_type(models.Model):\n id_contact_type = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n validation_regexp = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Contact(models.Model):\n id_contact = models.AutoField(primary_key=True)\n id_person = models.ForeignKey(Person, on_delete=models.PROTECT)\n id_contact_type = models.ForeignKey(Contact_type, on_delete=models.PROTECT, null=True)\n contact = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return str(self.id_person) + ' ' + str(self.contact)\n\n\nclass Relation_type(models.Model):\n id_relation = models.AutoField(primary_key=True)\n name = models.CharField(max_length=100)\n\n def __str__(self):\n return str(self.name)\n\n\nclass Relation(models.Model):\n id_relation = models.AutoField(primary_key=True)\n id_person1 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name=\"who1\")\n id_person2 = models.ForeignKey(Person, on_delete=models.PROTECT, related_name=\"who2\")\n description = models.CharField(max_length=100, null=True)\n id_relation_type = 
models.ForeignKey(Relation_type, on_delete=models.CASCADE)\n\n\nclass Meeting(models.Model):\n id_meeting = models.AutoField(primary_key=True)\n start_date = models.DateField(max_length=100)\n start_time = models.TimeField(max_length=100)\n description = models.CharField(max_length=100, null=True, default='')\n duration = models.DurationField(default=0)\n id_location = models.ForeignKey(Location, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(self.start_time) + \" - \" + str(self.start_date) + \" \" + str(self.duration) + \" \" + str(\n self.description) + \" \" + str(self.id_location)\n\n\nclass Person_meeting(models.Model):\n id_person = models.ForeignKey(Person, on_delete=models.CASCADE)\n id_meeting = models.ForeignKey(Meeting, on_delete=models.CASCADE, unique=False)\n", "step-ids": [ 9, 18, 20, 21, 24 ] }
[ 9, 18, 20, 21, 24 ]
# -*- coding: utf-8 *-* import MySQLdb conn = MySQLdb.connect('localhost', 'ABarbara', 'root', '1dawabarbara') # Abro la conexión def crearTabla(query): # Le paso la cadena que realizará el create como parámetro. cursor = conn.cursor() #En un cursor (de la conexión) almaceno lo que quiero enviar a la base de datos. cursor.execute(query) #Ejecuto la orden cursor.close() # Una vez utilizado, cierro mi cursor. def insertarEmpleados(): cursor= conn.cursor() for x in range(2): try: nombre = raw_input('Nombre: ') apellido = raw_input('Apellido: ') sueldoBase = comprobarSueldo(float(raw_input ('Sueldo base: '))) hijos = (int(raw_input('Número de hijos: '))) sueldoFinal = calcularImponible(sueldoBase, hijos) insert = (("INSERT INTO EMPLEADOS VALUES('%s', '%s', '%f', '%d', '%f')" ) % (nombre, apellido, sueldoBase, hijos, sueldoFinal)) cursor.execute(insert) except ValueError: print "Error, tipo de dato incorrecto" except Exception: print "Error" cursor.close() def comprobarSueldo(sueldoBase): if sueldoBase<600: sueldoBase=600 return sueldoBase def calcularImponible(sueldo, hijos): if hijos>0: sueldoFinal= sueldo+((0.05*sueldo)*hijos) else: sueldoFinal= sueldo return sueldoFinal crearTabla("CREATE TABLE EMPLEADOS (nombre varchar(100), apellido varchar(100), sueldo_base Decimal, hijos int, sueldo_final Decimal)") insertarEmpleados() conn.commit() conn.close()
normal
{ "blob_id": "8a2b7376369513ce403a2542fb8c6d5826b2169b", "index": 9949, "step-1": "# -*- coding: utf-8 *-*\nimport MySQLdb \n\nconn = MySQLdb.connect('localhost', 'ABarbara', 'root', '1dawabarbara') # Abro la conexión \n\ndef crearTabla(query): # Le paso la cadena que realizará el create como parámetro.\n\tcursor = conn.cursor() #En un cursor (de la conexión) almaceno lo que quiero enviar a la base de datos.\n\tcursor.execute(query) #Ejecuto la orden\n\tcursor.close() # Una vez utilizado, cierro mi cursor.\n\ndef insertarEmpleados():\n\tcursor= conn.cursor()\n\tfor x in range(2):\n\t\ttry:\n\t\t\tnombre = raw_input('Nombre: ')\n\t\t\tapellido = raw_input('Apellido: ')\n\t\t\tsueldoBase = comprobarSueldo(float(raw_input ('Sueldo base: ')))\n\t\t\thijos = (int(raw_input('Número de hijos: ')))\n\t\t\tsueldoFinal = calcularImponible(sueldoBase, hijos)\n\t\t\tinsert = ((\"INSERT INTO EMPLEADOS VALUES('%s', '%s', '%f', '%d', '%f')\" ) % (nombre, apellido, sueldoBase, hijos, sueldoFinal))\n\n\t\t\tcursor.execute(insert) \n\n\t\texcept ValueError:\n\t\t\tprint \"Error, tipo de dato incorrecto\"\n\t\texcept Exception:\n\t\t\tprint \"Error\"\n\tcursor.close()\n\ndef comprobarSueldo(sueldoBase):\n\tif sueldoBase<600:\n\t\tsueldoBase=600\n\treturn sueldoBase\n\ndef calcularImponible(sueldo, hijos):\n\tif hijos>0:\n\t\tsueldoFinal= sueldo+((0.05*sueldo)*hijos)\n\telse:\n\t\tsueldoFinal= sueldo\n\treturn sueldoFinal\n\ncrearTabla(\"CREATE TABLE EMPLEADOS (nombre varchar(100), apellido varchar(100), sueldo_base Decimal, hijos int, sueldo_final Decimal)\")\ninsertarEmpleados()\nconn.commit() \nconn.close()", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @npm_decorator(3) def scenario(): """ 1. Check each peer's genesis block 2. Generate new blocks on each peer 2.1. 2 blocks on peer #1 2.2. 4 blocks on peer #2 2.3. 2 blocks on peer #3 3. Connect peers 3.1. peer #1 with #2 (1->2) 3.2. peer #1 with #3 (1->(2 and 3)) 4. Generate new blocks 4.1. 3 blocks on peer #1 4.2. 5 blocks on peer #3 5. Stop all peers """ LOCAL_HOST = 'http://127.0.0.1' from . import genesis_block from . import create_block from . import connect_peer from . import stop_server from . import block_crosscheck total_cnt = 0 pass_cnt = 0 try: assert genesis_block.check(LOCAL_HOST, 3001) assert genesis_block.check(LOCAL_HOST, 3002) assert genesis_block.check(LOCAL_HOST, 3003) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/genesis_block') total_cnt += 1 try: assert create_block.addBlocks(LOCAL_HOST, 3001, num=2) assert create_block.check(LOCAL_HOST, 3001, num=2) assert create_block.addBlocks(LOCAL_HOST, 3002, num=4) assert create_block.check(LOCAL_HOST, 3002, num=4) assert create_block.addBlocks(LOCAL_HOST, 3003, num=2) assert create_block.check(LOCAL_HOST, 3003, num=2) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/create_block') total_cnt += 1 try: assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6002' ) assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6003' ) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/connect_peer') total_cnt += 1 try: isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3) assert isPass assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3) assert block_crosscheck.check(LOCAL_HOST, 3003, newBlocks, num=3) isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5) assert isPass assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5) 
assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/block_crosscheck') total_cnt += 1 try: assert stop_server.stopServer(LOCAL_HOST, 3001) assert stop_server.stopServer(LOCAL_HOST, 3002) assert stop_server.stopServer(LOCAL_HOST, 3003) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/stop_server') total_cnt += 1 return pass_cnt, total_cnt <|reserved_special_token_1|> <|reserved_special_token_0|> sys.path.append('..') <|reserved_special_token_0|> @npm_decorator(3) def scenario(): """ 1. Check each peer's genesis block 2. Generate new blocks on each peer 2.1. 2 blocks on peer #1 2.2. 4 blocks on peer #2 2.3. 2 blocks on peer #3 3. Connect peers 3.1. peer #1 with #2 (1->2) 3.2. peer #1 with #3 (1->(2 and 3)) 4. Generate new blocks 4.1. 3 blocks on peer #1 4.2. 5 blocks on peer #3 5. Stop all peers """ LOCAL_HOST = 'http://127.0.0.1' from . import genesis_block from . import create_block from . import connect_peer from . import stop_server from . 
import block_crosscheck total_cnt = 0 pass_cnt = 0 try: assert genesis_block.check(LOCAL_HOST, 3001) assert genesis_block.check(LOCAL_HOST, 3002) assert genesis_block.check(LOCAL_HOST, 3003) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/genesis_block') total_cnt += 1 try: assert create_block.addBlocks(LOCAL_HOST, 3001, num=2) assert create_block.check(LOCAL_HOST, 3001, num=2) assert create_block.addBlocks(LOCAL_HOST, 3002, num=4) assert create_block.check(LOCAL_HOST, 3002, num=4) assert create_block.addBlocks(LOCAL_HOST, 3003, num=2) assert create_block.check(LOCAL_HOST, 3003, num=2) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/create_block') total_cnt += 1 try: assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6002' ) assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6003' ) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/connect_peer') total_cnt += 1 try: isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3) assert isPass assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3) assert block_crosscheck.check(LOCAL_HOST, 3003, newBlocks, num=3) isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5) assert isPass assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5) assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/block_crosscheck') total_cnt += 1 try: assert stop_server.stopServer(LOCAL_HOST, 3001) assert stop_server.stopServer(LOCAL_HOST, 3002) assert stop_server.stopServer(LOCAL_HOST, 3003) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/stop_server') total_cnt += 1 return pass_cnt, total_cnt <|reserved_special_token_1|> import sys sys.path.append('..') from utils import npm_decorator @npm_decorator(3) 
def scenario(): """ 1. Check each peer's genesis block 2. Generate new blocks on each peer 2.1. 2 blocks on peer #1 2.2. 4 blocks on peer #2 2.3. 2 blocks on peer #3 3. Connect peers 3.1. peer #1 with #2 (1->2) 3.2. peer #1 with #3 (1->(2 and 3)) 4. Generate new blocks 4.1. 3 blocks on peer #1 4.2. 5 blocks on peer #3 5. Stop all peers """ LOCAL_HOST = 'http://127.0.0.1' from . import genesis_block from . import create_block from . import connect_peer from . import stop_server from . import block_crosscheck total_cnt = 0 pass_cnt = 0 try: assert genesis_block.check(LOCAL_HOST, 3001) assert genesis_block.check(LOCAL_HOST, 3002) assert genesis_block.check(LOCAL_HOST, 3003) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/genesis_block') total_cnt += 1 try: assert create_block.addBlocks(LOCAL_HOST, 3001, num=2) assert create_block.check(LOCAL_HOST, 3001, num=2) assert create_block.addBlocks(LOCAL_HOST, 3002, num=4) assert create_block.check(LOCAL_HOST, 3002, num=4) assert create_block.addBlocks(LOCAL_HOST, 3003, num=2) assert create_block.check(LOCAL_HOST, 3003, num=2) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/create_block') total_cnt += 1 try: assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6002' ) assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6003' ) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/connect_peer') total_cnt += 1 try: isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3) assert isPass assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3) assert block_crosscheck.check(LOCAL_HOST, 3003, newBlocks, num=3) isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5) assert isPass assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5) assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5) print('pass', end=' ') pass_cnt += 1 
except: print('FAIL', end=' ') finally: print('test1/block_crosscheck') total_cnt += 1 try: assert stop_server.stopServer(LOCAL_HOST, 3001) assert stop_server.stopServer(LOCAL_HOST, 3002) assert stop_server.stopServer(LOCAL_HOST, 3003) print('pass', end=' ') pass_cnt += 1 except: print('FAIL', end=' ') finally: print('test1/stop_server') total_cnt += 1 return pass_cnt, total_cnt <|reserved_special_token_1|> import sys sys.path.append("..") # Adds higher directory to python modules path. from utils import npm_decorator # num_node = 3 @ npm_decorator(3) def scenario(): """ 1. Check each peer's genesis block 2. Generate new blocks on each peer 2.1. 2 blocks on peer #1 2.2. 4 blocks on peer #2 2.3. 2 blocks on peer #3 3. Connect peers 3.1. peer #1 with #2 (1->2) 3.2. peer #1 with #3 (1->(2 and 3)) 4. Generate new blocks 4.1. 3 blocks on peer #1 4.2. 5 blocks on peer #3 5. Stop all peers """ LOCAL_HOST = "http://127.0.0.1" # import functions from . import genesis_block from . import create_block from . import connect_peer from . import stop_server from . import block_crosscheck total_cnt = 0 pass_cnt = 0 # 1. Check each peer's genesis block try: assert genesis_block.check(LOCAL_HOST, 3001) assert genesis_block.check(LOCAL_HOST, 3002) assert genesis_block.check(LOCAL_HOST, 3003) print("pass", end=' ') pass_cnt += 1 except: print("FAIL", end=' ') finally: print("test1/genesis_block") total_cnt += 1 # 2. Generate new blocks # 2.1. 2 blocks on peer #1 # 2.2. 4 blocks on peer #2 # 2.3. 2 blocks on peer #3 try: assert create_block.addBlocks(LOCAL_HOST, 3001, num=2) assert create_block.check(LOCAL_HOST, 3001, num=2) assert create_block.addBlocks(LOCAL_HOST, 3002, num=4) assert create_block.check(LOCAL_HOST, 3002, num=4) assert create_block.addBlocks(LOCAL_HOST, 3003, num=2) assert create_block.check(LOCAL_HOST, 3003, num=2) print("pass", end=' ') pass_cnt += 1 except: print("FAIL", end=' ') finally: print("test1/create_block") total_cnt += 1 # 3. Connect peers # 3.1. 
peer #1 with #2 (1->2) # 3.2. peer #1 with #3 (1->(2 and 3)) try: assert connect_peer.connectPeer(LOCAL_HOST, 3001, "ws://127.0.0.1:6002") assert connect_peer.connectPeer(LOCAL_HOST, 3001, "ws://127.0.0.1:6003") print("pass", end=' ') pass_cnt += 1 except: print("FAIL", end=' ') finally: print("test1/connect_peer") total_cnt += 1 # 4. Generate new blocks # 4.1. 3 blocks on peer #1 # 4.2. 5 blocks on peer #3 try: isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3) assert isPass assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3) assert block_crosscheck.check(LOCAL_HOST, 3003, newBlocks, num=3) isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5) assert isPass assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5) assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5) print("pass", end=' ') pass_cnt += 1 except: print("FAIL", end=' ') finally: print("test1/block_crosscheck") total_cnt += 1 # 5. Stop all peers try: assert stop_server.stopServer(LOCAL_HOST, 3001) assert stop_server.stopServer(LOCAL_HOST, 3002) assert stop_server.stopServer(LOCAL_HOST, 3003) print("pass", end=' ') pass_cnt += 1 except: print("FAIL", end=' ') finally: print("test1/stop_server") total_cnt += 1 # return pass_cnt_per_test and total_cnt_per_test return pass_cnt, total_cnt
flexible
{ "blob_id": "91cf1f4cf34ac9723be4863e81149c703adca27a", "index": 3583, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\n@npm_decorator(3)\ndef scenario():\n \"\"\"\n 1. Check each peer's genesis block\n 2. Generate new blocks on each peer\n 2.1. 2 blocks on peer #1\n 2.2. 4 blocks on peer #2\n 2.3. 2 blocks on peer #3\n 3. Connect peers\n 3.1. peer #1 with #2 (1->2)\n 3.2. peer #1 with #3 (1->(2 and 3))\n 4. Generate new blocks\n 4.1. 3 blocks on peer #1\n 4.2. 5 blocks on peer #3\n 5. Stop all peers\n \"\"\"\n LOCAL_HOST = 'http://127.0.0.1'\n from . import genesis_block\n from . import create_block\n from . import connect_peer\n from . import stop_server\n from . import block_crosscheck\n total_cnt = 0\n pass_cnt = 0\n try:\n assert genesis_block.check(LOCAL_HOST, 3001)\n assert genesis_block.check(LOCAL_HOST, 3002)\n assert genesis_block.check(LOCAL_HOST, 3003)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/genesis_block')\n total_cnt += 1\n try:\n assert create_block.addBlocks(LOCAL_HOST, 3001, num=2)\n assert create_block.check(LOCAL_HOST, 3001, num=2)\n assert create_block.addBlocks(LOCAL_HOST, 3002, num=4)\n assert create_block.check(LOCAL_HOST, 3002, num=4)\n assert create_block.addBlocks(LOCAL_HOST, 3003, num=2)\n assert create_block.check(LOCAL_HOST, 3003, num=2)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/create_block')\n total_cnt += 1\n try:\n assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6002'\n )\n assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6003'\n )\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/connect_peer')\n total_cnt += 1\n try:\n isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3)\n assert isPass\n assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3)\n assert block_crosscheck.check(LOCAL_HOST, 
3003, newBlocks, num=3)\n isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5)\n assert isPass\n assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5)\n assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/block_crosscheck')\n total_cnt += 1\n try:\n assert stop_server.stopServer(LOCAL_HOST, 3001)\n assert stop_server.stopServer(LOCAL_HOST, 3002)\n assert stop_server.stopServer(LOCAL_HOST, 3003)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/stop_server')\n total_cnt += 1\n return pass_cnt, total_cnt\n", "step-3": "<mask token>\nsys.path.append('..')\n<mask token>\n\n\n@npm_decorator(3)\ndef scenario():\n \"\"\"\n 1. Check each peer's genesis block\n 2. Generate new blocks on each peer\n 2.1. 2 blocks on peer #1\n 2.2. 4 blocks on peer #2\n 2.3. 2 blocks on peer #3\n 3. Connect peers\n 3.1. peer #1 with #2 (1->2)\n 3.2. peer #1 with #3 (1->(2 and 3))\n 4. Generate new blocks\n 4.1. 3 blocks on peer #1\n 4.2. 5 blocks on peer #3\n 5. Stop all peers\n \"\"\"\n LOCAL_HOST = 'http://127.0.0.1'\n from . import genesis_block\n from . import create_block\n from . import connect_peer\n from . import stop_server\n from . 
import block_crosscheck\n total_cnt = 0\n pass_cnt = 0\n try:\n assert genesis_block.check(LOCAL_HOST, 3001)\n assert genesis_block.check(LOCAL_HOST, 3002)\n assert genesis_block.check(LOCAL_HOST, 3003)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/genesis_block')\n total_cnt += 1\n try:\n assert create_block.addBlocks(LOCAL_HOST, 3001, num=2)\n assert create_block.check(LOCAL_HOST, 3001, num=2)\n assert create_block.addBlocks(LOCAL_HOST, 3002, num=4)\n assert create_block.check(LOCAL_HOST, 3002, num=4)\n assert create_block.addBlocks(LOCAL_HOST, 3003, num=2)\n assert create_block.check(LOCAL_HOST, 3003, num=2)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/create_block')\n total_cnt += 1\n try:\n assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6002'\n )\n assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6003'\n )\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/connect_peer')\n total_cnt += 1\n try:\n isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3)\n assert isPass\n assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3)\n assert block_crosscheck.check(LOCAL_HOST, 3003, newBlocks, num=3)\n isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5)\n assert isPass\n assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5)\n assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/block_crosscheck')\n total_cnt += 1\n try:\n assert stop_server.stopServer(LOCAL_HOST, 3001)\n assert stop_server.stopServer(LOCAL_HOST, 3002)\n assert stop_server.stopServer(LOCAL_HOST, 3003)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/stop_server')\n total_cnt += 1\n 
return pass_cnt, total_cnt\n", "step-4": "import sys\nsys.path.append('..')\nfrom utils import npm_decorator\n\n\n@npm_decorator(3)\ndef scenario():\n \"\"\"\n 1. Check each peer's genesis block\n 2. Generate new blocks on each peer\n 2.1. 2 blocks on peer #1\n 2.2. 4 blocks on peer #2\n 2.3. 2 blocks on peer #3\n 3. Connect peers\n 3.1. peer #1 with #2 (1->2)\n 3.2. peer #1 with #3 (1->(2 and 3))\n 4. Generate new blocks\n 4.1. 3 blocks on peer #1\n 4.2. 5 blocks on peer #3\n 5. Stop all peers\n \"\"\"\n LOCAL_HOST = 'http://127.0.0.1'\n from . import genesis_block\n from . import create_block\n from . import connect_peer\n from . import stop_server\n from . import block_crosscheck\n total_cnt = 0\n pass_cnt = 0\n try:\n assert genesis_block.check(LOCAL_HOST, 3001)\n assert genesis_block.check(LOCAL_HOST, 3002)\n assert genesis_block.check(LOCAL_HOST, 3003)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/genesis_block')\n total_cnt += 1\n try:\n assert create_block.addBlocks(LOCAL_HOST, 3001, num=2)\n assert create_block.check(LOCAL_HOST, 3001, num=2)\n assert create_block.addBlocks(LOCAL_HOST, 3002, num=4)\n assert create_block.check(LOCAL_HOST, 3002, num=4)\n assert create_block.addBlocks(LOCAL_HOST, 3003, num=2)\n assert create_block.check(LOCAL_HOST, 3003, num=2)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/create_block')\n total_cnt += 1\n try:\n assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6002'\n )\n assert connect_peer.connectPeer(LOCAL_HOST, 3001, 'ws://127.0.0.1:6003'\n )\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/connect_peer')\n total_cnt += 1\n try:\n isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3)\n assert isPass\n assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3)\n assert block_crosscheck.check(LOCAL_HOST, 3003, newBlocks, 
num=3)\n isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5)\n assert isPass\n assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5)\n assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/block_crosscheck')\n total_cnt += 1\n try:\n assert stop_server.stopServer(LOCAL_HOST, 3001)\n assert stop_server.stopServer(LOCAL_HOST, 3002)\n assert stop_server.stopServer(LOCAL_HOST, 3003)\n print('pass', end=' ')\n pass_cnt += 1\n except:\n print('FAIL', end=' ')\n finally:\n print('test1/stop_server')\n total_cnt += 1\n return pass_cnt, total_cnt\n", "step-5": "import sys\nsys.path.append(\"..\") # Adds higher directory to python modules path.\n\nfrom utils import npm_decorator\n\n\n# num_node = 3\n@ npm_decorator(3)\ndef scenario():\n \"\"\"\n 1. Check each peer's genesis block\n 2. Generate new blocks on each peer\n 2.1. 2 blocks on peer #1\n 2.2. 4 blocks on peer #2\n 2.3. 2 blocks on peer #3\n 3. Connect peers\n 3.1. peer #1 with #2 (1->2)\n 3.2. peer #1 with #3 (1->(2 and 3))\n 4. Generate new blocks\n 4.1. 3 blocks on peer #1\n 4.2. 5 blocks on peer #3\n 5. Stop all peers\n \"\"\"\n LOCAL_HOST = \"http://127.0.0.1\"\n\n # import functions\n from . import genesis_block\n from . import create_block\n from . import connect_peer\n from . import stop_server\n from . import block_crosscheck\n\n total_cnt = 0\n pass_cnt = 0\n\n # 1. Check each peer's genesis block\n try:\n assert genesis_block.check(LOCAL_HOST, 3001)\n assert genesis_block.check(LOCAL_HOST, 3002)\n assert genesis_block.check(LOCAL_HOST, 3003)\n\n print(\"pass\", end=' ')\n pass_cnt += 1\n\n except:\n print(\"FAIL\", end=' ')\n\n finally:\n print(\"test1/genesis_block\")\n total_cnt += 1\n\n # 2. Generate new blocks\n # 2.1. 2 blocks on peer #1\n # 2.2. 4 blocks on peer #2\n # 2.3. 
2 blocks on peer #3\n try:\n assert create_block.addBlocks(LOCAL_HOST, 3001, num=2)\n assert create_block.check(LOCAL_HOST, 3001, num=2)\n\n assert create_block.addBlocks(LOCAL_HOST, 3002, num=4)\n assert create_block.check(LOCAL_HOST, 3002, num=4)\n\n assert create_block.addBlocks(LOCAL_HOST, 3003, num=2)\n assert create_block.check(LOCAL_HOST, 3003, num=2)\n\n print(\"pass\", end=' ')\n pass_cnt += 1\n\n except:\n print(\"FAIL\", end=' ')\n\n finally:\n print(\"test1/create_block\")\n total_cnt += 1\n\n # 3. Connect peers\n # 3.1. peer #1 with #2 (1->2)\n # 3.2. peer #1 with #3 (1->(2 and 3))\n try:\n assert connect_peer.connectPeer(LOCAL_HOST, 3001, \"ws://127.0.0.1:6002\")\n assert connect_peer.connectPeer(LOCAL_HOST, 3001, \"ws://127.0.0.1:6003\")\n\n print(\"pass\", end=' ')\n pass_cnt += 1\n\n except:\n print(\"FAIL\", end=' ')\n\n finally:\n print(\"test1/connect_peer\")\n total_cnt += 1\n\n # 4. Generate new blocks\n # 4.1. 3 blocks on peer #1\n # 4.2. 5 blocks on peer #3\n try:\n isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3001, num=3)\n assert isPass\n assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=3)\n assert block_crosscheck.check(LOCAL_HOST, 3003, newBlocks, num=3)\n\n isPass, newBlocks = block_crosscheck.addBlocks(LOCAL_HOST, 3003, num=5)\n assert isPass\n assert block_crosscheck.check(LOCAL_HOST, 3001, newBlocks, num=5)\n assert block_crosscheck.check(LOCAL_HOST, 3002, newBlocks, num=5)\n\n print(\"pass\", end=' ')\n pass_cnt += 1\n\n except:\n print(\"FAIL\", end=' ')\n\n finally:\n print(\"test1/block_crosscheck\")\n total_cnt += 1\n\n # 5. 
Stop all peers\n try:\n assert stop_server.stopServer(LOCAL_HOST, 3001)\n assert stop_server.stopServer(LOCAL_HOST, 3002)\n assert stop_server.stopServer(LOCAL_HOST, 3003)\n\n print(\"pass\", end=' ')\n pass_cnt += 1\n\n except:\n print(\"FAIL\", end=' ')\n\n finally:\n print(\"test1/stop_server\")\n total_cnt += 1\n\n # return pass_cnt_per_test and total_cnt_per_test\n return pass_cnt, total_cnt\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import sys sys.stdin = open('retire.txt', 'r') def counseling(pay, row): global max_sum if row == N - 1: if arr[row][0] == 1: pay += arr[row][1] max_sum = max(pay, max_sum) return if row == N: max_sum = max(pay, max_sum) return if row > N - 1: return counseling(pay + arr[row][1], row + arr[row][0]) counseling(pay, row + 1) N = int(input()) arr = [list(map(int, input().split())) for _ in range(N)] # visit = [0] * N max_sum = 0 counseling(0, 0) print(max_sum)
normal
{ "blob_id": "9db2377f15aaf28373959dad88c6ec7b6dacffd2", "index": 9512, "step-1": "<mask token>\n\n\ndef counseling(pay, row):\n global max_sum\n if row == N - 1:\n if arr[row][0] == 1:\n pay += arr[row][1]\n max_sum = max(pay, max_sum)\n return\n if row == N:\n max_sum = max(pay, max_sum)\n return\n if row > N - 1:\n return\n counseling(pay + arr[row][1], row + arr[row][0])\n counseling(pay, row + 1)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef counseling(pay, row):\n global max_sum\n if row == N - 1:\n if arr[row][0] == 1:\n pay += arr[row][1]\n max_sum = max(pay, max_sum)\n return\n if row == N:\n max_sum = max(pay, max_sum)\n return\n if row > N - 1:\n return\n counseling(pay + arr[row][1], row + arr[row][0])\n counseling(pay, row + 1)\n\n\n<mask token>\ncounseling(0, 0)\nprint(max_sum)\n", "step-3": "<mask token>\nsys.stdin = open('retire.txt', 'r')\n\n\ndef counseling(pay, row):\n global max_sum\n if row == N - 1:\n if arr[row][0] == 1:\n pay += arr[row][1]\n max_sum = max(pay, max_sum)\n return\n if row == N:\n max_sum = max(pay, max_sum)\n return\n if row > N - 1:\n return\n counseling(pay + arr[row][1], row + arr[row][0])\n counseling(pay, row + 1)\n\n\nN = int(input())\narr = [list(map(int, input().split())) for _ in range(N)]\nmax_sum = 0\ncounseling(0, 0)\nprint(max_sum)\n", "step-4": "import sys\nsys.stdin = open('retire.txt', 'r')\n\n\ndef counseling(pay, row):\n global max_sum\n if row == N - 1:\n if arr[row][0] == 1:\n pay += arr[row][1]\n max_sum = max(pay, max_sum)\n return\n if row == N:\n max_sum = max(pay, max_sum)\n return\n if row > N - 1:\n return\n counseling(pay + arr[row][1], row + arr[row][0])\n counseling(pay, row + 1)\n\n\nN = int(input())\narr = [list(map(int, input().split())) for _ in range(N)]\nmax_sum = 0\ncounseling(0, 0)\nprint(max_sum)\n", "step-5": "import sys\nsys.stdin = open('retire.txt', 'r')\n\ndef counseling(pay, row):\n global max_sum\n if row == N - 1:\n if arr[row][0] == 1:\n pay += arr[row][1]\n max_sum = 
max(pay, max_sum)\n return\n if row == N:\n max_sum = max(pay, max_sum)\n return\n if row > N - 1:\n return\n counseling(pay + arr[row][1], row + arr[row][0])\n counseling(pay, row + 1)\n\nN = int(input())\narr = [list(map(int, input().split())) for _ in range(N)]\n# visit = [0] * N\nmax_sum = 0\ncounseling(0, 0)\nprint(max_sum)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> class TaskSolver: <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def task_calculate_cosin_similarity(self, word1, word2, print_to_screen =True): sim = 0 if word1 in self.W2V_DICT and word2 in self.W2V_DICT: sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self. W2V_DICT[word2])) / 2 if print_to_screen: print("Độ tương đồng giữa '{}' và '{}' là: {}".format(word1, word2, sim)) return sim def test_with_visim_400_data_set(self): visim_400_df = pd.read_csv(os.path.abspath( './Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\t') rs, sim1_arr, sim2_arr = [], [], [] for index, row in visim_400_df.iterrows(): word_1, word_2 = row['Word1'], row['Word2'] sim_1, sim_2 = row['Sim1'], row['Sim2'] if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT: sim = self.task_calculate_cosin_similarity(word_1, word_2, True ) rs.append(sim) sim1_arr.append(sim_1) sim2_arr.append(sim_2) print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr)) print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr)) def task_k_nearest_words(self, k, word): k = int(k) if word not in self.W2V_DICT: print("Word '{}' not in vocab".format(word)) return sims = [] for key in self.W2V_DICT: if key != word: sims.append({'key': key, 'sim': self. 
task_calculate_cosin_similarity(key, word, False)}) k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[0:k - 1] print("{} từ tương đồng nhất với từ '{}' là:".format(k, word)) for w in k_list: print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get ('sim'))) return k_list def task_synonym_antonym_classification(self): self.prepare_data() self.train_synonym_antonym_classification() self.test_synonym_antonym_classification() <|reserved_special_token_0|> <|reserved_special_token_0|> def train_synonym_antonym_classification(self): X_train, Y_train = pickle.load(open( './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+')) unique, counts = np.unique(Y_train, return_counts=True) label_count = dict(zip(unique, counts)) clf = MLPClassifier() clf.fit(X_train, Y_train) pickle.dump(clf, open('./main/model/svm.model', 'wb+')) return clf def prepare_data(self): X, Y = [], [] for file in [ './Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt', './Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']: f = open(file, 'r', encoding='utf8') for index, line in enumerate(f): line_arr = line.split() if len(line_arr) < 2: continue word1, word2 = line_arr[0], line_arr[1] if word1 in self.W2V_DICT and word2 in self.W2V_DICT: vec = self.gen_vec_for_synonym_antonym_pair(word1, word2) X.append(vec) if os.path.basename(f.name) == 'Antonym_vietnamese.txt': Y.append(-1) else: Y.append(1) X, Y = np.array(X), np.array(Y) pickle.dump((X.astype(np.float64), Y), open( './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+')) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class TaskSolver: <|reserved_special_token_0|> def __init__(self): pass <|reserved_special_token_0|> def task_calculate_cosin_similarity(self, word1, word2, print_to_screen =True): sim = 0 if word1 in self.W2V_DICT and word2 in self.W2V_DICT: sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self. 
W2V_DICT[word2])) / 2 if print_to_screen: print("Độ tương đồng giữa '{}' và '{}' là: {}".format(word1, word2, sim)) return sim def test_with_visim_400_data_set(self): visim_400_df = pd.read_csv(os.path.abspath( './Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\t') rs, sim1_arr, sim2_arr = [], [], [] for index, row in visim_400_df.iterrows(): word_1, word_2 = row['Word1'], row['Word2'] sim_1, sim_2 = row['Sim1'], row['Sim2'] if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT: sim = self.task_calculate_cosin_similarity(word_1, word_2, True ) rs.append(sim) sim1_arr.append(sim_1) sim2_arr.append(sim_2) print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr)) print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr)) def task_k_nearest_words(self, k, word): k = int(k) if word not in self.W2V_DICT: print("Word '{}' not in vocab".format(word)) return sims = [] for key in self.W2V_DICT: if key != word: sims.append({'key': key, 'sim': self. task_calculate_cosin_similarity(key, word, False)}) k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[0:k - 1] print("{} từ tương đồng nhất với từ '{}' là:".format(k, word)) for w in k_list: print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get ('sim'))) return k_list def task_synonym_antonym_classification(self): self.prepare_data() self.train_synonym_antonym_classification() self.test_synonym_antonym_classification() def test_synonym_antonym_classification(self): clf = pickle.load(open('./main/model/svm.model', 'rb')) X_test, Y_test = [], [] for file in ['./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt', './Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt', './Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt']: f = open(file, 'r', encoding='utf8') for index, line in enumerate(f): line_arr = line.split() if index == 0: continue word1, word2, relation = line_arr[0], line_arr[1], line_arr[2] if word1 in self.W2V_DICT and word2 in self.W2V_DICT: vec = 
self.gen_vec_for_synonym_antonym_pair(word1, word2) X_test.append(vec) if relation == 'SYN': Y_test.append(1) elif relation == 'ANT': Y_test.append(-1) X_test = X_test pred = clf.predict(X_test) print('Test date: {}'.format(date.today())) print('Precision: {}'.format(precision_score(Y_test, pred))) print('Recall: {}'.format(recall_score(Y_test, pred))) print('F1: {}'.format(f1_score(Y_test, pred))) log = ( """ Test date: {} Precision: {} Recall: {} F1: {} ---------------------------------------- """ .format(date.today(), precision_score(Y_test, pred), recall_score(Y_test, pred), f1_score(Y_test, pred))) log_f = open('./main/log', 'a+') log_f.write(log) log_f.close() def gen_vec_for_synonym_antonym_pair(self, word1, word2): np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self. W2V_DICT[word2]) return np.concatenate((np_vec1, np_vec2, np_vec1 + np_vec2, np_vec1 * np_vec2, np.absolute(np_vec1 - np_vec2)), axis=0) def train_synonym_antonym_classification(self): X_train, Y_train = pickle.load(open( './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+')) unique, counts = np.unique(Y_train, return_counts=True) label_count = dict(zip(unique, counts)) clf = MLPClassifier() clf.fit(X_train, Y_train) pickle.dump(clf, open('./main/model/svm.model', 'wb+')) return clf def prepare_data(self): X, Y = [], [] for file in [ './Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt', './Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']: f = open(file, 'r', encoding='utf8') for index, line in enumerate(f): line_arr = line.split() if len(line_arr) < 2: continue word1, word2 = line_arr[0], line_arr[1] if word1 in self.W2V_DICT and word2 in self.W2V_DICT: vec = self.gen_vec_for_synonym_antonym_pair(word1, word2) X.append(vec) if os.path.basename(f.name) == 'Antonym_vietnamese.txt': Y.append(-1) else: Y.append(1) X, Y = np.array(X), np.array(Y) pickle.dump((X.astype(np.float64), Y), open( './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 
'wb+')) def gen_w2v_dict(self): with open('./main/dataset/w2v/w2v-dict.json', 'w+') as f: if f.read(1): f.seek(0) self.W2V_DICT = json.load(f) if not self.W2V_DICT: with open('./Word-Similarity/word2vec/W2V_150.txt', 'r', encoding='utf8') as f: for index, line in enumerate(f): line_arr = line.split() if index > 1: self.W2V_DICT.update({line_arr[0]: np.array( line_arr[1:]).astype(float).tolist()}) f = open('./main/dataset/w2v/w2v-dict.json', 'w+') f.write(json.dumps(self.W2V_DICT)) f.close() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class TaskSolver: <|reserved_special_token_0|> def __init__(self): pass def solve(self, task_name, **kwargs): self.gen_w2v_dict() if task_name == 'k-nearest-words': self.task_k_nearest_words(kwargs.get('k'), kwargs.get('word')) elif task_name == 'synonym-antonym-classification': self.task_synonym_antonym_classification() elif task_name == 'test-cosin-similarity-with-visim-400-dataset': self.test_with_visim_400_data_set() def task_calculate_cosin_similarity(self, word1, word2, print_to_screen =True): sim = 0 if word1 in self.W2V_DICT and word2 in self.W2V_DICT: sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self. 
W2V_DICT[word2])) / 2 if print_to_screen: print("Độ tương đồng giữa '{}' và '{}' là: {}".format(word1, word2, sim)) return sim def test_with_visim_400_data_set(self): visim_400_df = pd.read_csv(os.path.abspath( './Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\t') rs, sim1_arr, sim2_arr = [], [], [] for index, row in visim_400_df.iterrows(): word_1, word_2 = row['Word1'], row['Word2'] sim_1, sim_2 = row['Sim1'], row['Sim2'] if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT: sim = self.task_calculate_cosin_similarity(word_1, word_2, True ) rs.append(sim) sim1_arr.append(sim_1) sim2_arr.append(sim_2) print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr)) print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr)) def task_k_nearest_words(self, k, word): k = int(k) if word not in self.W2V_DICT: print("Word '{}' not in vocab".format(word)) return sims = [] for key in self.W2V_DICT: if key != word: sims.append({'key': key, 'sim': self. task_calculate_cosin_similarity(key, word, False)}) k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[0:k - 1] print("{} từ tương đồng nhất với từ '{}' là:".format(k, word)) for w in k_list: print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get ('sim'))) return k_list def task_synonym_antonym_classification(self): self.prepare_data() self.train_synonym_antonym_classification() self.test_synonym_antonym_classification() def test_synonym_antonym_classification(self): clf = pickle.load(open('./main/model/svm.model', 'rb')) X_test, Y_test = [], [] for file in ['./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt', './Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt', './Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt']: f = open(file, 'r', encoding='utf8') for index, line in enumerate(f): line_arr = line.split() if index == 0: continue word1, word2, relation = line_arr[0], line_arr[1], line_arr[2] if word1 in self.W2V_DICT and word2 in self.W2V_DICT: vec = 
self.gen_vec_for_synonym_antonym_pair(word1, word2) X_test.append(vec) if relation == 'SYN': Y_test.append(1) elif relation == 'ANT': Y_test.append(-1) X_test = X_test pred = clf.predict(X_test) print('Test date: {}'.format(date.today())) print('Precision: {}'.format(precision_score(Y_test, pred))) print('Recall: {}'.format(recall_score(Y_test, pred))) print('F1: {}'.format(f1_score(Y_test, pred))) log = ( """ Test date: {} Precision: {} Recall: {} F1: {} ---------------------------------------- """ .format(date.today(), precision_score(Y_test, pred), recall_score(Y_test, pred), f1_score(Y_test, pred))) log_f = open('./main/log', 'a+') log_f.write(log) log_f.close() def gen_vec_for_synonym_antonym_pair(self, word1, word2): np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self. W2V_DICT[word2]) return np.concatenate((np_vec1, np_vec2, np_vec1 + np_vec2, np_vec1 * np_vec2, np.absolute(np_vec1 - np_vec2)), axis=0) def train_synonym_antonym_classification(self): X_train, Y_train = pickle.load(open( './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+')) unique, counts = np.unique(Y_train, return_counts=True) label_count = dict(zip(unique, counts)) clf = MLPClassifier() clf.fit(X_train, Y_train) pickle.dump(clf, open('./main/model/svm.model', 'wb+')) return clf def prepare_data(self): X, Y = [], [] for file in [ './Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt', './Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']: f = open(file, 'r', encoding='utf8') for index, line in enumerate(f): line_arr = line.split() if len(line_arr) < 2: continue word1, word2 = line_arr[0], line_arr[1] if word1 in self.W2V_DICT and word2 in self.W2V_DICT: vec = self.gen_vec_for_synonym_antonym_pair(word1, word2) X.append(vec) if os.path.basename(f.name) == 'Antonym_vietnamese.txt': Y.append(-1) else: Y.append(1) X, Y = np.array(X), np.array(Y) pickle.dump((X.astype(np.float64), Y), open( './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 
'wb+')) def gen_w2v_dict(self): with open('./main/dataset/w2v/w2v-dict.json', 'w+') as f: if f.read(1): f.seek(0) self.W2V_DICT = json.load(f) if not self.W2V_DICT: with open('./Word-Similarity/word2vec/W2V_150.txt', 'r', encoding='utf8') as f: for index, line in enumerate(f): line_arr = line.split() if index > 1: self.W2V_DICT.update({line_arr[0]: np.array( line_arr[1:]).astype(float).tolist()}) f = open('./main/dataset/w2v/w2v-dict.json', 'w+') f.write(json.dumps(self.W2V_DICT)) f.close() <|reserved_special_token_0|> <|reserved_special_token_1|> import os, pickle, json, ast import pandas as pd from scipy import spatial import numpy as np from scipy import stats from sklearn.model_selection import train_test_split from sklearn import preprocessing from sklearn.svm import LinearSVC from sklearn.metrics import precision_score, recall_score, f1_score from datetime import date from sklearn.neural_network import MLPClassifier class TaskSolver: W2V_DICT = dict() def __init__(self): pass def solve(self, task_name, **kwargs): self.gen_w2v_dict() if task_name == 'k-nearest-words': self.task_k_nearest_words(kwargs.get('k'), kwargs.get('word')) elif task_name == 'synonym-antonym-classification': self.task_synonym_antonym_classification() elif task_name == 'test-cosin-similarity-with-visim-400-dataset': self.test_with_visim_400_data_set() def task_calculate_cosin_similarity(self, word1, word2, print_to_screen =True): sim = 0 if word1 in self.W2V_DICT and word2 in self.W2V_DICT: sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self. 
W2V_DICT[word2])) / 2 if print_to_screen: print("Độ tương đồng giữa '{}' và '{}' là: {}".format(word1, word2, sim)) return sim def test_with_visim_400_data_set(self): visim_400_df = pd.read_csv(os.path.abspath( './Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\t') rs, sim1_arr, sim2_arr = [], [], [] for index, row in visim_400_df.iterrows(): word_1, word_2 = row['Word1'], row['Word2'] sim_1, sim_2 = row['Sim1'], row['Sim2'] if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT: sim = self.task_calculate_cosin_similarity(word_1, word_2, True ) rs.append(sim) sim1_arr.append(sim_1) sim2_arr.append(sim_2) print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr)) print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr)) def task_k_nearest_words(self, k, word): k = int(k) if word not in self.W2V_DICT: print("Word '{}' not in vocab".format(word)) return sims = [] for key in self.W2V_DICT: if key != word: sims.append({'key': key, 'sim': self. task_calculate_cosin_similarity(key, word, False)}) k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[0:k - 1] print("{} từ tương đồng nhất với từ '{}' là:".format(k, word)) for w in k_list: print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get ('sim'))) return k_list def task_synonym_antonym_classification(self): self.prepare_data() self.train_synonym_antonym_classification() self.test_synonym_antonym_classification() def test_synonym_antonym_classification(self): clf = pickle.load(open('./main/model/svm.model', 'rb')) X_test, Y_test = [], [] for file in ['./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt', './Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt', './Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt']: f = open(file, 'r', encoding='utf8') for index, line in enumerate(f): line_arr = line.split() if index == 0: continue word1, word2, relation = line_arr[0], line_arr[1], line_arr[2] if word1 in self.W2V_DICT and word2 in self.W2V_DICT: vec = 
self.gen_vec_for_synonym_antonym_pair(word1, word2) X_test.append(vec) if relation == 'SYN': Y_test.append(1) elif relation == 'ANT': Y_test.append(-1) X_test = X_test pred = clf.predict(X_test) print('Test date: {}'.format(date.today())) print('Precision: {}'.format(precision_score(Y_test, pred))) print('Recall: {}'.format(recall_score(Y_test, pred))) print('F1: {}'.format(f1_score(Y_test, pred))) log = ( """ Test date: {} Precision: {} Recall: {} F1: {} ---------------------------------------- """ .format(date.today(), precision_score(Y_test, pred), recall_score(Y_test, pred), f1_score(Y_test, pred))) log_f = open('./main/log', 'a+') log_f.write(log) log_f.close() def gen_vec_for_synonym_antonym_pair(self, word1, word2): np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self. W2V_DICT[word2]) return np.concatenate((np_vec1, np_vec2, np_vec1 + np_vec2, np_vec1 * np_vec2, np.absolute(np_vec1 - np_vec2)), axis=0) def train_synonym_antonym_classification(self): X_train, Y_train = pickle.load(open( './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+')) unique, counts = np.unique(Y_train, return_counts=True) label_count = dict(zip(unique, counts)) clf = MLPClassifier() clf.fit(X_train, Y_train) pickle.dump(clf, open('./main/model/svm.model', 'wb+')) return clf def prepare_data(self): X, Y = [], [] for file in [ './Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt', './Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']: f = open(file, 'r', encoding='utf8') for index, line in enumerate(f): line_arr = line.split() if len(line_arr) < 2: continue word1, word2 = line_arr[0], line_arr[1] if word1 in self.W2V_DICT and word2 in self.W2V_DICT: vec = self.gen_vec_for_synonym_antonym_pair(word1, word2) X.append(vec) if os.path.basename(f.name) == 'Antonym_vietnamese.txt': Y.append(-1) else: Y.append(1) X, Y = np.array(X), np.array(Y) pickle.dump((X.astype(np.float64), Y), open( './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 
'wb+')) def gen_w2v_dict(self): with open('./main/dataset/w2v/w2v-dict.json', 'w+') as f: if f.read(1): f.seek(0) self.W2V_DICT = json.load(f) if not self.W2V_DICT: with open('./Word-Similarity/word2vec/W2V_150.txt', 'r', encoding='utf8') as f: for index, line in enumerate(f): line_arr = line.split() if index > 1: self.W2V_DICT.update({line_arr[0]: np.array( line_arr[1:]).astype(float).tolist()}) f = open('./main/dataset/w2v/w2v-dict.json', 'w+') f.write(json.dumps(self.W2V_DICT)) f.close() if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='Helper') parser.add_argument('--task', required=True, metavar='path', help= """ Task name: 0 => Cosin Similarity 1 => Test Cosine Similarity with Visim-400 dataset 2 => K Nearest Words 3 => Synonym Antonym Classification """ ) parser.add_argument('--word', metavar='path', help= "Target word used in 'K Nearest Words' task") parser.add_argument('--k', metavar='path', help= "Number of 'Nearest Words' used in 'K Nearest Words' task") parser.add_argument('--word1', metavar='path', help= "Source word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task" ) parser.add_argument('--word2', metavar='path', help= "Target word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task" ) args = parser.parse_args() task = args.task k = args.k word = args.word word1 = args.word1 word2 = args.word2 switcher = {'0': 'calculate-cosin-similarity', '1': 'test-cosin-similarity-with-visim-400-dataset', '2': 'k-nearest-words', '3': 'synonym-antonym-classification', '4': 'predict-synonym-antonym'} task_name = switcher.get(task, 'Invalid task') task_solver = TaskSolver() task_solver.solve(task_name, k=k, word=word, word1=word1, word2=word2) <|reserved_special_token_1|> #!/usr/bin/env python # -*- coding: utf-8 -*- import os, pickle, json, ast import pandas as pd from scipy import spatial import numpy as np from scipy import stats from sklearn.model_selection import train_test_split from sklearn import 
preprocessing from sklearn.svm import LinearSVC from sklearn.metrics import precision_score, recall_score, f1_score from datetime import date from sklearn.neural_network import MLPClassifier class TaskSolver: W2V_DICT = dict() def __init__(self): pass def solve(self, task_name, **kwargs): self.gen_w2v_dict() if task_name == 'k-nearest-words': self.task_k_nearest_words(kwargs.get('k'), kwargs.get('word')) elif task_name == 'synonym-antonym-classification': self.task_synonym_antonym_classification() elif task_name == 'test-cosin-similarity-with-visim-400-dataset': self.test_with_visim_400_data_set() def task_calculate_cosin_similarity(self, word1, word2, print_to_screen=True): sim = 0 if word1 in self.W2V_DICT and word2 in self.W2V_DICT: sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.W2V_DICT[word2])) / 2 if (print_to_screen): print("Độ tương đồng giữa '{}' và '{}' là: {}".format(word1, word2, sim)) return sim def test_with_visim_400_data_set(self): visim_400_df = pd.read_csv( os.path.abspath('./Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep="\t") rs, sim1_arr, sim2_arr = [], [], [] for index, row in visim_400_df.iterrows(): word_1, word_2 = row['Word1'], row['Word2'] sim_1, sim_2 = row['Sim1'], row['Sim2'] if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT: sim = self.task_calculate_cosin_similarity(word_1, word_2, True) rs.append(sim) sim1_arr.append(sim_1) sim2_arr.append(sim_2) print("Hệ số tương đồng Pearson là: ", stats.pearsonr(rs, sim1_arr)) print("Hệ số tương đồng Spearman là: ", stats.spearmanr(rs, sim1_arr)) def task_k_nearest_words(self, k, word): k = int(k) if word not in self.W2V_DICT: print("Word '{}' not in vocab".format(word)) return sims = [] for key in self.W2V_DICT: if key != word: sims.append({ 'key': key, 'sim': self.task_calculate_cosin_similarity(key, word, False) }) k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[0: (k - 1)] print("{} từ tương đồng nhất với từ '{}' là:".format(k, word)) for w in k_list: 
print("Từ {} có độ tương đồng là {}".format(w.get('key'), w.get('sim'))) return k_list def task_synonym_antonym_classification(self): self.prepare_data() self.train_synonym_antonym_classification() self.test_synonym_antonym_classification() def test_synonym_antonym_classification(self): clf = pickle.load(open('./main/model/svm.model', 'rb')) X_test, Y_test = [], [] for file in [ './Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt', './Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt', './Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt' ]: f = open(file, 'r', encoding="utf8") for index, line in enumerate(f): line_arr = line.split() if index == 0: continue word1, word2, relation = line_arr[0], line_arr[1], line_arr[2] if word1 in self.W2V_DICT and word2 in self.W2V_DICT: vec = self.gen_vec_for_synonym_antonym_pair(word1, word2) X_test.append(vec) if relation == 'SYN': Y_test.append(1) elif relation == 'ANT': Y_test.append(-1) X_test = X_test pred = clf.predict(X_test) print("Test date: {}".format(date.today())) print("Precision: {}".format(precision_score(Y_test, pred))) print("Recall: {}".format(recall_score(Y_test, pred))) print("F1: {}".format(f1_score(Y_test, pred))) log = """ Test date: {} Precision: {} Recall: {} F1: {} \n ---------------------------------------- """.format( date.today(), precision_score(Y_test, pred), recall_score(Y_test, pred), f1_score(Y_test, pred)) log_f = open('./main/log', 'a+') log_f.write(log) log_f.close() def gen_vec_for_synonym_antonym_pair(self, word1, word2): np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self.W2V_DICT[word2]) return np.concatenate(( np_vec1, np_vec2, np_vec1 + np_vec2, np_vec1 * np_vec2, np.absolute(np_vec1 - np_vec2), # np.array([self.task_calculate_cosin_similarity(word1, word2, False)]) ), axis=0) def train_synonym_antonym_classification(self): X_train, Y_train = pickle.load(open('./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+')) unique, counts = np.unique(Y_train, 
return_counts=True) label_count = dict(zip(unique, counts)) clf = MLPClassifier() clf.fit(X_train, Y_train) pickle.dump(clf, open('./main/model/svm.model', 'wb+')) return clf def prepare_data(self): X, Y = [], [] for file in [ './Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt', './Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt' ]: f = open(file, 'r', encoding="utf8") for index, line in enumerate(f): line_arr = line.split() if len(line_arr) < 2: continue word1, word2 = line_arr[0], line_arr[1] if word1 in self.W2V_DICT and word2 in self.W2V_DICT: vec = self.gen_vec_for_synonym_antonym_pair(word1, word2) X.append(vec) if os.path.basename(f.name) == 'Antonym_vietnamese.txt': Y.append(-1) else: Y.append(1) X, Y = np.array(X), np.array(Y) pickle.dump( ( X.astype(np.float64), Y ), open('./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+') ) def gen_w2v_dict(self): with open('./main/dataset/w2v/w2v-dict.json', 'w+') as f: if f.read(1): f.seek(0) self.W2V_DICT = json.load(f) if not self.W2V_DICT: with open('./Word-Similarity/word2vec/W2V_150.txt', 'r', encoding="utf8") as f: for index, line in enumerate(f): line_arr = line.split() if index > 1: self.W2V_DICT.update({line_arr[0]: np.array(line_arr[1:]).astype(float).tolist()}) f = open("./main/dataset/w2v/w2v-dict.json","w+") f.write(json.dumps(self.W2V_DICT)) f.close() if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Helper") parser.add_argument( "--task", required=True, metavar="path", help=""" Task name: 0 => Cosin Similarity 1 => Test Cosine Similarity with Visim-400 dataset 2 => K Nearest Words 3 => Synonym Antonym Classification """, ) parser.add_argument( "--word", metavar="path", help="Target word used in 'K Nearest Words' task", ) parser.add_argument( "--k", metavar="path", help="Number of 'Nearest Words' used in 'K Nearest Words' task", ) parser.add_argument( "--word1", metavar="path", help="Source word used in 'Cosin Similarity' and 
'Predict Synonym Antonym' task", ) parser.add_argument( "--word2", metavar="path", help="Target word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task", ) args = parser.parse_args() task = args.task k = args.k word = args.word word1 = args.word1 word2 = args.word2 switcher = { '0': 'calculate-cosin-similarity', '1': 'test-cosin-similarity-with-visim-400-dataset', '2': 'k-nearest-words', '3': 'synonym-antonym-classification', '4': 'predict-synonym-antonym' } task_name = switcher.get(task, "Invalid task") task_solver = TaskSolver() task_solver.solve( task_name, k=k, word=word, word1=word1, word2=word2 )
flexible
{ "blob_id": "c23bd136991bfb41f153321420c2fcfba0c843f4", "index": 1513, "step-1": "<mask token>\n\n\nclass TaskSolver:\n <mask token>\n <mask token>\n <mask token>\n\n def task_calculate_cosin_similarity(self, word1, word2, print_to_screen\n =True):\n sim = 0\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.\n W2V_DICT[word2])) / 2\n if print_to_screen:\n print(\"Độ tương đồng giữa '{}' và '{}' là: {}\".format(word1,\n word2, sim))\n return sim\n\n def test_with_visim_400_data_set(self):\n visim_400_df = pd.read_csv(os.path.abspath(\n './Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\\t')\n rs, sim1_arr, sim2_arr = [], [], []\n for index, row in visim_400_df.iterrows():\n word_1, word_2 = row['Word1'], row['Word2']\n sim_1, sim_2 = row['Sim1'], row['Sim2']\n if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:\n sim = self.task_calculate_cosin_similarity(word_1, word_2, True\n )\n rs.append(sim)\n sim1_arr.append(sim_1)\n sim2_arr.append(sim_2)\n print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr))\n print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr))\n\n def task_k_nearest_words(self, k, word):\n k = int(k)\n if word not in self.W2V_DICT:\n print(\"Word '{}' not in vocab\".format(word))\n return\n sims = []\n for key in self.W2V_DICT:\n if key != word:\n sims.append({'key': key, 'sim': self.\n task_calculate_cosin_similarity(key, word, False)})\n k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[0:k - 1]\n print(\"{} từ tương đồng nhất với từ '{}' là:\".format(k, word))\n for w in k_list:\n print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get\n ('sim')))\n return k_list\n\n def task_synonym_antonym_classification(self):\n self.prepare_data()\n self.train_synonym_antonym_classification()\n self.test_synonym_antonym_classification()\n <mask token>\n <mask token>\n\n def train_synonym_antonym_classification(self):\n X_train, 
Y_train = pickle.load(open(\n './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))\n unique, counts = np.unique(Y_train, return_counts=True)\n label_count = dict(zip(unique, counts))\n clf = MLPClassifier()\n clf.fit(X_train, Y_train)\n pickle.dump(clf, open('./main/model/svm.model', 'wb+'))\n return clf\n\n def prepare_data(self):\n X, Y = [], []\n for file in [\n './Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt',\n './Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']:\n f = open(file, 'r', encoding='utf8')\n for index, line in enumerate(f):\n line_arr = line.split()\n if len(line_arr) < 2:\n continue\n word1, word2 = line_arr[0], line_arr[1]\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)\n X.append(vec)\n if os.path.basename(f.name) == 'Antonym_vietnamese.txt':\n Y.append(-1)\n else:\n Y.append(1)\n X, Y = np.array(X), np.array(Y)\n pickle.dump((X.astype(np.float64), Y), open(\n './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+'))\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TaskSolver:\n <mask token>\n\n def __init__(self):\n pass\n <mask token>\n\n def task_calculate_cosin_similarity(self, word1, word2, print_to_screen\n =True):\n sim = 0\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.\n W2V_DICT[word2])) / 2\n if print_to_screen:\n print(\"Độ tương đồng giữa '{}' và '{}' là: {}\".format(word1,\n word2, sim))\n return sim\n\n def test_with_visim_400_data_set(self):\n visim_400_df = pd.read_csv(os.path.abspath(\n './Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\\t')\n rs, sim1_arr, sim2_arr = [], [], []\n for index, row in visim_400_df.iterrows():\n word_1, word_2 = row['Word1'], row['Word2']\n sim_1, sim_2 = row['Sim1'], row['Sim2']\n if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:\n sim = 
self.task_calculate_cosin_similarity(word_1, word_2, True\n )\n rs.append(sim)\n sim1_arr.append(sim_1)\n sim2_arr.append(sim_2)\n print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr))\n print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr))\n\n def task_k_nearest_words(self, k, word):\n k = int(k)\n if word not in self.W2V_DICT:\n print(\"Word '{}' not in vocab\".format(word))\n return\n sims = []\n for key in self.W2V_DICT:\n if key != word:\n sims.append({'key': key, 'sim': self.\n task_calculate_cosin_similarity(key, word, False)})\n k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[0:k - 1]\n print(\"{} từ tương đồng nhất với từ '{}' là:\".format(k, word))\n for w in k_list:\n print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get\n ('sim')))\n return k_list\n\n def task_synonym_antonym_classification(self):\n self.prepare_data()\n self.train_synonym_antonym_classification()\n self.test_synonym_antonym_classification()\n\n def test_synonym_antonym_classification(self):\n clf = pickle.load(open('./main/model/svm.model', 'rb'))\n X_test, Y_test = [], []\n for file in ['./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt',\n './Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt',\n './Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt']:\n f = open(file, 'r', encoding='utf8')\n for index, line in enumerate(f):\n line_arr = line.split()\n if index == 0:\n continue\n word1, word2, relation = line_arr[0], line_arr[1], line_arr[2]\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)\n X_test.append(vec)\n if relation == 'SYN':\n Y_test.append(1)\n elif relation == 'ANT':\n Y_test.append(-1)\n X_test = X_test\n pred = clf.predict(X_test)\n print('Test date: {}'.format(date.today()))\n print('Precision: {}'.format(precision_score(Y_test, pred)))\n print('Recall: {}'.format(recall_score(Y_test, pred)))\n print('F1: {}'.format(f1_score(Y_test, 
pred)))\n log = (\n \"\"\"\n Test date: {}\n Precision: {}\n Recall: {}\n F1: {}\n \n\n ----------------------------------------\n \"\"\"\n .format(date.today(), precision_score(Y_test, pred),\n recall_score(Y_test, pred), f1_score(Y_test, pred)))\n log_f = open('./main/log', 'a+')\n log_f.write(log)\n log_f.close()\n\n def gen_vec_for_synonym_antonym_pair(self, word1, word2):\n np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self.\n W2V_DICT[word2])\n return np.concatenate((np_vec1, np_vec2, np_vec1 + np_vec2, np_vec1 *\n np_vec2, np.absolute(np_vec1 - np_vec2)), axis=0)\n\n def train_synonym_antonym_classification(self):\n X_train, Y_train = pickle.load(open(\n './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))\n unique, counts = np.unique(Y_train, return_counts=True)\n label_count = dict(zip(unique, counts))\n clf = MLPClassifier()\n clf.fit(X_train, Y_train)\n pickle.dump(clf, open('./main/model/svm.model', 'wb+'))\n return clf\n\n def prepare_data(self):\n X, Y = [], []\n for file in [\n './Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt',\n './Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']:\n f = open(file, 'r', encoding='utf8')\n for index, line in enumerate(f):\n line_arr = line.split()\n if len(line_arr) < 2:\n continue\n word1, word2 = line_arr[0], line_arr[1]\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)\n X.append(vec)\n if os.path.basename(f.name) == 'Antonym_vietnamese.txt':\n Y.append(-1)\n else:\n Y.append(1)\n X, Y = np.array(X), np.array(Y)\n pickle.dump((X.astype(np.float64), Y), open(\n './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+'))\n\n def gen_w2v_dict(self):\n with open('./main/dataset/w2v/w2v-dict.json', 'w+') as f:\n if f.read(1):\n f.seek(0)\n self.W2V_DICT = json.load(f)\n if not self.W2V_DICT:\n with open('./Word-Similarity/word2vec/W2V_150.txt', 'r',\n encoding='utf8') as f:\n for index, line in 
enumerate(f):\n line_arr = line.split()\n if index > 1:\n self.W2V_DICT.update({line_arr[0]: np.array(\n line_arr[1:]).astype(float).tolist()})\n f = open('./main/dataset/w2v/w2v-dict.json', 'w+')\n f.write(json.dumps(self.W2V_DICT))\n f.close()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TaskSolver:\n <mask token>\n\n def __init__(self):\n pass\n\n def solve(self, task_name, **kwargs):\n self.gen_w2v_dict()\n if task_name == 'k-nearest-words':\n self.task_k_nearest_words(kwargs.get('k'), kwargs.get('word'))\n elif task_name == 'synonym-antonym-classification':\n self.task_synonym_antonym_classification()\n elif task_name == 'test-cosin-similarity-with-visim-400-dataset':\n self.test_with_visim_400_data_set()\n\n def task_calculate_cosin_similarity(self, word1, word2, print_to_screen\n =True):\n sim = 0\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.\n W2V_DICT[word2])) / 2\n if print_to_screen:\n print(\"Độ tương đồng giữa '{}' và '{}' là: {}\".format(word1,\n word2, sim))\n return sim\n\n def test_with_visim_400_data_set(self):\n visim_400_df = pd.read_csv(os.path.abspath(\n './Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\\t')\n rs, sim1_arr, sim2_arr = [], [], []\n for index, row in visim_400_df.iterrows():\n word_1, word_2 = row['Word1'], row['Word2']\n sim_1, sim_2 = row['Sim1'], row['Sim2']\n if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:\n sim = self.task_calculate_cosin_similarity(word_1, word_2, True\n )\n rs.append(sim)\n sim1_arr.append(sim_1)\n sim2_arr.append(sim_2)\n print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr))\n print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr))\n\n def task_k_nearest_words(self, k, word):\n k = int(k)\n if word not in self.W2V_DICT:\n print(\"Word '{}' not in vocab\".format(word))\n return\n sims = []\n for key in self.W2V_DICT:\n if key != word:\n sims.append({'key': key, 'sim': 
self.\n task_calculate_cosin_similarity(key, word, False)})\n k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[0:k - 1]\n print(\"{} từ tương đồng nhất với từ '{}' là:\".format(k, word))\n for w in k_list:\n print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get\n ('sim')))\n return k_list\n\n def task_synonym_antonym_classification(self):\n self.prepare_data()\n self.train_synonym_antonym_classification()\n self.test_synonym_antonym_classification()\n\n def test_synonym_antonym_classification(self):\n clf = pickle.load(open('./main/model/svm.model', 'rb'))\n X_test, Y_test = [], []\n for file in ['./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt',\n './Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt',\n './Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt']:\n f = open(file, 'r', encoding='utf8')\n for index, line in enumerate(f):\n line_arr = line.split()\n if index == 0:\n continue\n word1, word2, relation = line_arr[0], line_arr[1], line_arr[2]\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)\n X_test.append(vec)\n if relation == 'SYN':\n Y_test.append(1)\n elif relation == 'ANT':\n Y_test.append(-1)\n X_test = X_test\n pred = clf.predict(X_test)\n print('Test date: {}'.format(date.today()))\n print('Precision: {}'.format(precision_score(Y_test, pred)))\n print('Recall: {}'.format(recall_score(Y_test, pred)))\n print('F1: {}'.format(f1_score(Y_test, pred)))\n log = (\n \"\"\"\n Test date: {}\n Precision: {}\n Recall: {}\n F1: {}\n \n\n ----------------------------------------\n \"\"\"\n .format(date.today(), precision_score(Y_test, pred),\n recall_score(Y_test, pred), f1_score(Y_test, pred)))\n log_f = open('./main/log', 'a+')\n log_f.write(log)\n log_f.close()\n\n def gen_vec_for_synonym_antonym_pair(self, word1, word2):\n np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self.\n W2V_DICT[word2])\n return np.concatenate((np_vec1, np_vec2, np_vec1 + 
np_vec2, np_vec1 *\n np_vec2, np.absolute(np_vec1 - np_vec2)), axis=0)\n\n def train_synonym_antonym_classification(self):\n X_train, Y_train = pickle.load(open(\n './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))\n unique, counts = np.unique(Y_train, return_counts=True)\n label_count = dict(zip(unique, counts))\n clf = MLPClassifier()\n clf.fit(X_train, Y_train)\n pickle.dump(clf, open('./main/model/svm.model', 'wb+'))\n return clf\n\n def prepare_data(self):\n X, Y = [], []\n for file in [\n './Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt',\n './Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']:\n f = open(file, 'r', encoding='utf8')\n for index, line in enumerate(f):\n line_arr = line.split()\n if len(line_arr) < 2:\n continue\n word1, word2 = line_arr[0], line_arr[1]\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)\n X.append(vec)\n if os.path.basename(f.name) == 'Antonym_vietnamese.txt':\n Y.append(-1)\n else:\n Y.append(1)\n X, Y = np.array(X), np.array(Y)\n pickle.dump((X.astype(np.float64), Y), open(\n './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+'))\n\n def gen_w2v_dict(self):\n with open('./main/dataset/w2v/w2v-dict.json', 'w+') as f:\n if f.read(1):\n f.seek(0)\n self.W2V_DICT = json.load(f)\n if not self.W2V_DICT:\n with open('./Word-Similarity/word2vec/W2V_150.txt', 'r',\n encoding='utf8') as f:\n for index, line in enumerate(f):\n line_arr = line.split()\n if index > 1:\n self.W2V_DICT.update({line_arr[0]: np.array(\n line_arr[1:]).astype(float).tolist()})\n f = open('./main/dataset/w2v/w2v-dict.json', 'w+')\n f.write(json.dumps(self.W2V_DICT))\n f.close()\n\n\n<mask token>\n", "step-4": "import os, pickle, json, ast\nimport pandas as pd\nfrom scipy import spatial\nimport numpy as np\nfrom scipy import stats\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.svm import 
LinearSVC\nfrom sklearn.metrics import precision_score, recall_score, f1_score\nfrom datetime import date\nfrom sklearn.neural_network import MLPClassifier\n\n\nclass TaskSolver:\n W2V_DICT = dict()\n\n def __init__(self):\n pass\n\n def solve(self, task_name, **kwargs):\n self.gen_w2v_dict()\n if task_name == 'k-nearest-words':\n self.task_k_nearest_words(kwargs.get('k'), kwargs.get('word'))\n elif task_name == 'synonym-antonym-classification':\n self.task_synonym_antonym_classification()\n elif task_name == 'test-cosin-similarity-with-visim-400-dataset':\n self.test_with_visim_400_data_set()\n\n def task_calculate_cosin_similarity(self, word1, word2, print_to_screen\n =True):\n sim = 0\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n sim = (2 - spatial.distance.cosine(self.W2V_DICT[word1], self.\n W2V_DICT[word2])) / 2\n if print_to_screen:\n print(\"Độ tương đồng giữa '{}' và '{}' là: {}\".format(word1,\n word2, sim))\n return sim\n\n def test_with_visim_400_data_set(self):\n visim_400_df = pd.read_csv(os.path.abspath(\n './Word-Similarity/datasets/ViSim-400/Visim-400.txt'), sep='\\t')\n rs, sim1_arr, sim2_arr = [], [], []\n for index, row in visim_400_df.iterrows():\n word_1, word_2 = row['Word1'], row['Word2']\n sim_1, sim_2 = row['Sim1'], row['Sim2']\n if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:\n sim = self.task_calculate_cosin_similarity(word_1, word_2, True\n )\n rs.append(sim)\n sim1_arr.append(sim_1)\n sim2_arr.append(sim_2)\n print('Hệ số tương đồng Pearson là: ', stats.pearsonr(rs, sim1_arr))\n print('Hệ số tương đồng Spearman là: ', stats.spearmanr(rs, sim1_arr))\n\n def task_k_nearest_words(self, k, word):\n k = int(k)\n if word not in self.W2V_DICT:\n print(\"Word '{}' not in vocab\".format(word))\n return\n sims = []\n for key in self.W2V_DICT:\n if key != word:\n sims.append({'key': key, 'sim': self.\n task_calculate_cosin_similarity(key, word, False)})\n k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[0:k - 1]\n 
print(\"{} từ tương đồng nhất với từ '{}' là:\".format(k, word))\n for w in k_list:\n print('Từ {} có độ tương đồng là {}'.format(w.get('key'), w.get\n ('sim')))\n return k_list\n\n def task_synonym_antonym_classification(self):\n self.prepare_data()\n self.train_synonym_antonym_classification()\n self.test_synonym_antonym_classification()\n\n def test_synonym_antonym_classification(self):\n clf = pickle.load(open('./main/model/svm.model', 'rb'))\n X_test, Y_test = [], []\n for file in ['./Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt',\n './Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt',\n './Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt']:\n f = open(file, 'r', encoding='utf8')\n for index, line in enumerate(f):\n line_arr = line.split()\n if index == 0:\n continue\n word1, word2, relation = line_arr[0], line_arr[1], line_arr[2]\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)\n X_test.append(vec)\n if relation == 'SYN':\n Y_test.append(1)\n elif relation == 'ANT':\n Y_test.append(-1)\n X_test = X_test\n pred = clf.predict(X_test)\n print('Test date: {}'.format(date.today()))\n print('Precision: {}'.format(precision_score(Y_test, pred)))\n print('Recall: {}'.format(recall_score(Y_test, pred)))\n print('F1: {}'.format(f1_score(Y_test, pred)))\n log = (\n \"\"\"\n Test date: {}\n Precision: {}\n Recall: {}\n F1: {}\n \n\n ----------------------------------------\n \"\"\"\n .format(date.today(), precision_score(Y_test, pred),\n recall_score(Y_test, pred), f1_score(Y_test, pred)))\n log_f = open('./main/log', 'a+')\n log_f.write(log)\n log_f.close()\n\n def gen_vec_for_synonym_antonym_pair(self, word1, word2):\n np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self.\n W2V_DICT[word2])\n return np.concatenate((np_vec1, np_vec2, np_vec1 + np_vec2, np_vec1 *\n np_vec2, np.absolute(np_vec1 - np_vec2)), axis=0)\n\n def train_synonym_antonym_classification(self):\n X_train, 
Y_train = pickle.load(open(\n './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))\n unique, counts = np.unique(Y_train, return_counts=True)\n label_count = dict(zip(unique, counts))\n clf = MLPClassifier()\n clf.fit(X_train, Y_train)\n pickle.dump(clf, open('./main/model/svm.model', 'wb+'))\n return clf\n\n def prepare_data(self):\n X, Y = [], []\n for file in [\n './Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt',\n './Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt']:\n f = open(file, 'r', encoding='utf8')\n for index, line in enumerate(f):\n line_arr = line.split()\n if len(line_arr) < 2:\n continue\n word1, word2 = line_arr[0], line_arr[1]\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)\n X.append(vec)\n if os.path.basename(f.name) == 'Antonym_vietnamese.txt':\n Y.append(-1)\n else:\n Y.append(1)\n X, Y = np.array(X), np.array(Y)\n pickle.dump((X.astype(np.float64), Y), open(\n './main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+'))\n\n def gen_w2v_dict(self):\n with open('./main/dataset/w2v/w2v-dict.json', 'w+') as f:\n if f.read(1):\n f.seek(0)\n self.W2V_DICT = json.load(f)\n if not self.W2V_DICT:\n with open('./Word-Similarity/word2vec/W2V_150.txt', 'r',\n encoding='utf8') as f:\n for index, line in enumerate(f):\n line_arr = line.split()\n if index > 1:\n self.W2V_DICT.update({line_arr[0]: np.array(\n line_arr[1:]).astype(float).tolist()})\n f = open('./main/dataset/w2v/w2v-dict.json', 'w+')\n f.write(json.dumps(self.W2V_DICT))\n f.close()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='Helper')\n parser.add_argument('--task', required=True, metavar='path', help=\n \"\"\"\n Task name: \n 0 => Cosin Similarity\n 1 => Test Cosine Similarity with Visim-400 dataset\n 2 => K Nearest Words\n 3 => Synonym Antonym Classification\n \"\"\"\n )\n parser.add_argument('--word', metavar='path', help=\n 
\"Target word used in 'K Nearest Words' task\")\n parser.add_argument('--k', metavar='path', help=\n \"Number of 'Nearest Words' used in 'K Nearest Words' task\")\n parser.add_argument('--word1', metavar='path', help=\n \"Source word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task\"\n )\n parser.add_argument('--word2', metavar='path', help=\n \"Target word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task\"\n )\n args = parser.parse_args()\n task = args.task\n k = args.k\n word = args.word\n word1 = args.word1\n word2 = args.word2\n switcher = {'0': 'calculate-cosin-similarity', '1':\n 'test-cosin-similarity-with-visim-400-dataset', '2':\n 'k-nearest-words', '3': 'synonym-antonym-classification', '4':\n 'predict-synonym-antonym'}\n task_name = switcher.get(task, 'Invalid task')\n task_solver = TaskSolver()\n task_solver.solve(task_name, k=k, word=word, word1=word1, word2=word2)\n", "step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\nimport os, pickle, json, ast\nimport pandas as pd\nfrom scipy import spatial\nimport numpy as np\nfrom scipy import stats\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom sklearn.svm import LinearSVC\nfrom sklearn.metrics import precision_score, recall_score, f1_score\nfrom datetime import date\nfrom sklearn.neural_network import MLPClassifier\n\nclass TaskSolver:\n\n W2V_DICT = dict()\n\n def __init__(self):\n pass\n\n def solve(self, task_name, **kwargs):\n\n self.gen_w2v_dict()\n\n if task_name == 'k-nearest-words':\n self.task_k_nearest_words(kwargs.get('k'), kwargs.get('word'))\n elif task_name == 'synonym-antonym-classification':\n self.task_synonym_antonym_classification()\n elif task_name == 'test-cosin-similarity-with-visim-400-dataset':\n self.test_with_visim_400_data_set()\n\n def task_calculate_cosin_similarity(self, word1, word2, print_to_screen=True):\n\n sim = 0\n\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n\n sim = (2 - 
spatial.distance.cosine(self.W2V_DICT[word1], self.W2V_DICT[word2])) / 2\n\n if (print_to_screen): print(\"Độ tương đồng giữa '{}' và '{}' là: {}\".format(word1, word2, sim))\n\n return sim\n\n def test_with_visim_400_data_set(self):\n\n visim_400_df = pd.read_csv(\n os.path.abspath('./Word-Similarity/datasets/ViSim-400/Visim-400.txt'), \n sep=\"\\t\")\n\n rs, sim1_arr, sim2_arr = [], [], []\n\n for index, row in visim_400_df.iterrows():\n\n word_1, word_2 = row['Word1'], row['Word2']\n sim_1, sim_2 = row['Sim1'], row['Sim2']\n\n if word_1 in self.W2V_DICT and word_2 in self.W2V_DICT:\n\n sim = self.task_calculate_cosin_similarity(word_1, word_2, True)\n\n rs.append(sim)\n \n sim1_arr.append(sim_1)\n sim2_arr.append(sim_2)\n\n print(\"Hệ số tương đồng Pearson là: \", stats.pearsonr(rs, sim1_arr))\n print(\"Hệ số tương đồng Spearman là: \", stats.spearmanr(rs, sim1_arr))\n\n def task_k_nearest_words(self, k, word):\n\n k = int(k)\n\n if word not in self.W2V_DICT: \n print(\"Word '{}' not in vocab\".format(word))\n return\n\n sims = []\n\n for key in self.W2V_DICT:\n\n if key != word:\n\n sims.append({\n 'key': key,\n 'sim': self.task_calculate_cosin_similarity(key, word, False)\n })\n \n k_list = sorted(sims, key=lambda k: k['sim'], reverse=True)[0: (k - 1)]\n\n print(\"{} từ tương đồng nhất với từ '{}' là:\".format(k, word))\n\n for w in k_list:\n\n print(\"Từ {} có độ tương đồng là {}\".format(w.get('key'), w.get('sim')))\n\n return k_list\n\n def task_synonym_antonym_classification(self):\n\n self.prepare_data()\n\n self.train_synonym_antonym_classification()\n\n self.test_synonym_antonym_classification()\n\n def test_synonym_antonym_classification(self):\n\n clf = pickle.load(open('./main/model/svm.model', 'rb'))\n\n X_test, Y_test = [], []\n\n for file in [\n './Word-Similarity/datasets/ViCon-400/400_noun_pairs.txt', \n './Word-Similarity/datasets/ViCon-400/400_verb_pairs.txt',\n './Word-Similarity/datasets/ViCon-400/600_adj_pairs.txt'\n ]:\n\n f = open(file, 
'r', encoding=\"utf8\")\n\n for index, line in enumerate(f):\n\n line_arr = line.split()\n\n if index == 0: continue\n\n word1, word2, relation = line_arr[0], line_arr[1], line_arr[2]\n\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n\n vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)\n\n X_test.append(vec)\n\n if relation == 'SYN': Y_test.append(1)\n elif relation == 'ANT': Y_test.append(-1)\n\n X_test = X_test\n pred = clf.predict(X_test)\n\n print(\"Test date: {}\".format(date.today()))\n print(\"Precision: {}\".format(precision_score(Y_test, pred)))\n print(\"Recall: {}\".format(recall_score(Y_test, pred)))\n print(\"F1: {}\".format(f1_score(Y_test, pred)))\n\n log = \"\"\"\n Test date: {}\n Precision: {}\n Recall: {}\n F1: {}\n \\n\n ----------------------------------------\n \"\"\".format(\n date.today(), \n precision_score(Y_test, pred), \n recall_score(Y_test, pred), \n f1_score(Y_test, pred))\n\n log_f = open('./main/log', 'a+')\n\n log_f.write(log)\n\n log_f.close()\n\n def gen_vec_for_synonym_antonym_pair(self, word1, word2):\n\n np_vec1, np_vec2 = np.array(self.W2V_DICT[word1]), np.array(self.W2V_DICT[word2])\n\n return np.concatenate((\n np_vec1,\n np_vec2,\n np_vec1 + np_vec2, \n np_vec1 * np_vec2,\n np.absolute(np_vec1 - np_vec2),\n # np.array([self.task_calculate_cosin_similarity(word1, word2, False)])\n ), axis=0)\n\n def train_synonym_antonym_classification(self):\n\n X_train, Y_train = pickle.load(open('./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'rb+'))\n\n unique, counts = np.unique(Y_train, return_counts=True)\n\n label_count = dict(zip(unique, counts))\n \n clf = MLPClassifier()\n \n clf.fit(X_train, Y_train)\n\n pickle.dump(clf, open('./main/model/svm.model', 'wb+'))\n\n return clf\n\n def prepare_data(self):\n\n X, Y = [], []\n\n for file in [\n './Word-Similarity/antonym-synonym set/Antonym_vietnamese.txt', \n './Word-Similarity/antonym-synonym set/Synonym_vietnamese.txt'\n ]:\n \n f = open(file, 'r', 
encoding=\"utf8\")\n\n for index, line in enumerate(f):\n\n line_arr = line.split()\n \n if len(line_arr) < 2: continue\n\n word1, word2 = line_arr[0], line_arr[1]\n\n if word1 in self.W2V_DICT and word2 in self.W2V_DICT:\n\n vec = self.gen_vec_for_synonym_antonym_pair(word1, word2)\n\n X.append(vec)\n\n if os.path.basename(f.name) == 'Antonym_vietnamese.txt': Y.append(-1)\n else: Y.append(1)\n\n\n X, Y = np.array(X), np.array(Y)\n\n pickle.dump(\n ( X.astype(np.float64), Y ),\n open('./main/dataset/antonym-synonym/antonym-synonym-pairs.bin', 'wb+')\n )\n\n def gen_w2v_dict(self):\n with open('./main/dataset/w2v/w2v-dict.json', 'w+') as f:\n if f.read(1):\n\n f.seek(0)\n\n self.W2V_DICT = json.load(f)\n\n if not self.W2V_DICT:\n with open('./Word-Similarity/word2vec/W2V_150.txt', 'r', encoding=\"utf8\") as f:\n\n for index, line in enumerate(f):\n\n line_arr = line.split()\n \n if index > 1:\n\n self.W2V_DICT.update({line_arr[0]: np.array(line_arr[1:]).astype(float).tolist()})\n\n f = open(\"./main/dataset/w2v/w2v-dict.json\",\"w+\")\n\n f.write(json.dumps(self.W2V_DICT))\n\n f.close()\n\nif __name__ == \"__main__\":\n\n import argparse\n\n parser = argparse.ArgumentParser(description=\"Helper\")\n\n parser.add_argument(\n \"--task\",\n required=True,\n metavar=\"path\",\n help=\"\"\"\n Task name: \n 0 => Cosin Similarity\n 1 => Test Cosine Similarity with Visim-400 dataset\n 2 => K Nearest Words\n 3 => Synonym Antonym Classification\n \"\"\",\n )\n\n parser.add_argument(\n \"--word\",\n metavar=\"path\",\n help=\"Target word used in 'K Nearest Words' task\",\n )\n\n parser.add_argument(\n \"--k\",\n metavar=\"path\",\n help=\"Number of 'Nearest Words' used in 'K Nearest Words' task\",\n )\n\n parser.add_argument(\n \"--word1\",\n metavar=\"path\",\n help=\"Source word used in 'Cosin Similarity' and 'Predict Synonym Antonym' task\",\n )\n\n parser.add_argument(\n \"--word2\",\n metavar=\"path\",\n help=\"Target word used in 'Cosin Similarity' and 'Predict Synonym 
Antonym' task\",\n )\n\n args = parser.parse_args()\n\n task = args.task \n k = args.k \n word = args.word \n word1 = args.word1\n word2 = args.word2\n\n switcher = {\n '0': 'calculate-cosin-similarity',\n '1': 'test-cosin-similarity-with-visim-400-dataset',\n '2': 'k-nearest-words',\n '3': 'synonym-antonym-classification',\n '4': 'predict-synonym-antonym'\n }\n\n task_name = switcher.get(task, \"Invalid task\")\n\n task_solver = TaskSolver()\n\n task_solver.solve(\n task_name, \n k=k,\n word=word,\n word1=word1,\n word2=word2\n )\n", "step-ids": [ 7, 11, 12, 15, 16 ] }
[ 7, 11, 12, 15, 16 ]
<|reserved_special_token_0|> class DataEncoding: @staticmethod def segment_decode(segment): arr = bytearray(segment) ack_binary = bytearray([arr[i] for i in range(4)]) tip_binary = bytearray([arr[4]]) len_binary = bytearray([arr[i] for i in (5, 6)]) ack = int.from_bytes(ack_binary, byteorder='big', signed=False) tip = int.from_bytes(tip_binary, byteorder='big', signed=False) length = int.from_bytes(len_binary, byteorder='big', signed=False) data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] != b'\x00']) return {'ack': ack, 'tip': tip, 'len': length, 'data': data} <|reserved_special_token_0|> @staticmethod def encode_data(transmitter, segment_data): transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x02' segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False) segment = segment_number + segment_type + segment_len + segment_data return segment @staticmethod def encode_end(transmitter, segment_data): global end_transmission transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x03' segment_data_len = len(segment_data) segment_data = segment_data + b'\x00' * (DEFAULT_SIZE - segment_data_len) segment_len = segment_data_len.to_bytes(2, byteorder='big', signed= False) segment = segment_number + segment_type + segment_len + segment_data return segment <|reserved_special_token_0|> @staticmethod def encode(transmitter, tip, data): segment_type = {'START': DataEncoding.encode_start, 'DATA': DataEncoding.encode_data, 'END': DataEncoding.encode_end} return segment_type.get(tip, DataEncoding.encode_error)(transmitter, data) @staticmethod def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE): with open(transmitter.filepath, 'rb') as f: while True: chunk = f.read(chunk_size) if chunk: yield chunk else: break <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> 
class DataEncoding: @staticmethod def segment_decode(segment): arr = bytearray(segment) ack_binary = bytearray([arr[i] for i in range(4)]) tip_binary = bytearray([arr[4]]) len_binary = bytearray([arr[i] for i in (5, 6)]) ack = int.from_bytes(ack_binary, byteorder='big', signed=False) tip = int.from_bytes(tip_binary, byteorder='big', signed=False) length = int.from_bytes(len_binary, byteorder='big', signed=False) data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] != b'\x00']) return {'ack': ack, 'tip': tip, 'len': length, 'data': data} @staticmethod def encode_start(transmitter, nume_fisier): transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x01' lungime_nume = len(nume_fisier) segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False) segment = segment_number + segment_type + segment_len for ch in nume_fisier: segment += ord(ch).to_bytes(1, byteorder='big', signed=False) return segment @staticmethod def encode_data(transmitter, segment_data): transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x02' segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False) segment = segment_number + segment_type + segment_len + segment_data return segment @staticmethod def encode_end(transmitter, segment_data): global end_transmission transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x03' segment_data_len = len(segment_data) segment_data = segment_data + b'\x00' * (DEFAULT_SIZE - segment_data_len) segment_len = segment_data_len.to_bytes(2, byteorder='big', signed= False) segment = segment_number + segment_type + segment_len + segment_data return segment <|reserved_special_token_0|> @staticmethod def encode(transmitter, tip, data): segment_type = {'START': DataEncoding.encode_start, 'DATA': 
DataEncoding.encode_data, 'END': DataEncoding.encode_end} return segment_type.get(tip, DataEncoding.encode_error)(transmitter, data) @staticmethod def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE): with open(transmitter.filepath, 'rb') as f: while True: chunk = f.read(chunk_size) if chunk: yield chunk else: break @staticmethod def encode_bytes(transmitter): for b in DataEncoding.bytes_from_file(transmitter.filepath): if len(b) == DEFAULT_SIZE: yield DataEncoding.encode(transmitter, 'DATA', b) else: yield DataEncoding.encode(transmitter, 'END', b) <|reserved_special_token_1|> <|reserved_special_token_0|> class DataEncoding: @staticmethod def segment_decode(segment): arr = bytearray(segment) ack_binary = bytearray([arr[i] for i in range(4)]) tip_binary = bytearray([arr[4]]) len_binary = bytearray([arr[i] for i in (5, 6)]) ack = int.from_bytes(ack_binary, byteorder='big', signed=False) tip = int.from_bytes(tip_binary, byteorder='big', signed=False) length = int.from_bytes(len_binary, byteorder='big', signed=False) data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] != b'\x00']) return {'ack': ack, 'tip': tip, 'len': length, 'data': data} @staticmethod def encode_start(transmitter, nume_fisier): transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x01' lungime_nume = len(nume_fisier) segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False) segment = segment_number + segment_type + segment_len for ch in nume_fisier: segment += ord(ch).to_bytes(1, byteorder='big', signed=False) return segment @staticmethod def encode_data(transmitter, segment_data): transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x02' segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False) segment = segment_number + segment_type + segment_len + segment_data return segment @staticmethod def 
encode_end(transmitter, segment_data): global end_transmission transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x03' segment_data_len = len(segment_data) segment_data = segment_data + b'\x00' * (DEFAULT_SIZE - segment_data_len) segment_len = segment_data_len.to_bytes(2, byteorder='big', signed= False) segment = segment_number + segment_type + segment_len + segment_data return segment @staticmethod def encode_error(transmitter, segment_data): pass @staticmethod def encode(transmitter, tip, data): segment_type = {'START': DataEncoding.encode_start, 'DATA': DataEncoding.encode_data, 'END': DataEncoding.encode_end} return segment_type.get(tip, DataEncoding.encode_error)(transmitter, data) @staticmethod def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE): with open(transmitter.filepath, 'rb') as f: while True: chunk = f.read(chunk_size) if chunk: yield chunk else: break @staticmethod def encode_bytes(transmitter): for b in DataEncoding.bytes_from_file(transmitter.filepath): if len(b) == DEFAULT_SIZE: yield DataEncoding.encode(transmitter, 'DATA', b) else: yield DataEncoding.encode(transmitter, 'END', b) <|reserved_special_token_1|> DEFAULT_SIZE = 512 class DataEncoding: @staticmethod def segment_decode(segment): arr = bytearray(segment) ack_binary = bytearray([arr[i] for i in range(4)]) tip_binary = bytearray([arr[4]]) len_binary = bytearray([arr[i] for i in (5, 6)]) ack = int.from_bytes(ack_binary, byteorder='big', signed=False) tip = int.from_bytes(tip_binary, byteorder='big', signed=False) length = int.from_bytes(len_binary, byteorder='big', signed=False) data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] != b'\x00']) return {'ack': ack, 'tip': tip, 'len': length, 'data': data} @staticmethod def encode_start(transmitter, nume_fisier): transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = 
b'\x01' lungime_nume = len(nume_fisier) segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False) segment = segment_number + segment_type + segment_len for ch in nume_fisier: segment += ord(ch).to_bytes(1, byteorder='big', signed=False) return segment @staticmethod def encode_data(transmitter, segment_data): transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x02' segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False) segment = segment_number + segment_type + segment_len + segment_data return segment @staticmethod def encode_end(transmitter, segment_data): global end_transmission transmitter.ack = transmitter.ack + 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x03' segment_data_len = len(segment_data) segment_data = segment_data + b'\x00' * (DEFAULT_SIZE - segment_data_len) segment_len = segment_data_len.to_bytes(2, byteorder='big', signed= False) segment = segment_number + segment_type + segment_len + segment_data return segment @staticmethod def encode_error(transmitter, segment_data): pass @staticmethod def encode(transmitter, tip, data): segment_type = {'START': DataEncoding.encode_start, 'DATA': DataEncoding.encode_data, 'END': DataEncoding.encode_end} return segment_type.get(tip, DataEncoding.encode_error)(transmitter, data) @staticmethod def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE): with open(transmitter.filepath, 'rb') as f: while True: chunk = f.read(chunk_size) if chunk: yield chunk else: break @staticmethod def encode_bytes(transmitter): for b in DataEncoding.bytes_from_file(transmitter.filepath): if len(b) == DEFAULT_SIZE: yield DataEncoding.encode(transmitter, 'DATA', b) else: yield DataEncoding.encode(transmitter, 'END', b) <|reserved_special_token_1|> DEFAULT_SIZE = 512 class DataEncoding: @staticmethod def segment_decode(segment): arr = bytearray(segment) ack_binary = 
bytearray([arr[i] for i in range(4)]) tip_binary = bytearray([arr[4]]) len_binary = bytearray([arr[i] for i in (5,6)]) ack = int.from_bytes(ack_binary, byteorder='big', signed=False) tip = int.from_bytes(tip_binary, byteorder='big', signed=False) length = int.from_bytes(len_binary, byteorder='big', signed=False) data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] != b'\x00']) return {'ack': ack, 'tip': tip, 'len': length, 'data': data} # codificare: (segment_number, segment_type, segment_len), segment_data # creeaza primul pachet, cel care contine numele @staticmethod def encode_start(transmitter,nume_fisier): transmitter.ack = transmitter.ack + 1 # primul ack trimis este 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x01' lungime_nume = len(nume_fisier) segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False) segment = segment_number + segment_type + segment_len for ch in nume_fisier: segment += (ord(ch).to_bytes(1, byteorder='big', signed=False)) return segment # creeaza pachetele care contine bitii din fisier @staticmethod def encode_data(transmitter,segment_data): transmitter.ack = transmitter.ack + 1 # primul ack trimis este 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x02' segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False) segment = segment_number + segment_type + segment_len + segment_data return segment # in campul de segment_code, al doilea octet va fi lungimea caracterelor utile @staticmethod def encode_end(transmitter,segment_data): global end_transmission transmitter.ack = transmitter.ack + 1 # primul ack trimis este 1 segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False) segment_type = b'\x03' segment_data_len = len(segment_data) segment_data = segment_data + b'\x00'*(DEFAULT_SIZE - segment_data_len) segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=False) segment = 
segment_number + segment_type + segment_len + segment_data return segment @staticmethod def encode_error(transmitter,segment_data): pass @staticmethod def encode(transmitter,tip, data): segment_type = { 'START': DataEncoding.encode_start, 'DATA' : DataEncoding.encode_data, 'END' : DataEncoding.encode_end } return segment_type.get(tip, DataEncoding.encode_error)(transmitter,data) #citirea fisier ca pachete de octeti @staticmethod def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE): with open(transmitter.filepath, "rb") as f: while True: chunk = f.read(chunk_size) if chunk: yield chunk else: break #codificarea pachetelor de octeti @staticmethod def encode_bytes(transmitter): for b in DataEncoding.bytes_from_file(transmitter.filepath): if len(b) == DEFAULT_SIZE: yield DataEncoding.encode(transmitter,'DATA', b) else: yield DataEncoding.encode(transmitter,'END', b)
flexible
{ "blob_id": "47c5375816ab35e8225e5f3695f7ee2ab5336076", "index": 4312, "step-1": "<mask token>\n\n\nclass DataEncoding:\n\n @staticmethod\n def segment_decode(segment):\n arr = bytearray(segment)\n ack_binary = bytearray([arr[i] for i in range(4)])\n tip_binary = bytearray([arr[4]])\n len_binary = bytearray([arr[i] for i in (5, 6)])\n ack = int.from_bytes(ack_binary, byteorder='big', signed=False)\n tip = int.from_bytes(tip_binary, byteorder='big', signed=False)\n length = int.from_bytes(len_binary, byteorder='big', signed=False)\n data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] !=\n b'\\x00'])\n return {'ack': ack, 'tip': tip, 'len': length, 'data': data}\n <mask token>\n\n @staticmethod\n def encode_data(transmitter, segment_data):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x02'\n segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_end(transmitter, segment_data):\n global end_transmission\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x03'\n segment_data_len = len(segment_data)\n segment_data = segment_data + b'\\x00' * (DEFAULT_SIZE -\n segment_data_len)\n segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=\n False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n <mask token>\n\n @staticmethod\n def encode(transmitter, tip, data):\n segment_type = {'START': DataEncoding.encode_start, 'DATA':\n DataEncoding.encode_data, 'END': DataEncoding.encode_end}\n return segment_type.get(tip, DataEncoding.encode_error)(transmitter,\n data)\n\n @staticmethod\n def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE):\n with open(transmitter.filepath, 'rb') as f:\n while 
True:\n chunk = f.read(chunk_size)\n if chunk:\n yield chunk\n else:\n break\n <mask token>\n", "step-2": "<mask token>\n\n\nclass DataEncoding:\n\n @staticmethod\n def segment_decode(segment):\n arr = bytearray(segment)\n ack_binary = bytearray([arr[i] for i in range(4)])\n tip_binary = bytearray([arr[4]])\n len_binary = bytearray([arr[i] for i in (5, 6)])\n ack = int.from_bytes(ack_binary, byteorder='big', signed=False)\n tip = int.from_bytes(tip_binary, byteorder='big', signed=False)\n length = int.from_bytes(len_binary, byteorder='big', signed=False)\n data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] !=\n b'\\x00'])\n return {'ack': ack, 'tip': tip, 'len': length, 'data': data}\n\n @staticmethod\n def encode_start(transmitter, nume_fisier):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x01'\n lungime_nume = len(nume_fisier)\n segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len\n for ch in nume_fisier:\n segment += ord(ch).to_bytes(1, byteorder='big', signed=False)\n return segment\n\n @staticmethod\n def encode_data(transmitter, segment_data):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x02'\n segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_end(transmitter, segment_data):\n global end_transmission\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x03'\n segment_data_len = len(segment_data)\n segment_data = segment_data + b'\\x00' * (DEFAULT_SIZE -\n segment_data_len)\n segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=\n False)\n segment = 
segment_number + segment_type + segment_len + segment_data\n return segment\n <mask token>\n\n @staticmethod\n def encode(transmitter, tip, data):\n segment_type = {'START': DataEncoding.encode_start, 'DATA':\n DataEncoding.encode_data, 'END': DataEncoding.encode_end}\n return segment_type.get(tip, DataEncoding.encode_error)(transmitter,\n data)\n\n @staticmethod\n def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE):\n with open(transmitter.filepath, 'rb') as f:\n while True:\n chunk = f.read(chunk_size)\n if chunk:\n yield chunk\n else:\n break\n\n @staticmethod\n def encode_bytes(transmitter):\n for b in DataEncoding.bytes_from_file(transmitter.filepath):\n if len(b) == DEFAULT_SIZE:\n yield DataEncoding.encode(transmitter, 'DATA', b)\n else:\n yield DataEncoding.encode(transmitter, 'END', b)\n", "step-3": "<mask token>\n\n\nclass DataEncoding:\n\n @staticmethod\n def segment_decode(segment):\n arr = bytearray(segment)\n ack_binary = bytearray([arr[i] for i in range(4)])\n tip_binary = bytearray([arr[4]])\n len_binary = bytearray([arr[i] for i in (5, 6)])\n ack = int.from_bytes(ack_binary, byteorder='big', signed=False)\n tip = int.from_bytes(tip_binary, byteorder='big', signed=False)\n length = int.from_bytes(len_binary, byteorder='big', signed=False)\n data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] !=\n b'\\x00'])\n return {'ack': ack, 'tip': tip, 'len': length, 'data': data}\n\n @staticmethod\n def encode_start(transmitter, nume_fisier):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x01'\n lungime_nume = len(nume_fisier)\n segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len\n for ch in nume_fisier:\n segment += ord(ch).to_bytes(1, byteorder='big', signed=False)\n return segment\n\n @staticmethod\n def encode_data(transmitter, segment_data):\n transmitter.ack = 
transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x02'\n segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_end(transmitter, segment_data):\n global end_transmission\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x03'\n segment_data_len = len(segment_data)\n segment_data = segment_data + b'\\x00' * (DEFAULT_SIZE -\n segment_data_len)\n segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=\n False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_error(transmitter, segment_data):\n pass\n\n @staticmethod\n def encode(transmitter, tip, data):\n segment_type = {'START': DataEncoding.encode_start, 'DATA':\n DataEncoding.encode_data, 'END': DataEncoding.encode_end}\n return segment_type.get(tip, DataEncoding.encode_error)(transmitter,\n data)\n\n @staticmethod\n def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE):\n with open(transmitter.filepath, 'rb') as f:\n while True:\n chunk = f.read(chunk_size)\n if chunk:\n yield chunk\n else:\n break\n\n @staticmethod\n def encode_bytes(transmitter):\n for b in DataEncoding.bytes_from_file(transmitter.filepath):\n if len(b) == DEFAULT_SIZE:\n yield DataEncoding.encode(transmitter, 'DATA', b)\n else:\n yield DataEncoding.encode(transmitter, 'END', b)\n", "step-4": "DEFAULT_SIZE = 512\n\n\nclass DataEncoding:\n\n @staticmethod\n def segment_decode(segment):\n arr = bytearray(segment)\n ack_binary = bytearray([arr[i] for i in range(4)])\n tip_binary = bytearray([arr[4]])\n len_binary = bytearray([arr[i] for i in (5, 6)])\n ack = int.from_bytes(ack_binary, byteorder='big', signed=False)\n tip = int.from_bytes(tip_binary, byteorder='big', 
signed=False)\n length = int.from_bytes(len_binary, byteorder='big', signed=False)\n data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] !=\n b'\\x00'])\n return {'ack': ack, 'tip': tip, 'len': length, 'data': data}\n\n @staticmethod\n def encode_start(transmitter, nume_fisier):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x01'\n lungime_nume = len(nume_fisier)\n segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len\n for ch in nume_fisier:\n segment += ord(ch).to_bytes(1, byteorder='big', signed=False)\n return segment\n\n @staticmethod\n def encode_data(transmitter, segment_data):\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x02'\n segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_end(transmitter, segment_data):\n global end_transmission\n transmitter.ack = transmitter.ack + 1\n segment_number = transmitter.ack.to_bytes(4, byteorder='big',\n signed=False)\n segment_type = b'\\x03'\n segment_data_len = len(segment_data)\n segment_data = segment_data + b'\\x00' * (DEFAULT_SIZE -\n segment_data_len)\n segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=\n False)\n segment = segment_number + segment_type + segment_len + segment_data\n return segment\n\n @staticmethod\n def encode_error(transmitter, segment_data):\n pass\n\n @staticmethod\n def encode(transmitter, tip, data):\n segment_type = {'START': DataEncoding.encode_start, 'DATA':\n DataEncoding.encode_data, 'END': DataEncoding.encode_end}\n return segment_type.get(tip, DataEncoding.encode_error)(transmitter,\n data)\n\n @staticmethod\n def bytes_from_file(transmitter, 
chunk_size=DEFAULT_SIZE):\n with open(transmitter.filepath, 'rb') as f:\n while True:\n chunk = f.read(chunk_size)\n if chunk:\n yield chunk\n else:\n break\n\n @staticmethod\n def encode_bytes(transmitter):\n for b in DataEncoding.bytes_from_file(transmitter.filepath):\n if len(b) == DEFAULT_SIZE:\n yield DataEncoding.encode(transmitter, 'DATA', b)\n else:\n yield DataEncoding.encode(transmitter, 'END', b)\n", "step-5": "DEFAULT_SIZE = 512\r\n\r\nclass DataEncoding:\r\n @staticmethod\r\n def segment_decode(segment):\r\n arr = bytearray(segment)\r\n ack_binary = bytearray([arr[i] for i in range(4)])\r\n tip_binary = bytearray([arr[4]])\r\n len_binary = bytearray([arr[i] for i in (5,6)])\r\n\r\n ack = int.from_bytes(ack_binary, byteorder='big', signed=False)\r\n tip = int.from_bytes(tip_binary, byteorder='big', signed=False)\r\n length = int.from_bytes(len_binary, byteorder='big', signed=False)\r\n data = bytearray([arr[i] for i in range(7, 7 + length) if arr[i] != b'\\x00'])\r\n\r\n return {'ack': ack, 'tip': tip, 'len': length, 'data': data}\r\n\r\n\r\n # codificare: (segment_number, segment_type, segment_len), segment_data\r\n # creeaza primul pachet, cel care contine numele\r\n @staticmethod\r\n def encode_start(transmitter,nume_fisier):\r\n transmitter.ack = transmitter.ack + 1 # primul ack trimis este 1\r\n segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False)\r\n\r\n segment_type = b'\\x01'\r\n lungime_nume = len(nume_fisier)\r\n segment_len = lungime_nume.to_bytes(2, byteorder='big', signed=False)\r\n segment = segment_number + segment_type + segment_len\r\n\r\n for ch in nume_fisier:\r\n segment += (ord(ch).to_bytes(1, byteorder='big', signed=False))\r\n\r\n return segment\r\n\r\n\r\n # creeaza pachetele care contine bitii din fisier\r\n @staticmethod\r\n def encode_data(transmitter,segment_data):\r\n transmitter.ack = transmitter.ack + 1 # primul ack trimis este 1\r\n segment_number = transmitter.ack.to_bytes(4, byteorder='big', 
signed=False)\r\n\r\n segment_type = b'\\x02'\r\n segment_len = DEFAULT_SIZE.to_bytes(2, byteorder='big', signed=False)\r\n segment = segment_number + segment_type + segment_len + segment_data\r\n\r\n return segment\r\n\r\n # in campul de segment_code, al doilea octet va fi lungimea caracterelor utile\r\n @staticmethod\r\n def encode_end(transmitter,segment_data):\r\n global end_transmission\r\n transmitter.ack = transmitter.ack + 1 # primul ack trimis este 1\r\n segment_number = transmitter.ack.to_bytes(4, byteorder='big', signed=False)\r\n\r\n segment_type = b'\\x03'\r\n segment_data_len = len(segment_data)\r\n segment_data = segment_data + b'\\x00'*(DEFAULT_SIZE - segment_data_len)\r\n segment_len = segment_data_len.to_bytes(2, byteorder='big', signed=False)\r\n segment = segment_number + segment_type + segment_len + segment_data\r\n\r\n return segment\r\n\r\n @staticmethod\r\n def encode_error(transmitter,segment_data):\r\n pass\r\n\r\n\r\n @staticmethod\r\n def encode(transmitter,tip, data):\r\n segment_type = {\r\n 'START': DataEncoding.encode_start,\r\n 'DATA' : DataEncoding.encode_data,\r\n 'END' : DataEncoding.encode_end\r\n }\r\n return segment_type.get(tip, DataEncoding.encode_error)(transmitter,data)\r\n\r\n\r\n #citirea fisier ca pachete de octeti\r\n @staticmethod\r\n def bytes_from_file(transmitter, chunk_size=DEFAULT_SIZE):\r\n with open(transmitter.filepath, \"rb\") as f:\r\n while True:\r\n chunk = f.read(chunk_size)\r\n if chunk:\r\n yield chunk\r\n else:\r\n break\r\n\r\n #codificarea pachetelor de octeti\r\n @staticmethod\r\n def encode_bytes(transmitter):\r\n for b in DataEncoding.bytes_from_file(transmitter.filepath):\r\n if len(b) == DEFAULT_SIZE:\r\n yield DataEncoding.encode(transmitter,'DATA', b)\r\n else:\r\n yield DataEncoding.encode(transmitter,'END', b)\r\n\r\n\r\n", "step-ids": [ 6, 8, 9, 10, 11 ] }
[ 6, 8, 9, 10, 11 ]
<|reserved_special_token_0|> def doMath(btn): global result, n1, n2, isFirst, calc inputNumber() if btn == 'Add': calc = 'a' if btn == 'Substract': calc = 's' if btn == 'Multiply': calc = 'm' if btn == 'Divide': calc = 'd' app.clearEntry('Number') def calculate(btn): global result, n1, n2, isFirst, calc inputNumber() if calc == 'a': result = n1 + n2 if calc == 's': result = n1 - n2 if calc == 'm': result = n1 * n2 if calc == 'd': try: result = n1 / n2 except ZeroDivisionError: clearOut(btn) app.errorBox('DivisionByZero', "You can't divide by Zero.") app.clearEntry('Number') app.setLabel('Result', result) def clearOut(btn): global result, n1, n2, isFirst, calc n1, n2 = 0.0, 0.0 result = 0.0 isFirst = True calc = '' <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def doMath(btn): global result, n1, n2, isFirst, calc inputNumber() if btn == 'Add': calc = 'a' if btn == 'Substract': calc = 's' if btn == 'Multiply': calc = 'm' if btn == 'Divide': calc = 'd' app.clearEntry('Number') def calculate(btn): global result, n1, n2, isFirst, calc inputNumber() if calc == 'a': result = n1 + n2 if calc == 's': result = n1 - n2 if calc == 'm': result = n1 * n2 if calc == 'd': try: result = n1 / n2 except ZeroDivisionError: clearOut(btn) app.errorBox('DivisionByZero', "You can't divide by Zero.") app.clearEntry('Number') app.setLabel('Result', result) def clearOut(btn): global result, n1, n2, isFirst, calc n1, n2 = 0.0, 0.0 result = 0.0 isFirst = True calc = '' def inputNumber(): global n1, n2, isFirst if isFirst: n1 = app.getEntry('Number') isFirst = False else: n2 = app.getEntry('Number') isFirst = True app.setStretch('column') app.setSticky('') app.setResizable(True) app.addNumericEntry('Number') app.setEntryDefault('Number', 'Enter Number') app.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath) app.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut]) app.setButton('clearOut', 'C') app.addEmptyLabel('Result') app.go() 
<|reserved_special_token_1|> <|reserved_special_token_0|> app = gui('Calculator', '560x240') n1, n2 = 0.0, 0.0 result = 0.0 isFirst = True calc = '' def doMath(btn): global result, n1, n2, isFirst, calc inputNumber() if btn == 'Add': calc = 'a' if btn == 'Substract': calc = 's' if btn == 'Multiply': calc = 'm' if btn == 'Divide': calc = 'd' app.clearEntry('Number') def calculate(btn): global result, n1, n2, isFirst, calc inputNumber() if calc == 'a': result = n1 + n2 if calc == 's': result = n1 - n2 if calc == 'm': result = n1 * n2 if calc == 'd': try: result = n1 / n2 except ZeroDivisionError: clearOut(btn) app.errorBox('DivisionByZero', "You can't divide by Zero.") app.clearEntry('Number') app.setLabel('Result', result) def clearOut(btn): global result, n1, n2, isFirst, calc n1, n2 = 0.0, 0.0 result = 0.0 isFirst = True calc = '' def inputNumber(): global n1, n2, isFirst if isFirst: n1 = app.getEntry('Number') isFirst = False else: n2 = app.getEntry('Number') isFirst = True app.setStretch('column') app.setSticky('') app.setResizable(True) app.addNumericEntry('Number') app.setEntryDefault('Number', 'Enter Number') app.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath) app.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut]) app.setButton('clearOut', 'C') app.addEmptyLabel('Result') app.go() <|reserved_special_token_1|> from appJar import gui app = gui('Calculator', '560x240') n1, n2 = 0.0, 0.0 result = 0.0 isFirst = True calc = '' def doMath(btn): global result, n1, n2, isFirst, calc inputNumber() if btn == 'Add': calc = 'a' if btn == 'Substract': calc = 's' if btn == 'Multiply': calc = 'm' if btn == 'Divide': calc = 'd' app.clearEntry('Number') def calculate(btn): global result, n1, n2, isFirst, calc inputNumber() if calc == 'a': result = n1 + n2 if calc == 's': result = n1 - n2 if calc == 'm': result = n1 * n2 if calc == 'd': try: result = n1 / n2 except ZeroDivisionError: clearOut(btn) app.errorBox('DivisionByZero', "You can't divide by 
Zero.") app.clearEntry('Number') app.setLabel('Result', result) def clearOut(btn): global result, n1, n2, isFirst, calc n1, n2 = 0.0, 0.0 result = 0.0 isFirst = True calc = '' def inputNumber(): global n1, n2, isFirst if isFirst: n1 = app.getEntry('Number') isFirst = False else: n2 = app.getEntry('Number') isFirst = True app.setStretch('column') app.setSticky('') app.setResizable(True) app.addNumericEntry('Number') app.setEntryDefault('Number', 'Enter Number') app.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath) app.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut]) app.setButton('clearOut', 'C') app.addEmptyLabel('Result') app.go() <|reserved_special_token_1|> from appJar import gui app = gui("Calculator", "560x240") ### FUNCTIONS ### n1, n2 = 0.0, 0.0 result = 0.0 isFirst = True calc = "" def doMath(btn): global result, n1, n2, isFirst, calc inputNumber() if(btn == "Add"): calc = "a" if(btn == "Substract"): calc = "s" if(btn == "Multiply"): calc = "m" if(btn == "Divide"): calc = "d" app.clearEntry("Number") def calculate(btn): global result, n1, n2, isFirst, calc inputNumber() if(calc == 'a'): result = n1 + n2 if(calc == 's'): result = n1 - n2 if(calc == 'm'): result = n1 * n2 if(calc == 'd'): try: result = n1 / n2 except ZeroDivisionError: clearOut(btn) app.errorBox("DivisionByZero", "You can't divide by Zero.") app.clearEntry("Number") app.setLabel("Result", result) def clearOut(btn): global result, n1, n2, isFirst, calc n1, n2 = 0.0, 0.0 result = 0.0 isFirst = True calc = "" def inputNumber(): global n1, n2, isFirst if(isFirst): n1 = app.getEntry("Number") isFirst = False else: n2 = app.getEntry("Number") isFirst = True ### FUNCTIONS ### app.setStretch("column") app.setSticky("") app.setResizable(True) app.addNumericEntry("Number") app.setEntryDefault("Number", "Enter Number") app.addButtons(["Add", "Substract", "Multiply", "Divide"], doMath) app.addButtons(["Calculate!", "clearOut"], [calculate, clearOut]) app.setButton("clearOut", 
"C") app.addEmptyLabel("Result") app.go()
flexible
{ "blob_id": "084299da1c2f41de96e60d37088466c7b61de38e", "index": 9750, "step-1": "<mask token>\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\ndef inputNumber():\n global n1, n2, isFirst\n if isFirst:\n n1 = app.getEntry('Number')\n isFirst = False\n else:\n n2 = app.getEntry('Number')\n isFirst = True\n\n\napp.setStretch('column')\napp.setSticky('')\napp.setResizable(True)\napp.addNumericEntry('Number')\napp.setEntryDefault('Number', 
'Enter Number')\napp.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath)\napp.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut])\napp.setButton('clearOut', 'C')\napp.addEmptyLabel('Result')\napp.go()\n", "step-3": "<mask token>\napp = gui('Calculator', '560x240')\nn1, n2 = 0.0, 0.0\nresult = 0.0\nisFirst = True\ncalc = ''\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\ndef inputNumber():\n global n1, n2, isFirst\n if isFirst:\n n1 = app.getEntry('Number')\n isFirst = False\n else:\n n2 = app.getEntry('Number')\n isFirst = True\n\n\napp.setStretch('column')\napp.setSticky('')\napp.setResizable(True)\napp.addNumericEntry('Number')\napp.setEntryDefault('Number', 'Enter Number')\napp.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath)\napp.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut])\napp.setButton('clearOut', 'C')\napp.addEmptyLabel('Result')\napp.go()\n", "step-4": "from appJar import gui\napp = gui('Calculator', '560x240')\nn1, n2 = 0.0, 0.0\nresult = 0.0\nisFirst = True\ncalc = ''\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n 
app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\ndef inputNumber():\n global n1, n2, isFirst\n if isFirst:\n n1 = app.getEntry('Number')\n isFirst = False\n else:\n n2 = app.getEntry('Number')\n isFirst = True\n\n\napp.setStretch('column')\napp.setSticky('')\napp.setResizable(True)\napp.addNumericEntry('Number')\napp.setEntryDefault('Number', 'Enter Number')\napp.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath)\napp.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut])\napp.setButton('clearOut', 'C')\napp.addEmptyLabel('Result')\napp.go()\n", "step-5": "from appJar import gui\n\napp = gui(\"Calculator\", \"560x240\")\n\n### FUNCTIONS ###\n\nn1, n2 = 0.0, 0.0\nresult = 0.0\nisFirst = True\ncalc = \"\"\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n\n inputNumber()\n\n if(btn == \"Add\"): calc = \"a\"\n if(btn == \"Substract\"): calc = \"s\"\n if(btn == \"Multiply\"): calc = \"m\"\n if(btn == \"Divide\"): calc = \"d\"\n\n app.clearEntry(\"Number\")\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n\n inputNumber()\n\n if(calc == 'a'): result = n1 + n2\n if(calc == 's'): result = n1 - n2\n if(calc == 'm'): result = n1 * n2\n if(calc == 'd'):\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox(\"DivisionByZero\", \"You can't divide by Zero.\")\n\n app.clearEntry(\"Number\")\n app.setLabel(\"Result\", result)\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 
0.0\n result = 0.0\n isFirst = True\n calc = \"\"\n\ndef inputNumber():\n global n1, n2, isFirst\n\n if(isFirst):\n n1 = app.getEntry(\"Number\")\n isFirst = False\n else:\n n2 = app.getEntry(\"Number\")\n isFirst = True\n\n\n### FUNCTIONS ###\n\napp.setStretch(\"column\")\napp.setSticky(\"\")\napp.setResizable(True)\napp.addNumericEntry(\"Number\")\napp.setEntryDefault(\"Number\", \"Enter Number\")\n\napp.addButtons([\"Add\", \"Substract\", \"Multiply\", \"Divide\"], doMath)\napp.addButtons([\"Calculate!\", \"clearOut\"], [calculate, clearOut])\napp.setButton(\"clearOut\", \"C\")\n\napp.addEmptyLabel(\"Result\")\n\napp.go()\n", "step-ids": [ 3, 5, 6, 7, 8 ] }
[ 3, 5, 6, 7, 8 ]
<|reserved_special_token_0|> class CycleGANVC2LossCalculator: def __init__(self): pass <|reserved_special_token_0|> @staticmethod def gen_loss(discriminator, y): y_dis = discriminator(y) return F.mean(F.softplus(-y_dis)) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class CycleGANVC2LossCalculator: def __init__(self): pass @staticmethod def dis_loss(discriminator, y, t): y_dis = discriminator(y) t_dis = discriminator(t) return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis)) @staticmethod def gen_loss(discriminator, y): y_dis = discriminator(y) return F.mean(F.softplus(-y_dis)) @staticmethod def cycle_loss(y, t): return 10.0 * F.mean_absolute_error(y, t) @staticmethod def identity_loss(y, t): return 5.0 * F.mean_absolute_error(y, t) def train(epochs, iterations, batchsize, modeldir, extension, time_width, mel_bins, sampling_rate, g_learning_rate, d_learning_rate, beta1, beta2, identity_epoch, second_step, src_path, tgt_path): dataset = DatasetLoader(src_path, tgt_path, extension, time_width, mel_bins, sampling_rate) print(dataset) generator_xy = Generator() generator_xy.to_gpu() gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2) generator_yx = Generator() generator_yx.to_gpu() gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2) discriminator_y = Discriminator() discriminator_y.to_gpu() dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2) discriminator_x = Discriminator() discriminator_x.to_gpu() dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2) discriminator_xyx = Discriminator() discriminator_xyx.to_gpu() dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1, beta2) discriminator_yxy = Discriminator() discriminator_yxy.to_gpu() dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1, beta2) lossfunc = CycleGANVC2LossCalculator() for epoch in 
range(epochs): sum_dis_loss = 0 sum_gen_loss = 0 for batch in range(0, iterations, batchsize): x, y = dataset.train(batchsize) xy = generator_xy(x) xyx = generator_yx(xy) yx = generator_yx(y) yxy = generator_xy(yx) xy.unchain_backward() xyx.unchain_backward() yx.unchain_backward() yxy.unchain_backward() dis_loss = lossfunc.dis_loss(discriminator_y, xy, y) dis_loss += lossfunc.dis_loss(discriminator_x, yx, x) if second_step: dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x) dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y) discriminator_xyx.cleargrads() discriminator_yxy.cleargrads() discriminator_x.cleargrads() discriminator_y.cleargrads() dis_loss.backward() dis_x_opt.update() dis_y_opt.update() if second_step: dis_xyx_opt.update() dis_yxy_opt.update() dis_loss.unchain_backward() xy = generator_xy(x) xyx = generator_yx(xy) id_y = generator_xy(y) yx = generator_yx(y) yxy = generator_xy(yx) id_x = generator_yx(x) gen_loss = lossfunc.gen_loss(discriminator_y, xy) gen_loss += lossfunc.gen_loss(discriminator_x, yx) if second_step: gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy) gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx) gen_loss += lossfunc.cycle_loss(x, xyx) gen_loss += lossfunc.cycle_loss(y, xyx) if epoch < identity_epoch: gen_loss += lossfunc.identity_loss(id_y, y) gen_loss += lossfunc.identity_loss(id_x, x) generator_xy.cleargrads() generator_yx.cleargrads() gen_loss.backward() gen_xy_opt.update() gen_yx_opt.update() gen_loss.unchain_backward() sum_dis_loss += dis_loss.data sum_gen_loss += gen_loss.data if batch == 0: serializers.save_npz(f'{modeldir}/generator_xy_{epoch}.model', generator_xy) serializers.save_npz(f'{modeldir}/generator_yx_{epoch}.model', generator_yx) print('epoch : {}'.format(epoch)) print('Generator loss : {}'.format(sum_gen_loss / iterations)) print('Discriminator loss : {}'.format(sum_dis_loss / iterations)) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> 
cuda.get_device(0).use() class CycleGANVC2LossCalculator: def __init__(self): pass @staticmethod def dis_loss(discriminator, y, t): y_dis = discriminator(y) t_dis = discriminator(t) return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis)) @staticmethod def gen_loss(discriminator, y): y_dis = discriminator(y) return F.mean(F.softplus(-y_dis)) @staticmethod def cycle_loss(y, t): return 10.0 * F.mean_absolute_error(y, t) @staticmethod def identity_loss(y, t): return 5.0 * F.mean_absolute_error(y, t) def train(epochs, iterations, batchsize, modeldir, extension, time_width, mel_bins, sampling_rate, g_learning_rate, d_learning_rate, beta1, beta2, identity_epoch, second_step, src_path, tgt_path): dataset = DatasetLoader(src_path, tgt_path, extension, time_width, mel_bins, sampling_rate) print(dataset) generator_xy = Generator() generator_xy.to_gpu() gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2) generator_yx = Generator() generator_yx.to_gpu() gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2) discriminator_y = Discriminator() discriminator_y.to_gpu() dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2) discriminator_x = Discriminator() discriminator_x.to_gpu() dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2) discriminator_xyx = Discriminator() discriminator_xyx.to_gpu() dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1, beta2) discriminator_yxy = Discriminator() discriminator_yxy.to_gpu() dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1, beta2) lossfunc = CycleGANVC2LossCalculator() for epoch in range(epochs): sum_dis_loss = 0 sum_gen_loss = 0 for batch in range(0, iterations, batchsize): x, y = dataset.train(batchsize) xy = generator_xy(x) xyx = generator_yx(xy) yx = generator_yx(y) yxy = generator_xy(yx) xy.unchain_backward() xyx.unchain_backward() yx.unchain_backward() yxy.unchain_backward() dis_loss = 
lossfunc.dis_loss(discriminator_y, xy, y) dis_loss += lossfunc.dis_loss(discriminator_x, yx, x) if second_step: dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x) dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y) discriminator_xyx.cleargrads() discriminator_yxy.cleargrads() discriminator_x.cleargrads() discriminator_y.cleargrads() dis_loss.backward() dis_x_opt.update() dis_y_opt.update() if second_step: dis_xyx_opt.update() dis_yxy_opt.update() dis_loss.unchain_backward() xy = generator_xy(x) xyx = generator_yx(xy) id_y = generator_xy(y) yx = generator_yx(y) yxy = generator_xy(yx) id_x = generator_yx(x) gen_loss = lossfunc.gen_loss(discriminator_y, xy) gen_loss += lossfunc.gen_loss(discriminator_x, yx) if second_step: gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy) gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx) gen_loss += lossfunc.cycle_loss(x, xyx) gen_loss += lossfunc.cycle_loss(y, xyx) if epoch < identity_epoch: gen_loss += lossfunc.identity_loss(id_y, y) gen_loss += lossfunc.identity_loss(id_x, x) generator_xy.cleargrads() generator_yx.cleargrads() gen_loss.backward() gen_xy_opt.update() gen_yx_opt.update() gen_loss.unchain_backward() sum_dis_loss += dis_loss.data sum_gen_loss += gen_loss.data if batch == 0: serializers.save_npz(f'{modeldir}/generator_xy_{epoch}.model', generator_xy) serializers.save_npz(f'{modeldir}/generator_yx_{epoch}.model', generator_yx) print('epoch : {}'.format(epoch)) print('Generator loss : {}'.format(sum_gen_loss / iterations)) print('Discriminator loss : {}'.format(sum_dis_loss / iterations)) if __name__ == '__main__': parser = argparse.ArgumentParser(description='StarGANVC2') parser.add_argument('--e', type=int, default=50, help= 'the number of epochs') parser.add_argument('--i', type=int, default=1000, help= 'the number of iterations') parser.add_argument('--b', type=int, default=16, help='batch size') parser.add_argument('--modeldir', type=Path, default='modeldir', help= 'model output directory') 
parser.add_argument('--ext', type=str, default='.npy', help= 'extension of training data') parser.add_argument('--tw', type=int, default=128, help= 'time width of spectral envelope') parser.add_argument('--mb', type=int, default=36, help= 'mel bins of spectral envelope') parser.add_argument('--sr', type=int, default=22050, help= 'sampling rate of audio data') parser.add_argument('--glr', type=float, default=0.0002, help= 'learning rate of Adam on generator') parser.add_argument('--dlr', type=float, default=0.0001, help= 'learning rate of Adam on discriminator') parser.add_argument('--b1', type=float, default=0.5, help='beta1 of Adam') parser.add_argument('--b2', type=float, default=0.999, help='beta2 of Adam' ) parser.add_argument('--ie', type=int, default=20, help= 'time spans enabling identity mapping loss') parser.add_argument('--second', action='store_true', help= 'enabling second step of adversaria loss') parser.add_argument('--src', type=Path, help= 'path which includes source data') parser.add_argument('--tgt', type=Path, help= 'path which includes target data') args = parser.parse_args() modeldir = args.modeldir modeldir.mkdir(exist_ok=True) train(args.e, args.i, args.b, modeldir, args.ext, args.tw, args.mb, args.sr, args.glr, args.dlr, args.b1, args.b2, args.ie, args.second, args.src, args.tgt) <|reserved_special_token_1|> <|reserved_special_token_0|> xp = cuda.cupy cuda.get_device(0).use() class CycleGANVC2LossCalculator: def __init__(self): pass @staticmethod def dis_loss(discriminator, y, t): y_dis = discriminator(y) t_dis = discriminator(t) return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis)) @staticmethod def gen_loss(discriminator, y): y_dis = discriminator(y) return F.mean(F.softplus(-y_dis)) @staticmethod def cycle_loss(y, t): return 10.0 * F.mean_absolute_error(y, t) @staticmethod def identity_loss(y, t): return 5.0 * F.mean_absolute_error(y, t) def train(epochs, iterations, batchsize, modeldir, extension, time_width, mel_bins, 
sampling_rate, g_learning_rate, d_learning_rate, beta1, beta2, identity_epoch, second_step, src_path, tgt_path): dataset = DatasetLoader(src_path, tgt_path, extension, time_width, mel_bins, sampling_rate) print(dataset) generator_xy = Generator() generator_xy.to_gpu() gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2) generator_yx = Generator() generator_yx.to_gpu() gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2) discriminator_y = Discriminator() discriminator_y.to_gpu() dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2) discriminator_x = Discriminator() discriminator_x.to_gpu() dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2) discriminator_xyx = Discriminator() discriminator_xyx.to_gpu() dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1, beta2) discriminator_yxy = Discriminator() discriminator_yxy.to_gpu() dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1, beta2) lossfunc = CycleGANVC2LossCalculator() for epoch in range(epochs): sum_dis_loss = 0 sum_gen_loss = 0 for batch in range(0, iterations, batchsize): x, y = dataset.train(batchsize) xy = generator_xy(x) xyx = generator_yx(xy) yx = generator_yx(y) yxy = generator_xy(yx) xy.unchain_backward() xyx.unchain_backward() yx.unchain_backward() yxy.unchain_backward() dis_loss = lossfunc.dis_loss(discriminator_y, xy, y) dis_loss += lossfunc.dis_loss(discriminator_x, yx, x) if second_step: dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x) dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y) discriminator_xyx.cleargrads() discriminator_yxy.cleargrads() discriminator_x.cleargrads() discriminator_y.cleargrads() dis_loss.backward() dis_x_opt.update() dis_y_opt.update() if second_step: dis_xyx_opt.update() dis_yxy_opt.update() dis_loss.unchain_backward() xy = generator_xy(x) xyx = generator_yx(xy) id_y = generator_xy(y) yx = generator_yx(y) yxy = generator_xy(yx) id_x = 
generator_yx(x) gen_loss = lossfunc.gen_loss(discriminator_y, xy) gen_loss += lossfunc.gen_loss(discriminator_x, yx) if second_step: gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy) gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx) gen_loss += lossfunc.cycle_loss(x, xyx) gen_loss += lossfunc.cycle_loss(y, xyx) if epoch < identity_epoch: gen_loss += lossfunc.identity_loss(id_y, y) gen_loss += lossfunc.identity_loss(id_x, x) generator_xy.cleargrads() generator_yx.cleargrads() gen_loss.backward() gen_xy_opt.update() gen_yx_opt.update() gen_loss.unchain_backward() sum_dis_loss += dis_loss.data sum_gen_loss += gen_loss.data if batch == 0: serializers.save_npz(f'{modeldir}/generator_xy_{epoch}.model', generator_xy) serializers.save_npz(f'{modeldir}/generator_yx_{epoch}.model', generator_yx) print('epoch : {}'.format(epoch)) print('Generator loss : {}'.format(sum_gen_loss / iterations)) print('Discriminator loss : {}'.format(sum_dis_loss / iterations)) if __name__ == '__main__': parser = argparse.ArgumentParser(description='StarGANVC2') parser.add_argument('--e', type=int, default=50, help= 'the number of epochs') parser.add_argument('--i', type=int, default=1000, help= 'the number of iterations') parser.add_argument('--b', type=int, default=16, help='batch size') parser.add_argument('--modeldir', type=Path, default='modeldir', help= 'model output directory') parser.add_argument('--ext', type=str, default='.npy', help= 'extension of training data') parser.add_argument('--tw', type=int, default=128, help= 'time width of spectral envelope') parser.add_argument('--mb', type=int, default=36, help= 'mel bins of spectral envelope') parser.add_argument('--sr', type=int, default=22050, help= 'sampling rate of audio data') parser.add_argument('--glr', type=float, default=0.0002, help= 'learning rate of Adam on generator') parser.add_argument('--dlr', type=float, default=0.0001, help= 'learning rate of Adam on discriminator') parser.add_argument('--b1', type=float, 
default=0.5, help='beta1 of Adam') parser.add_argument('--b2', type=float, default=0.999, help='beta2 of Adam' ) parser.add_argument('--ie', type=int, default=20, help= 'time spans enabling identity mapping loss') parser.add_argument('--second', action='store_true', help= 'enabling second step of adversaria loss') parser.add_argument('--src', type=Path, help= 'path which includes source data') parser.add_argument('--tgt', type=Path, help= 'path which includes target data') args = parser.parse_args() modeldir = args.modeldir modeldir.mkdir(exist_ok=True) train(args.e, args.i, args.b, modeldir, args.ext, args.tw, args.mb, args.sr, args.glr, args.dlr, args.b1, args.b2, args.ie, args.second, args.src, args.tgt) <|reserved_special_token_1|> import chainer import chainer.functions as F import numpy as np import argparse from model import Generator, Discriminator from chainer import cuda, serializers from pathlib import Path from utils import set_optimizer from dataset import DatasetLoader xp = cuda.cupy cuda.get_device(0).use() class CycleGANVC2LossCalculator: def __init__(self): pass @staticmethod def dis_loss(discriminator, y, t): y_dis = discriminator(y) t_dis = discriminator(t) return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis)) @staticmethod def gen_loss(discriminator, y): y_dis = discriminator(y) return F.mean(F.softplus(-y_dis)) @staticmethod def cycle_loss(y, t): return 10.0 * F.mean_absolute_error(y, t) @staticmethod def identity_loss(y, t): return 5.0 * F.mean_absolute_error(y, t) def train(epochs, iterations, batchsize, modeldir, extension, time_width, mel_bins, sampling_rate, g_learning_rate, d_learning_rate, beta1, beta2, identity_epoch, second_step, src_path, tgt_path): # Dataset definiton dataset = DatasetLoader(src_path, tgt_path, extension, time_width, mel_bins, sampling_rate) print(dataset) # Model & Optimizer definition generator_xy = Generator() generator_xy.to_gpu() gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2) 
generator_yx = Generator() generator_yx.to_gpu() gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2) discriminator_y = Discriminator() discriminator_y.to_gpu() dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2) discriminator_x = Discriminator() discriminator_x.to_gpu() dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2) discriminator_xyx = Discriminator() discriminator_xyx.to_gpu() dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1, beta2) discriminator_yxy = Discriminator() discriminator_yxy.to_gpu() dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1, beta2) # Loss function definition lossfunc = CycleGANVC2LossCalculator() for epoch in range(epochs): sum_dis_loss = 0 sum_gen_loss = 0 for batch in range(0, iterations, batchsize): x, y = dataset.train(batchsize) xy = generator_xy(x) xyx = generator_yx(xy) yx = generator_yx(y) yxy = generator_xy(yx) xy.unchain_backward() xyx.unchain_backward() yx.unchain_backward() yxy.unchain_backward() dis_loss = lossfunc.dis_loss(discriminator_y, xy, y) dis_loss += lossfunc.dis_loss(discriminator_x, yx, x) if second_step: dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x) dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y) discriminator_xyx.cleargrads() discriminator_yxy.cleargrads() discriminator_x.cleargrads() discriminator_y.cleargrads() dis_loss.backward() dis_x_opt.update() dis_y_opt.update() if second_step: dis_xyx_opt.update() dis_yxy_opt.update() dis_loss.unchain_backward() xy = generator_xy(x) xyx = generator_yx(xy) id_y = generator_xy(y) yx = generator_yx(y) yxy = generator_xy(yx) id_x = generator_yx(x) gen_loss = lossfunc.gen_loss(discriminator_y, xy) gen_loss += lossfunc.gen_loss(discriminator_x, yx) if second_step: gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy) gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx) gen_loss += lossfunc.cycle_loss(x, xyx) gen_loss += lossfunc.cycle_loss(y, xyx) if 
epoch < identity_epoch: gen_loss += lossfunc.identity_loss(id_y, y) gen_loss += lossfunc.identity_loss(id_x, x) generator_xy.cleargrads() generator_yx.cleargrads() gen_loss.backward() gen_xy_opt.update() gen_yx_opt.update() gen_loss.unchain_backward() sum_dis_loss += dis_loss.data sum_gen_loss += gen_loss.data if batch == 0: serializers.save_npz(f"{modeldir}/generator_xy_{epoch}.model", generator_xy) serializers.save_npz(f"{modeldir}/generator_yx_{epoch}.model", generator_yx) print('epoch : {}'.format(epoch)) print('Generator loss : {}'.format(sum_gen_loss / iterations)) print('Discriminator loss : {}'.format(sum_dis_loss / iterations)) if __name__ == "__main__": parser = argparse.ArgumentParser(description="StarGANVC2") parser.add_argument('--e', type=int, default=50, help="the number of epochs") parser.add_argument('--i', type=int, default=1000, help="the number of iterations") parser.add_argument('--b', type=int, default=16, help="batch size") parser.add_argument('--modeldir', type=Path, default="modeldir", help="model output directory") parser.add_argument('--ext', type=str, default=".npy", help="extension of training data") parser.add_argument('--tw', type=int, default=128, help="time width of spectral envelope") parser.add_argument('--mb', type=int, default=36, help="mel bins of spectral envelope") parser.add_argument('--sr', type=int, default=22050, help="sampling rate of audio data") parser.add_argument('--glr', type=float, default=0.0002, help="learning rate of Adam on generator") parser.add_argument('--dlr', type=float, default=0.0001, help="learning rate of Adam on discriminator") parser.add_argument('--b1', type=float, default=0.5, help="beta1 of Adam") parser.add_argument('--b2', type=float, default=0.999, help="beta2 of Adam") parser.add_argument('--ie', type=int, default=20, help="time spans enabling identity mapping loss") parser.add_argument('--second', action="store_true", help="enabling second step of adversaria loss") 
parser.add_argument('--src', type=Path, help="path which includes source data") parser.add_argument('--tgt', type=Path, help="path which includes target data") args = parser.parse_args() modeldir = args.modeldir modeldir.mkdir(exist_ok=True) train(args.e, args.i, args.b, modeldir, args.ext, args.tw, args.mb, args.sr, args.glr, args.dlr, args.b1, args.b2, args.ie, args.second, args.src, args.tgt)
flexible
{ "blob_id": "32105a245f6945dbe8749140d811b20d634289bc", "index": 2481, "step-1": "<mask token>\n\n\nclass CycleGANVC2LossCalculator:\n\n def __init__(self):\n pass\n <mask token>\n\n @staticmethod\n def gen_loss(discriminator, y):\n y_dis = discriminator(y)\n return F.mean(F.softplus(-y_dis))\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass CycleGANVC2LossCalculator:\n\n def __init__(self):\n pass\n\n @staticmethod\n def dis_loss(discriminator, y, t):\n y_dis = discriminator(y)\n t_dis = discriminator(t)\n return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis))\n\n @staticmethod\n def gen_loss(discriminator, y):\n y_dis = discriminator(y)\n return F.mean(F.softplus(-y_dis))\n\n @staticmethod\n def cycle_loss(y, t):\n return 10.0 * F.mean_absolute_error(y, t)\n\n @staticmethod\n def identity_loss(y, t):\n return 5.0 * F.mean_absolute_error(y, t)\n\n\ndef train(epochs, iterations, batchsize, modeldir, extension, time_width,\n mel_bins, sampling_rate, g_learning_rate, d_learning_rate, beta1, beta2,\n identity_epoch, second_step, src_path, tgt_path):\n dataset = DatasetLoader(src_path, tgt_path, extension, time_width,\n mel_bins, sampling_rate)\n print(dataset)\n generator_xy = Generator()\n generator_xy.to_gpu()\n gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2)\n generator_yx = Generator()\n generator_yx.to_gpu()\n gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2)\n discriminator_y = Discriminator()\n discriminator_y.to_gpu()\n dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2)\n discriminator_x = Discriminator()\n discriminator_x.to_gpu()\n dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2)\n discriminator_xyx = Discriminator()\n discriminator_xyx.to_gpu()\n dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1,\n beta2)\n discriminator_yxy = Discriminator()\n discriminator_yxy.to_gpu()\n dis_yxy_opt = 
set_optimizer(discriminator_yxy, d_learning_rate, beta1,\n beta2)\n lossfunc = CycleGANVC2LossCalculator()\n for epoch in range(epochs):\n sum_dis_loss = 0\n sum_gen_loss = 0\n for batch in range(0, iterations, batchsize):\n x, y = dataset.train(batchsize)\n xy = generator_xy(x)\n xyx = generator_yx(xy)\n yx = generator_yx(y)\n yxy = generator_xy(yx)\n xy.unchain_backward()\n xyx.unchain_backward()\n yx.unchain_backward()\n yxy.unchain_backward()\n dis_loss = lossfunc.dis_loss(discriminator_y, xy, y)\n dis_loss += lossfunc.dis_loss(discriminator_x, yx, x)\n if second_step:\n dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x)\n dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y)\n discriminator_xyx.cleargrads()\n discriminator_yxy.cleargrads()\n discriminator_x.cleargrads()\n discriminator_y.cleargrads()\n dis_loss.backward()\n dis_x_opt.update()\n dis_y_opt.update()\n if second_step:\n dis_xyx_opt.update()\n dis_yxy_opt.update()\n dis_loss.unchain_backward()\n xy = generator_xy(x)\n xyx = generator_yx(xy)\n id_y = generator_xy(y)\n yx = generator_yx(y)\n yxy = generator_xy(yx)\n id_x = generator_yx(x)\n gen_loss = lossfunc.gen_loss(discriminator_y, xy)\n gen_loss += lossfunc.gen_loss(discriminator_x, yx)\n if second_step:\n gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy)\n gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx)\n gen_loss += lossfunc.cycle_loss(x, xyx)\n gen_loss += lossfunc.cycle_loss(y, xyx)\n if epoch < identity_epoch:\n gen_loss += lossfunc.identity_loss(id_y, y)\n gen_loss += lossfunc.identity_loss(id_x, x)\n generator_xy.cleargrads()\n generator_yx.cleargrads()\n gen_loss.backward()\n gen_xy_opt.update()\n gen_yx_opt.update()\n gen_loss.unchain_backward()\n sum_dis_loss += dis_loss.data\n sum_gen_loss += gen_loss.data\n if batch == 0:\n serializers.save_npz(f'{modeldir}/generator_xy_{epoch}.model',\n generator_xy)\n serializers.save_npz(f'{modeldir}/generator_yx_{epoch}.model',\n generator_yx)\n print('epoch : 
{}'.format(epoch))\n print('Generator loss : {}'.format(sum_gen_loss / iterations))\n print('Discriminator loss : {}'.format(sum_dis_loss / iterations))\n\n\n<mask token>\n", "step-3": "<mask token>\ncuda.get_device(0).use()\n\n\nclass CycleGANVC2LossCalculator:\n\n def __init__(self):\n pass\n\n @staticmethod\n def dis_loss(discriminator, y, t):\n y_dis = discriminator(y)\n t_dis = discriminator(t)\n return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis))\n\n @staticmethod\n def gen_loss(discriminator, y):\n y_dis = discriminator(y)\n return F.mean(F.softplus(-y_dis))\n\n @staticmethod\n def cycle_loss(y, t):\n return 10.0 * F.mean_absolute_error(y, t)\n\n @staticmethod\n def identity_loss(y, t):\n return 5.0 * F.mean_absolute_error(y, t)\n\n\ndef train(epochs, iterations, batchsize, modeldir, extension, time_width,\n mel_bins, sampling_rate, g_learning_rate, d_learning_rate, beta1, beta2,\n identity_epoch, second_step, src_path, tgt_path):\n dataset = DatasetLoader(src_path, tgt_path, extension, time_width,\n mel_bins, sampling_rate)\n print(dataset)\n generator_xy = Generator()\n generator_xy.to_gpu()\n gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2)\n generator_yx = Generator()\n generator_yx.to_gpu()\n gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2)\n discriminator_y = Discriminator()\n discriminator_y.to_gpu()\n dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2)\n discriminator_x = Discriminator()\n discriminator_x.to_gpu()\n dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2)\n discriminator_xyx = Discriminator()\n discriminator_xyx.to_gpu()\n dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1,\n beta2)\n discriminator_yxy = Discriminator()\n discriminator_yxy.to_gpu()\n dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1,\n beta2)\n lossfunc = CycleGANVC2LossCalculator()\n for epoch in range(epochs):\n sum_dis_loss = 0\n 
sum_gen_loss = 0\n for batch in range(0, iterations, batchsize):\n x, y = dataset.train(batchsize)\n xy = generator_xy(x)\n xyx = generator_yx(xy)\n yx = generator_yx(y)\n yxy = generator_xy(yx)\n xy.unchain_backward()\n xyx.unchain_backward()\n yx.unchain_backward()\n yxy.unchain_backward()\n dis_loss = lossfunc.dis_loss(discriminator_y, xy, y)\n dis_loss += lossfunc.dis_loss(discriminator_x, yx, x)\n if second_step:\n dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x)\n dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y)\n discriminator_xyx.cleargrads()\n discriminator_yxy.cleargrads()\n discriminator_x.cleargrads()\n discriminator_y.cleargrads()\n dis_loss.backward()\n dis_x_opt.update()\n dis_y_opt.update()\n if second_step:\n dis_xyx_opt.update()\n dis_yxy_opt.update()\n dis_loss.unchain_backward()\n xy = generator_xy(x)\n xyx = generator_yx(xy)\n id_y = generator_xy(y)\n yx = generator_yx(y)\n yxy = generator_xy(yx)\n id_x = generator_yx(x)\n gen_loss = lossfunc.gen_loss(discriminator_y, xy)\n gen_loss += lossfunc.gen_loss(discriminator_x, yx)\n if second_step:\n gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy)\n gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx)\n gen_loss += lossfunc.cycle_loss(x, xyx)\n gen_loss += lossfunc.cycle_loss(y, xyx)\n if epoch < identity_epoch:\n gen_loss += lossfunc.identity_loss(id_y, y)\n gen_loss += lossfunc.identity_loss(id_x, x)\n generator_xy.cleargrads()\n generator_yx.cleargrads()\n gen_loss.backward()\n gen_xy_opt.update()\n gen_yx_opt.update()\n gen_loss.unchain_backward()\n sum_dis_loss += dis_loss.data\n sum_gen_loss += gen_loss.data\n if batch == 0:\n serializers.save_npz(f'{modeldir}/generator_xy_{epoch}.model',\n generator_xy)\n serializers.save_npz(f'{modeldir}/generator_yx_{epoch}.model',\n generator_yx)\n print('epoch : {}'.format(epoch))\n print('Generator loss : {}'.format(sum_gen_loss / iterations))\n print('Discriminator loss : {}'.format(sum_dis_loss / iterations))\n\n\nif __name__ == 
'__main__':\n parser = argparse.ArgumentParser(description='StarGANVC2')\n parser.add_argument('--e', type=int, default=50, help=\n 'the number of epochs')\n parser.add_argument('--i', type=int, default=1000, help=\n 'the number of iterations')\n parser.add_argument('--b', type=int, default=16, help='batch size')\n parser.add_argument('--modeldir', type=Path, default='modeldir', help=\n 'model output directory')\n parser.add_argument('--ext', type=str, default='.npy', help=\n 'extension of training data')\n parser.add_argument('--tw', type=int, default=128, help=\n 'time width of spectral envelope')\n parser.add_argument('--mb', type=int, default=36, help=\n 'mel bins of spectral envelope')\n parser.add_argument('--sr', type=int, default=22050, help=\n 'sampling rate of audio data')\n parser.add_argument('--glr', type=float, default=0.0002, help=\n 'learning rate of Adam on generator')\n parser.add_argument('--dlr', type=float, default=0.0001, help=\n 'learning rate of Adam on discriminator')\n parser.add_argument('--b1', type=float, default=0.5, help='beta1 of Adam')\n parser.add_argument('--b2', type=float, default=0.999, help='beta2 of Adam'\n )\n parser.add_argument('--ie', type=int, default=20, help=\n 'time spans enabling identity mapping loss')\n parser.add_argument('--second', action='store_true', help=\n 'enabling second step of adversaria loss')\n parser.add_argument('--src', type=Path, help=\n 'path which includes source data')\n parser.add_argument('--tgt', type=Path, help=\n 'path which includes target data')\n args = parser.parse_args()\n modeldir = args.modeldir\n modeldir.mkdir(exist_ok=True)\n train(args.e, args.i, args.b, modeldir, args.ext, args.tw, args.mb,\n args.sr, args.glr, args.dlr, args.b1, args.b2, args.ie, args.second,\n args.src, args.tgt)\n", "step-4": "<mask token>\nxp = cuda.cupy\ncuda.get_device(0).use()\n\n\nclass CycleGANVC2LossCalculator:\n\n def __init__(self):\n pass\n\n @staticmethod\n def dis_loss(discriminator, y, t):\n 
y_dis = discriminator(y)\n t_dis = discriminator(t)\n return F.mean(F.softplus(-t_dis)) + F.mean(F.softplus(y_dis))\n\n @staticmethod\n def gen_loss(discriminator, y):\n y_dis = discriminator(y)\n return F.mean(F.softplus(-y_dis))\n\n @staticmethod\n def cycle_loss(y, t):\n return 10.0 * F.mean_absolute_error(y, t)\n\n @staticmethod\n def identity_loss(y, t):\n return 5.0 * F.mean_absolute_error(y, t)\n\n\ndef train(epochs, iterations, batchsize, modeldir, extension, time_width,\n mel_bins, sampling_rate, g_learning_rate, d_learning_rate, beta1, beta2,\n identity_epoch, second_step, src_path, tgt_path):\n dataset = DatasetLoader(src_path, tgt_path, extension, time_width,\n mel_bins, sampling_rate)\n print(dataset)\n generator_xy = Generator()\n generator_xy.to_gpu()\n gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2)\n generator_yx = Generator()\n generator_yx.to_gpu()\n gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2)\n discriminator_y = Discriminator()\n discriminator_y.to_gpu()\n dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2)\n discriminator_x = Discriminator()\n discriminator_x.to_gpu()\n dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2)\n discriminator_xyx = Discriminator()\n discriminator_xyx.to_gpu()\n dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1,\n beta2)\n discriminator_yxy = Discriminator()\n discriminator_yxy.to_gpu()\n dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1,\n beta2)\n lossfunc = CycleGANVC2LossCalculator()\n for epoch in range(epochs):\n sum_dis_loss = 0\n sum_gen_loss = 0\n for batch in range(0, iterations, batchsize):\n x, y = dataset.train(batchsize)\n xy = generator_xy(x)\n xyx = generator_yx(xy)\n yx = generator_yx(y)\n yxy = generator_xy(yx)\n xy.unchain_backward()\n xyx.unchain_backward()\n yx.unchain_backward()\n yxy.unchain_backward()\n dis_loss = lossfunc.dis_loss(discriminator_y, xy, y)\n 
dis_loss += lossfunc.dis_loss(discriminator_x, yx, x)\n if second_step:\n dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x)\n dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y)\n discriminator_xyx.cleargrads()\n discriminator_yxy.cleargrads()\n discriminator_x.cleargrads()\n discriminator_y.cleargrads()\n dis_loss.backward()\n dis_x_opt.update()\n dis_y_opt.update()\n if second_step:\n dis_xyx_opt.update()\n dis_yxy_opt.update()\n dis_loss.unchain_backward()\n xy = generator_xy(x)\n xyx = generator_yx(xy)\n id_y = generator_xy(y)\n yx = generator_yx(y)\n yxy = generator_xy(yx)\n id_x = generator_yx(x)\n gen_loss = lossfunc.gen_loss(discriminator_y, xy)\n gen_loss += lossfunc.gen_loss(discriminator_x, yx)\n if second_step:\n gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy)\n gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx)\n gen_loss += lossfunc.cycle_loss(x, xyx)\n gen_loss += lossfunc.cycle_loss(y, xyx)\n if epoch < identity_epoch:\n gen_loss += lossfunc.identity_loss(id_y, y)\n gen_loss += lossfunc.identity_loss(id_x, x)\n generator_xy.cleargrads()\n generator_yx.cleargrads()\n gen_loss.backward()\n gen_xy_opt.update()\n gen_yx_opt.update()\n gen_loss.unchain_backward()\n sum_dis_loss += dis_loss.data\n sum_gen_loss += gen_loss.data\n if batch == 0:\n serializers.save_npz(f'{modeldir}/generator_xy_{epoch}.model',\n generator_xy)\n serializers.save_npz(f'{modeldir}/generator_yx_{epoch}.model',\n generator_yx)\n print('epoch : {}'.format(epoch))\n print('Generator loss : {}'.format(sum_gen_loss / iterations))\n print('Discriminator loss : {}'.format(sum_dis_loss / iterations))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='StarGANVC2')\n parser.add_argument('--e', type=int, default=50, help=\n 'the number of epochs')\n parser.add_argument('--i', type=int, default=1000, help=\n 'the number of iterations')\n parser.add_argument('--b', type=int, default=16, help='batch size')\n parser.add_argument('--modeldir', 
type=Path, default='modeldir', help=\n 'model output directory')\n parser.add_argument('--ext', type=str, default='.npy', help=\n 'extension of training data')\n parser.add_argument('--tw', type=int, default=128, help=\n 'time width of spectral envelope')\n parser.add_argument('--mb', type=int, default=36, help=\n 'mel bins of spectral envelope')\n parser.add_argument('--sr', type=int, default=22050, help=\n 'sampling rate of audio data')\n parser.add_argument('--glr', type=float, default=0.0002, help=\n 'learning rate of Adam on generator')\n parser.add_argument('--dlr', type=float, default=0.0001, help=\n 'learning rate of Adam on discriminator')\n parser.add_argument('--b1', type=float, default=0.5, help='beta1 of Adam')\n parser.add_argument('--b2', type=float, default=0.999, help='beta2 of Adam'\n )\n parser.add_argument('--ie', type=int, default=20, help=\n 'time spans enabling identity mapping loss')\n parser.add_argument('--second', action='store_true', help=\n 'enabling second step of adversaria loss')\n parser.add_argument('--src', type=Path, help=\n 'path which includes source data')\n parser.add_argument('--tgt', type=Path, help=\n 'path which includes target data')\n args = parser.parse_args()\n modeldir = args.modeldir\n modeldir.mkdir(exist_ok=True)\n train(args.e, args.i, args.b, modeldir, args.ext, args.tw, args.mb,\n args.sr, args.glr, args.dlr, args.b1, args.b2, args.ie, args.second,\n args.src, args.tgt)\n", "step-5": "import chainer\nimport chainer.functions as F\nimport numpy as np\nimport argparse\n\nfrom model import Generator, Discriminator\nfrom chainer import cuda, serializers\nfrom pathlib import Path\nfrom utils import set_optimizer\nfrom dataset import DatasetLoader\n\nxp = cuda.cupy\ncuda.get_device(0).use()\n\n\nclass CycleGANVC2LossCalculator:\n def __init__(self):\n pass\n\n @staticmethod\n def dis_loss(discriminator, y, t):\n y_dis = discriminator(y)\n t_dis = discriminator(t)\n\n return F.mean(F.softplus(-t_dis)) + 
F.mean(F.softplus(y_dis))\n\n @staticmethod\n def gen_loss(discriminator, y):\n y_dis = discriminator(y)\n\n return F.mean(F.softplus(-y_dis))\n\n @staticmethod\n def cycle_loss(y, t):\n return 10.0 * F.mean_absolute_error(y, t)\n\n @staticmethod\n def identity_loss(y, t):\n return 5.0 * F.mean_absolute_error(y, t)\n\n\ndef train(epochs,\n iterations,\n batchsize,\n modeldir,\n extension,\n time_width,\n mel_bins,\n sampling_rate,\n g_learning_rate,\n d_learning_rate,\n beta1,\n beta2,\n identity_epoch,\n second_step,\n src_path,\n tgt_path):\n\n # Dataset definiton\n dataset = DatasetLoader(src_path,\n tgt_path,\n extension,\n time_width,\n mel_bins,\n sampling_rate)\n print(dataset)\n\n # Model & Optimizer definition\n generator_xy = Generator()\n generator_xy.to_gpu()\n gen_xy_opt = set_optimizer(generator_xy, g_learning_rate, beta1, beta2)\n\n generator_yx = Generator()\n generator_yx.to_gpu()\n gen_yx_opt = set_optimizer(generator_yx, g_learning_rate, beta1, beta2)\n\n discriminator_y = Discriminator()\n discriminator_y.to_gpu()\n dis_y_opt = set_optimizer(discriminator_y, d_learning_rate, beta1, beta2)\n\n discriminator_x = Discriminator()\n discriminator_x.to_gpu()\n dis_x_opt = set_optimizer(discriminator_x, d_learning_rate, beta1, beta2)\n\n discriminator_xyx = Discriminator()\n discriminator_xyx.to_gpu()\n dis_xyx_opt = set_optimizer(discriminator_xyx, d_learning_rate, beta1, beta2)\n\n discriminator_yxy = Discriminator()\n discriminator_yxy.to_gpu()\n dis_yxy_opt = set_optimizer(discriminator_yxy, d_learning_rate, beta1, beta2)\n\n # Loss function definition\n lossfunc = CycleGANVC2LossCalculator()\n\n for epoch in range(epochs):\n sum_dis_loss = 0\n sum_gen_loss = 0\n\n for batch in range(0, iterations, batchsize):\n x, y = dataset.train(batchsize)\n\n xy = generator_xy(x)\n xyx = generator_yx(xy)\n\n yx = generator_yx(y)\n yxy = generator_xy(yx)\n\n xy.unchain_backward()\n xyx.unchain_backward()\n yx.unchain_backward()\n yxy.unchain_backward()\n\n 
dis_loss = lossfunc.dis_loss(discriminator_y, xy, y)\n dis_loss += lossfunc.dis_loss(discriminator_x, yx, x)\n\n if second_step:\n dis_loss += lossfunc.dis_loss(discriminator_xyx, xyx, x)\n dis_loss += lossfunc.dis_loss(discriminator_yxy, yxy, y)\n\n discriminator_xyx.cleargrads()\n discriminator_yxy.cleargrads()\n\n discriminator_x.cleargrads()\n discriminator_y.cleargrads()\n\n dis_loss.backward()\n dis_x_opt.update()\n dis_y_opt.update()\n\n if second_step:\n dis_xyx_opt.update()\n dis_yxy_opt.update()\n\n dis_loss.unchain_backward()\n\n xy = generator_xy(x)\n xyx = generator_yx(xy)\n id_y = generator_xy(y)\n\n yx = generator_yx(y)\n yxy = generator_xy(yx)\n id_x = generator_yx(x)\n\n gen_loss = lossfunc.gen_loss(discriminator_y, xy)\n gen_loss += lossfunc.gen_loss(discriminator_x, yx)\n\n if second_step:\n gen_loss += lossfunc.gen_loss(discriminator_yxy, yxy)\n gen_loss += lossfunc.gen_loss(discriminator_xyx, xyx)\n\n gen_loss += lossfunc.cycle_loss(x, xyx)\n gen_loss += lossfunc.cycle_loss(y, xyx)\n\n if epoch < identity_epoch:\n gen_loss += lossfunc.identity_loss(id_y, y)\n gen_loss += lossfunc.identity_loss(id_x, x)\n\n generator_xy.cleargrads()\n generator_yx.cleargrads()\n gen_loss.backward()\n gen_xy_opt.update()\n gen_yx_opt.update()\n gen_loss.unchain_backward()\n\n sum_dis_loss += dis_loss.data\n sum_gen_loss += gen_loss.data\n\n if batch == 0:\n serializers.save_npz(f\"{modeldir}/generator_xy_{epoch}.model\", generator_xy)\n serializers.save_npz(f\"{modeldir}/generator_yx_{epoch}.model\", generator_yx)\n\n print('epoch : {}'.format(epoch))\n print('Generator loss : {}'.format(sum_gen_loss / iterations))\n print('Discriminator loss : {}'.format(sum_dis_loss / iterations))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"StarGANVC2\")\n parser.add_argument('--e', type=int, default=50, help=\"the number of epochs\")\n parser.add_argument('--i', type=int, default=1000, help=\"the number of iterations\")\n 
parser.add_argument('--b', type=int, default=16, help=\"batch size\")\n parser.add_argument('--modeldir', type=Path, default=\"modeldir\", help=\"model output directory\")\n parser.add_argument('--ext', type=str, default=\".npy\", help=\"extension of training data\")\n parser.add_argument('--tw', type=int, default=128, help=\"time width of spectral envelope\")\n parser.add_argument('--mb', type=int, default=36, help=\"mel bins of spectral envelope\")\n parser.add_argument('--sr', type=int, default=22050, help=\"sampling rate of audio data\")\n parser.add_argument('--glr', type=float, default=0.0002, help=\"learning rate of Adam on generator\")\n parser.add_argument('--dlr', type=float, default=0.0001, help=\"learning rate of Adam on discriminator\")\n parser.add_argument('--b1', type=float, default=0.5, help=\"beta1 of Adam\")\n parser.add_argument('--b2', type=float, default=0.999, help=\"beta2 of Adam\")\n parser.add_argument('--ie', type=int, default=20, help=\"time spans enabling identity mapping loss\")\n parser.add_argument('--second', action=\"store_true\", help=\"enabling second step of adversaria loss\")\n parser.add_argument('--src', type=Path, help=\"path which includes source data\")\n parser.add_argument('--tgt', type=Path, help=\"path which includes target data\")\n args = parser.parse_args()\n\n modeldir = args.modeldir\n modeldir.mkdir(exist_ok=True)\n\n train(args.e, args.i, args.b, modeldir, args.ext, args.tw, args.mb, args.sr,\n args.glr, args.dlr, args.b1, args.b2, args.ie, args.second,\n args.src, args.tgt)\n", "step-ids": [ 3, 7, 8, 9, 11 ] }
[ 3, 7, 8, 9, 11 ]
# -*- coding: utf-8 -*- """ Created on Thu Jun 28 16:36:56 2018 @author: Alex """ #%% Import packages import pickle import numpy as np import matplotlib.pyplot as plt import networkx as nx import os os.chdir('C:\\Users\\Alex\\Documents\\GitHub\\insight-articles-project\\src\\topic modeling\\') from plotly_network import plot #%% Load data # Load metatopic allocations processed_data_folder = 'C:\\Users\\Alex\\Documents\\GitHub\\insight-articles-project\\data\\processed\\' filename = processed_data_folder + 'topic_assignments' with open(filename, 'rb') as fp: topic_assignments, meta_topic_assignments = pickle.load(fp) # Load distance matrix filename = processed_data_folder + 'graph_and_labels' with open(filename, 'rb') as fp: graph_mat,topic_labels,dist_mat,doc_topic_mat = pickle.load(fp) #%% Loop through meta-topics plt.close() #for meta_topic in np.unique(meta_topic_assignments): meta_topic = 0 # Find the sub topics sub_topics, = np.where(meta_topic_assignments == meta_topic) # Get the distance matrix just for those topics sub_dist_mat = dist_mat[sub_topics][:,sub_topics] # Generate the graph matrix by selecting an appropriate threshold graph_mat = sub_dist_mat < 0.95 if not np.any(graph_mat): min_val = np.min(sub_dist_mat) graph_mat = sub_dist_mat <= min_val # Find the docs belonging to that subtopic #docs = np.in1d(topic_assignments,sub_topics) # Get subtopic labels sub_topic_labels = {sub_topic:topic_labels[sub_topic] for sub_topic in sub_topics if sub_topic in topic_labels} new_sub_topic_labels = {} # # Rename the keys for counter, value in enumerate(sub_topic_labels.keys()): new_sub_topic_labels[counter] = sub_topic_labels[value] # Plot the graph plt.figure() G = nx.from_numpy_matrix(graph_mat) #pos = nx.graphviz_layout(G) #pos = nx.nx_agraph.graphviz_layout(G) #pos=nx.spring_layout(G) pos = nx.layout.circular_layout(G) nx.relabel_nodes(G,sub_topic_labels) nx.draw(G,pos) nx.draw_networkx_labels(G,pos,new_sub_topic_labels,font_size=16) node_labels = 
list(sub_topic_labels.values()) #%% Calculate text positions text_pos = [] for key, value in pos.items(): if value[0] < 0: pos_part2 = ' left' else: pos_part2 = ' right' if value[1] < 0: pos_part1 = 'bottom' else: pos_part1 = 'top' text_pos.append(pos_part1 + pos_part2) #%% Plot in plot url = plot(G,pos,node_labels,text_pos)
normal
{ "blob_id": "d98db745be2ab9c506a98539b25e9b46e4997136", "index": 3422, "step-1": "<mask token>\n", "step-2": "<mask token>\nos.chdir(\n 'C:\\\\Users\\\\Alex\\\\Documents\\\\GitHub\\\\insight-articles-project\\\\src\\\\topic modeling\\\\'\n )\n<mask token>\nwith open(filename, 'rb') as fp:\n topic_assignments, meta_topic_assignments = pickle.load(fp)\n<mask token>\nwith open(filename, 'rb') as fp:\n graph_mat, topic_labels, dist_mat, doc_topic_mat = pickle.load(fp)\nplt.close()\n<mask token>\nif not np.any(graph_mat):\n min_val = np.min(sub_dist_mat)\n graph_mat = sub_dist_mat <= min_val\n<mask token>\nfor counter, value in enumerate(sub_topic_labels.keys()):\n new_sub_topic_labels[counter] = sub_topic_labels[value]\nplt.figure()\n<mask token>\nnx.relabel_nodes(G, sub_topic_labels)\nnx.draw(G, pos)\nnx.draw_networkx_labels(G, pos, new_sub_topic_labels, font_size=16)\n<mask token>\nfor key, value in pos.items():\n if value[0] < 0:\n pos_part2 = ' left'\n else:\n pos_part2 = ' right'\n if value[1] < 0:\n pos_part1 = 'bottom'\n else:\n pos_part1 = 'top'\n text_pos.append(pos_part1 + pos_part2)\n<mask token>\n", "step-3": "<mask token>\nos.chdir(\n 'C:\\\\Users\\\\Alex\\\\Documents\\\\GitHub\\\\insight-articles-project\\\\src\\\\topic modeling\\\\'\n )\n<mask token>\nprocessed_data_folder = (\n 'C:\\\\Users\\\\Alex\\\\Documents\\\\GitHub\\\\insight-articles-project\\\\data\\\\processed\\\\'\n )\nfilename = processed_data_folder + 'topic_assignments'\nwith open(filename, 'rb') as fp:\n topic_assignments, meta_topic_assignments = pickle.load(fp)\nfilename = processed_data_folder + 'graph_and_labels'\nwith open(filename, 'rb') as fp:\n graph_mat, topic_labels, dist_mat, doc_topic_mat = pickle.load(fp)\nplt.close()\nmeta_topic = 0\nsub_topics, = np.where(meta_topic_assignments == meta_topic)\nsub_dist_mat = dist_mat[sub_topics][:, sub_topics]\ngraph_mat = sub_dist_mat < 0.95\nif not np.any(graph_mat):\n min_val = np.min(sub_dist_mat)\n graph_mat = sub_dist_mat <= 
min_val\nsub_topic_labels = {sub_topic: topic_labels[sub_topic] for sub_topic in\n sub_topics if sub_topic in topic_labels}\nnew_sub_topic_labels = {}\nfor counter, value in enumerate(sub_topic_labels.keys()):\n new_sub_topic_labels[counter] = sub_topic_labels[value]\nplt.figure()\nG = nx.from_numpy_matrix(graph_mat)\npos = nx.layout.circular_layout(G)\nnx.relabel_nodes(G, sub_topic_labels)\nnx.draw(G, pos)\nnx.draw_networkx_labels(G, pos, new_sub_topic_labels, font_size=16)\nnode_labels = list(sub_topic_labels.values())\ntext_pos = []\nfor key, value in pos.items():\n if value[0] < 0:\n pos_part2 = ' left'\n else:\n pos_part2 = ' right'\n if value[1] < 0:\n pos_part1 = 'bottom'\n else:\n pos_part1 = 'top'\n text_pos.append(pos_part1 + pos_part2)\nurl = plot(G, pos, node_labels, text_pos)\n", "step-4": "<mask token>\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport os\nos.chdir(\n 'C:\\\\Users\\\\Alex\\\\Documents\\\\GitHub\\\\insight-articles-project\\\\src\\\\topic modeling\\\\'\n )\nfrom plotly_network import plot\nprocessed_data_folder = (\n 'C:\\\\Users\\\\Alex\\\\Documents\\\\GitHub\\\\insight-articles-project\\\\data\\\\processed\\\\'\n )\nfilename = processed_data_folder + 'topic_assignments'\nwith open(filename, 'rb') as fp:\n topic_assignments, meta_topic_assignments = pickle.load(fp)\nfilename = processed_data_folder + 'graph_and_labels'\nwith open(filename, 'rb') as fp:\n graph_mat, topic_labels, dist_mat, doc_topic_mat = pickle.load(fp)\nplt.close()\nmeta_topic = 0\nsub_topics, = np.where(meta_topic_assignments == meta_topic)\nsub_dist_mat = dist_mat[sub_topics][:, sub_topics]\ngraph_mat = sub_dist_mat < 0.95\nif not np.any(graph_mat):\n min_val = np.min(sub_dist_mat)\n graph_mat = sub_dist_mat <= min_val\nsub_topic_labels = {sub_topic: topic_labels[sub_topic] for sub_topic in\n sub_topics if sub_topic in topic_labels}\nnew_sub_topic_labels = {}\nfor counter, value in 
enumerate(sub_topic_labels.keys()):\n new_sub_topic_labels[counter] = sub_topic_labels[value]\nplt.figure()\nG = nx.from_numpy_matrix(graph_mat)\npos = nx.layout.circular_layout(G)\nnx.relabel_nodes(G, sub_topic_labels)\nnx.draw(G, pos)\nnx.draw_networkx_labels(G, pos, new_sub_topic_labels, font_size=16)\nnode_labels = list(sub_topic_labels.values())\ntext_pos = []\nfor key, value in pos.items():\n if value[0] < 0:\n pos_part2 = ' left'\n else:\n pos_part2 = ' right'\n if value[1] < 0:\n pos_part1 = 'bottom'\n else:\n pos_part1 = 'top'\n text_pos.append(pos_part1 + pos_part2)\nurl = plot(G, pos, node_labels, text_pos)\n", "step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 28 16:36:56 2018\n\n@author: Alex\n\"\"\"\n\n#%% Import packages \nimport pickle \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport os \nos.chdir('C:\\\\Users\\\\Alex\\\\Documents\\\\GitHub\\\\insight-articles-project\\\\src\\\\topic modeling\\\\')\nfrom plotly_network import plot\n\n\n#%% Load data\n# Load metatopic allocations \nprocessed_data_folder = 'C:\\\\Users\\\\Alex\\\\Documents\\\\GitHub\\\\insight-articles-project\\\\data\\\\processed\\\\'\nfilename = processed_data_folder + 'topic_assignments'\n\nwith open(filename, 'rb') as fp:\n topic_assignments, meta_topic_assignments = pickle.load(fp)\n \n# Load distance matrix \nfilename = processed_data_folder + 'graph_and_labels'\n\nwith open(filename, 'rb') as fp:\n graph_mat,topic_labels,dist_mat,doc_topic_mat = pickle.load(fp) \n \n\n#%% Loop through meta-topics \nplt.close()\n#for meta_topic in np.unique(meta_topic_assignments):\nmeta_topic = 0\n# Find the sub topics \nsub_topics, = np.where(meta_topic_assignments == meta_topic)\n\n# Get the distance matrix just for those topics \nsub_dist_mat = dist_mat[sub_topics][:,sub_topics]\n\n# Generate the graph matrix by selecting an appropriate threshold\ngraph_mat = sub_dist_mat < 0.95\nif not np.any(graph_mat):\n min_val = np.min(sub_dist_mat)\n 
graph_mat = sub_dist_mat <= min_val\n\n# Find the docs belonging to that subtopic \n#docs = np.in1d(topic_assignments,sub_topics)\n\n# Get subtopic labels \nsub_topic_labels = {sub_topic:topic_labels[sub_topic] for sub_topic in sub_topics if sub_topic in topic_labels}\nnew_sub_topic_labels = {}\n\n# \n\n# Rename the keys \nfor counter, value in enumerate(sub_topic_labels.keys()):\n new_sub_topic_labels[counter] = sub_topic_labels[value]\n\n\n# Plot the graph \nplt.figure()\nG = nx.from_numpy_matrix(graph_mat) \n#pos = nx.graphviz_layout(G)\n#pos = nx.nx_agraph.graphviz_layout(G)\n#pos=nx.spring_layout(G)\npos = nx.layout.circular_layout(G)\nnx.relabel_nodes(G,sub_topic_labels)\nnx.draw(G,pos)\nnx.draw_networkx_labels(G,pos,new_sub_topic_labels,font_size=16)\n\nnode_labels = list(sub_topic_labels.values())\n\n#%% Calculate text positions\ntext_pos = []\nfor key, value in pos.items():\n if value[0] < 0:\n pos_part2 = ' left'\n else:\n pos_part2 = ' right'\n if value[1] < 0:\n pos_part1 = 'bottom'\n else:\n pos_part1 = 'top'\n text_pos.append(pos_part1 + pos_part2)\n\n#%% Plot in plot \nurl = plot(G,pos,node_labels,text_pos)\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
# -*- coding: utf-8 -*- # caixinjun import argparse from sklearn import metrics import datetime import jieba from sklearn.feature_extraction.text import TfidfVectorizer import pickle from sklearn import svm import os import warnings warnings.filterwarnings('ignore') def get_data(train_file): target = [] data = [] with open(train_file, 'r', encoding='utf-8') as f: for line in f.readlines(): line = line.strip().split("\t") if len(line) == 1: continue target.append(int(line[0])) data.append(line[1]) data = list(map(jieba.lcut, data)) data = [" ".join(d) for d in data] return data, target def train(cls, data, target, model_path): cls = cls.fit(data, target) with open(model_path, 'wb') as f: pickle.dump(cls, f) def trans(data, matrix_path, stopword_path): with open(stopword_path, 'r', encoding='utf-8') as fs: stop_words = [line.strip() for line in fs.readline()] tfidf = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b", stop_words=stop_words) features = tfidf.fit_transform(data) with open(matrix_path, 'wb') as f: pickle.dump(tfidf, f) return features def load_models(matrix_path, model_path): tfidf, cls = None, None if os.path.isfile(model_path): with open(model_path, 'rb') as f: cls = pickle.load(f) if os.path.isfile(matrix_path): with open(matrix_path, 'rb') as f: tfidf = pickle.load(f) return tfidf, cls def test(matrix_path, model_path, data_path, outdir): curr_time = datetime.datetime.now() time_str = curr_time.strftime("%Y-%m-%d %H-%M-%S") out_path = outdir + '/%s/' % time_str out_file = os.path.join(out_path, "results.txt") if not os.path.exists(out_path): os.makedirs(out_path) data, target = get_data(data_path) tfidf, cls = load_models(matrix_path, model_path) if tfidf==None or cls==None: print("cannot load models........") return feature = tfidf.transform(data) predicted = cls.predict(feature) acc = metrics.accuracy_score(target, predicted) pre = metrics.precision_score(target, predicted) recall = metrics.recall_score(target, predicted) f1 = metrics.f1_score(target, 
predicted) fpr, tpr, thresholds = metrics.roc_curve(target, predicted) auc = metrics.auc(fpr, tpr) print("accuracy_score: ", acc) print("precision_score: ", pre) print("recall_score: ", recall) print("f1_score: ", f1) print("auc: ", auc) with open(out_file, 'w', encoding='utf-8') as f: for label in predicted: f.write(str(label) + '\n') if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--train', type=str, default='./data/train.txt', help='training data') parser.add_argument('--test', type=str, default='./data/test.txt', help='test data') parser.add_argument('--stopwords', type=str, default='./data/hit_stopwords.txt', help='stop words') parser.add_argument('--model', type=str, default='./model/svm_model.pkl', help='classification model') parser.add_argument('--matrix', type=str, default='./model/tfidf.pkl', help='tfidf model') parser.add_argument('--outpath', type=str, default='./results/', help='out path') args = parser.parse_args() print("data processing.......") data, target = get_data(args.train) print("transform data.......") features = trans(data, args.matrix, args.stopwords) print("training model.......") cls = svm.LinearSVC() train(cls, features, target, args.model) print("test.......") test(args.matrix, args.model, args.test, args.outpath)
normal
{ "blob_id": "199872ea459a9dba9975c6531034bdbc1e77f1db", "index": 5875, "step-1": "<mask token>\n\n\ndef train(cls, data, target, model_path):\n cls = cls.fit(data, target)\n with open(model_path, 'wb') as f:\n pickle.dump(cls, f)\n\n\n<mask token>\n\n\ndef load_models(matrix_path, model_path):\n tfidf, cls = None, None\n if os.path.isfile(model_path):\n with open(model_path, 'rb') as f:\n cls = pickle.load(f)\n if os.path.isfile(matrix_path):\n with open(matrix_path, 'rb') as f:\n tfidf = pickle.load(f)\n return tfidf, cls\n\n\ndef test(matrix_path, model_path, data_path, outdir):\n curr_time = datetime.datetime.now()\n time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')\n out_path = outdir + '/%s/' % time_str\n out_file = os.path.join(out_path, 'results.txt')\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n data, target = get_data(data_path)\n tfidf, cls = load_models(matrix_path, model_path)\n if tfidf == None or cls == None:\n print('cannot load models........')\n return\n feature = tfidf.transform(data)\n predicted = cls.predict(feature)\n acc = metrics.accuracy_score(target, predicted)\n pre = metrics.precision_score(target, predicted)\n recall = metrics.recall_score(target, predicted)\n f1 = metrics.f1_score(target, predicted)\n fpr, tpr, thresholds = metrics.roc_curve(target, predicted)\n auc = metrics.auc(fpr, tpr)\n print('accuracy_score: ', acc)\n print('precision_score: ', pre)\n print('recall_score: ', recall)\n print('f1_score: ', f1)\n print('auc: ', auc)\n with open(out_file, 'w', encoding='utf-8') as f:\n for label in predicted:\n f.write(str(label) + '\\n')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef train(cls, data, target, model_path):\n cls = cls.fit(data, target)\n with open(model_path, 'wb') as f:\n pickle.dump(cls, f)\n\n\ndef trans(data, matrix_path, stopword_path):\n with open(stopword_path, 'r', encoding='utf-8') as fs:\n stop_words = [line.strip() for line in fs.readline()]\n tfidf = 
TfidfVectorizer(token_pattern='(?u)\\\\b\\\\w+\\\\b', stop_words=\n stop_words)\n features = tfidf.fit_transform(data)\n with open(matrix_path, 'wb') as f:\n pickle.dump(tfidf, f)\n return features\n\n\ndef load_models(matrix_path, model_path):\n tfidf, cls = None, None\n if os.path.isfile(model_path):\n with open(model_path, 'rb') as f:\n cls = pickle.load(f)\n if os.path.isfile(matrix_path):\n with open(matrix_path, 'rb') as f:\n tfidf = pickle.load(f)\n return tfidf, cls\n\n\ndef test(matrix_path, model_path, data_path, outdir):\n curr_time = datetime.datetime.now()\n time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')\n out_path = outdir + '/%s/' % time_str\n out_file = os.path.join(out_path, 'results.txt')\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n data, target = get_data(data_path)\n tfidf, cls = load_models(matrix_path, model_path)\n if tfidf == None or cls == None:\n print('cannot load models........')\n return\n feature = tfidf.transform(data)\n predicted = cls.predict(feature)\n acc = metrics.accuracy_score(target, predicted)\n pre = metrics.precision_score(target, predicted)\n recall = metrics.recall_score(target, predicted)\n f1 = metrics.f1_score(target, predicted)\n fpr, tpr, thresholds = metrics.roc_curve(target, predicted)\n auc = metrics.auc(fpr, tpr)\n print('accuracy_score: ', acc)\n print('precision_score: ', pre)\n print('recall_score: ', recall)\n print('f1_score: ', f1)\n print('auc: ', auc)\n with open(out_file, 'w', encoding='utf-8') as f:\n for label in predicted:\n f.write(str(label) + '\\n')\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef get_data(train_file):\n target = []\n data = []\n with open(train_file, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip().split('\\t')\n if len(line) == 1:\n continue\n target.append(int(line[0]))\n data.append(line[1])\n data = list(map(jieba.lcut, data))\n data = [' '.join(d) for d in data]\n return data, target\n\n\ndef train(cls, data, target, 
model_path):\n cls = cls.fit(data, target)\n with open(model_path, 'wb') as f:\n pickle.dump(cls, f)\n\n\ndef trans(data, matrix_path, stopword_path):\n with open(stopword_path, 'r', encoding='utf-8') as fs:\n stop_words = [line.strip() for line in fs.readline()]\n tfidf = TfidfVectorizer(token_pattern='(?u)\\\\b\\\\w+\\\\b', stop_words=\n stop_words)\n features = tfidf.fit_transform(data)\n with open(matrix_path, 'wb') as f:\n pickle.dump(tfidf, f)\n return features\n\n\ndef load_models(matrix_path, model_path):\n tfidf, cls = None, None\n if os.path.isfile(model_path):\n with open(model_path, 'rb') as f:\n cls = pickle.load(f)\n if os.path.isfile(matrix_path):\n with open(matrix_path, 'rb') as f:\n tfidf = pickle.load(f)\n return tfidf, cls\n\n\ndef test(matrix_path, model_path, data_path, outdir):\n curr_time = datetime.datetime.now()\n time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')\n out_path = outdir + '/%s/' % time_str\n out_file = os.path.join(out_path, 'results.txt')\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n data, target = get_data(data_path)\n tfidf, cls = load_models(matrix_path, model_path)\n if tfidf == None or cls == None:\n print('cannot load models........')\n return\n feature = tfidf.transform(data)\n predicted = cls.predict(feature)\n acc = metrics.accuracy_score(target, predicted)\n pre = metrics.precision_score(target, predicted)\n recall = metrics.recall_score(target, predicted)\n f1 = metrics.f1_score(target, predicted)\n fpr, tpr, thresholds = metrics.roc_curve(target, predicted)\n auc = metrics.auc(fpr, tpr)\n print('accuracy_score: ', acc)\n print('precision_score: ', pre)\n print('recall_score: ', recall)\n print('f1_score: ', f1)\n print('auc: ', auc)\n with open(out_file, 'w', encoding='utf-8') as f:\n for label in predicted:\n f.write(str(label) + '\\n')\n\n\n<mask token>\n", "step-4": "<mask token>\nwarnings.filterwarnings('ignore')\n\n\ndef get_data(train_file):\n target = []\n data = []\n with open(train_file, 
'r', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip().split('\\t')\n if len(line) == 1:\n continue\n target.append(int(line[0]))\n data.append(line[1])\n data = list(map(jieba.lcut, data))\n data = [' '.join(d) for d in data]\n return data, target\n\n\ndef train(cls, data, target, model_path):\n cls = cls.fit(data, target)\n with open(model_path, 'wb') as f:\n pickle.dump(cls, f)\n\n\ndef trans(data, matrix_path, stopword_path):\n with open(stopword_path, 'r', encoding='utf-8') as fs:\n stop_words = [line.strip() for line in fs.readline()]\n tfidf = TfidfVectorizer(token_pattern='(?u)\\\\b\\\\w+\\\\b', stop_words=\n stop_words)\n features = tfidf.fit_transform(data)\n with open(matrix_path, 'wb') as f:\n pickle.dump(tfidf, f)\n return features\n\n\ndef load_models(matrix_path, model_path):\n tfidf, cls = None, None\n if os.path.isfile(model_path):\n with open(model_path, 'rb') as f:\n cls = pickle.load(f)\n if os.path.isfile(matrix_path):\n with open(matrix_path, 'rb') as f:\n tfidf = pickle.load(f)\n return tfidf, cls\n\n\ndef test(matrix_path, model_path, data_path, outdir):\n curr_time = datetime.datetime.now()\n time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')\n out_path = outdir + '/%s/' % time_str\n out_file = os.path.join(out_path, 'results.txt')\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n data, target = get_data(data_path)\n tfidf, cls = load_models(matrix_path, model_path)\n if tfidf == None or cls == None:\n print('cannot load models........')\n return\n feature = tfidf.transform(data)\n predicted = cls.predict(feature)\n acc = metrics.accuracy_score(target, predicted)\n pre = metrics.precision_score(target, predicted)\n recall = metrics.recall_score(target, predicted)\n f1 = metrics.f1_score(target, predicted)\n fpr, tpr, thresholds = metrics.roc_curve(target, predicted)\n auc = metrics.auc(fpr, tpr)\n print('accuracy_score: ', acc)\n print('precision_score: ', pre)\n print('recall_score: ', recall)\n 
print('f1_score: ', f1)\n print('auc: ', auc)\n with open(out_file, 'w', encoding='utf-8') as f:\n for label in predicted:\n f.write(str(label) + '\\n')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', type=str, default='./data/train.txt',\n help='training data')\n parser.add_argument('--test', type=str, default='./data/test.txt', help\n ='test data')\n parser.add_argument('--stopwords', type=str, default=\n './data/hit_stopwords.txt', help='stop words')\n parser.add_argument('--model', type=str, default=\n './model/svm_model.pkl', help='classification model')\n parser.add_argument('--matrix', type=str, default='./model/tfidf.pkl',\n help='tfidf model')\n parser.add_argument('--outpath', type=str, default='./results/', help=\n 'out path')\n args = parser.parse_args()\n print('data processing.......')\n data, target = get_data(args.train)\n print('transform data.......')\n features = trans(data, args.matrix, args.stopwords)\n print('training model.......')\n cls = svm.LinearSVC()\n train(cls, features, target, args.model)\n print('test.......')\n test(args.matrix, args.model, args.test, args.outpath)\n", "step-5": "# -*- coding: utf-8 -*-\r\n# caixinjun\r\n\r\nimport argparse\r\nfrom sklearn import metrics\r\nimport datetime\r\nimport jieba\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nimport pickle\r\nfrom sklearn import svm\r\nimport os\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n\r\ndef get_data(train_file):\r\n target = []\r\n data = []\r\n with open(train_file, 'r', encoding='utf-8') as f:\r\n for line in f.readlines():\r\n line = line.strip().split(\"\\t\")\r\n if len(line) == 1:\r\n continue\r\n target.append(int(line[0]))\r\n data.append(line[1])\r\n data = list(map(jieba.lcut, data))\r\n data = [\" \".join(d) for d in data]\r\n return data, target\r\n\r\n\r\ndef train(cls, data, target, model_path):\r\n cls = cls.fit(data, target)\r\n with open(model_path, 'wb') as 
f:\r\n pickle.dump(cls, f)\r\n\r\ndef trans(data, matrix_path, stopword_path):\r\n with open(stopword_path, 'r', encoding='utf-8') as fs:\r\n stop_words = [line.strip() for line in fs.readline()]\r\n tfidf = TfidfVectorizer(token_pattern=r\"(?u)\\b\\w+\\b\", stop_words=stop_words)\r\n features = tfidf.fit_transform(data)\r\n with open(matrix_path, 'wb') as f:\r\n pickle.dump(tfidf, f)\r\n return features\r\n\r\n\r\ndef load_models(matrix_path, model_path):\r\n tfidf, cls = None, None\r\n if os.path.isfile(model_path):\r\n with open(model_path, 'rb') as f:\r\n cls = pickle.load(f)\r\n if os.path.isfile(matrix_path):\r\n with open(matrix_path, 'rb') as f:\r\n tfidf = pickle.load(f)\r\n return tfidf, cls\r\n\r\ndef test(matrix_path, model_path, data_path, outdir):\r\n\r\n curr_time = datetime.datetime.now()\r\n time_str = curr_time.strftime(\"%Y-%m-%d %H-%M-%S\")\r\n out_path = outdir + '/%s/' % time_str\r\n out_file = os.path.join(out_path, \"results.txt\")\r\n if not os.path.exists(out_path):\r\n os.makedirs(out_path)\r\n data, target = get_data(data_path)\r\n tfidf, cls = load_models(matrix_path, model_path)\r\n if tfidf==None or cls==None:\r\n print(\"cannot load models........\")\r\n return\r\n\r\n feature = tfidf.transform(data)\r\n predicted = cls.predict(feature)\r\n\r\n acc = metrics.accuracy_score(target, predicted)\r\n pre = metrics.precision_score(target, predicted)\r\n recall = metrics.recall_score(target, predicted)\r\n f1 = metrics.f1_score(target, predicted)\r\n fpr, tpr, thresholds = metrics.roc_curve(target, predicted)\r\n auc = metrics.auc(fpr, tpr)\r\n\r\n print(\"accuracy_score: \", acc)\r\n print(\"precision_score: \", pre)\r\n print(\"recall_score: \", recall)\r\n print(\"f1_score: \", f1)\r\n print(\"auc: \", auc)\r\n\r\n with open(out_file, 'w', encoding='utf-8') as f:\r\n for label in predicted:\r\n f.write(str(label) + '\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--train', 
type=str, default='./data/train.txt', help='training data')\r\n parser.add_argument('--test', type=str, default='./data/test.txt', help='test data')\r\n parser.add_argument('--stopwords', type=str, default='./data/hit_stopwords.txt', help='stop words')\r\n parser.add_argument('--model', type=str, default='./model/svm_model.pkl', help='classification model')\r\n parser.add_argument('--matrix', type=str, default='./model/tfidf.pkl', help='tfidf model')\r\n parser.add_argument('--outpath', type=str, default='./results/', help='out path')\r\n args = parser.parse_args()\r\n\r\n print(\"data processing.......\")\r\n data, target = get_data(args.train)\r\n\r\n print(\"transform data.......\")\r\n features = trans(data, args.matrix, args.stopwords)\r\n\r\n print(\"training model.......\")\r\n cls = svm.LinearSVC()\r\n train(cls, features, target, args.model)\r\n\r\n print(\"test.......\")\r\n test(args.matrix, args.model, args.test, args.outpath)\r\n\r\n", "step-ids": [ 3, 4, 5, 6, 8 ] }
[ 3, 4, 5, 6, 8 ]
<|reserved_special_token_0|> class Top(Elaboratable): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Top(Elaboratable): <|reserved_special_token_0|> def elaborate(self, platform): m = Module() m.d.comb += self.led.eq(self.counter[2]) m.d.sync += self.counter.eq(self.counter + 1) return m <|reserved_special_token_1|> <|reserved_special_token_0|> class Top(Elaboratable): def __init__(self): self.counter = Signal(3) self.led = Signal() def elaborate(self, platform): m = Module() m.d.comb += self.led.eq(self.counter[2]) m.d.sync += self.counter.eq(self.counter + 1) return m <|reserved_special_token_1|> from nmigen import * class Top(Elaboratable): def __init__(self): self.counter = Signal(3) self.led = Signal() def elaborate(self, platform): m = Module() m.d.comb += self.led.eq(self.counter[2]) m.d.sync += self.counter.eq(self.counter + 1) return m
flexible
{ "blob_id": "22b6ea64cdb109e1c6b2536b50935d09d37a7e1a", "index": 3057, "step-1": "<mask token>\n\n\nclass Top(Elaboratable):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Top(Elaboratable):\n <mask token>\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += self.led.eq(self.counter[2])\n m.d.sync += self.counter.eq(self.counter + 1)\n return m\n", "step-3": "<mask token>\n\n\nclass Top(Elaboratable):\n\n def __init__(self):\n self.counter = Signal(3)\n self.led = Signal()\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += self.led.eq(self.counter[2])\n m.d.sync += self.counter.eq(self.counter + 1)\n return m\n", "step-4": "from nmigen import *\n\n\nclass Top(Elaboratable):\n\n def __init__(self):\n self.counter = Signal(3)\n self.led = Signal()\n\n def elaborate(self, platform):\n m = Module()\n m.d.comb += self.led.eq(self.counter[2])\n m.d.sync += self.counter.eq(self.counter + 1)\n return m\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
from rest_framework import serializers #from rest_framework.response import Response from .models import Category, Product class RecursiveSerializer(serializers.Serializer): def to_representation(self, value): serializer = self.parent.parent.__class__(value, context=self.context) return serializer.data class CategorySerializers(serializers.ModelSerializer): childcategories = RecursiveSerializer(many=True, read_only=True) class Meta: model = Category fields = ('id', 'name', 'parent', 'childcategories',) class ProductSerializer(serializers.ModelSerializer): class Meta: model = Product fields = ('id', 'name', 'price', 'categories') #class CategorySerializers(serializers.ModelSerializer): # class Meta: # model = Category # fields = ('id', 'name', 'parent') #def get_fields(self): # fields = super(CategorySerializers, self).get_fields() # #fields['childcategories'] = CategorySerializers(many=True, allow_null=True) # return fields #class CategorySerializers(serializers.ModelSerializer): # class Meta: # model = Category # fields = ('id', 'name', 'parent')
normal
{ "blob_id": "cd9cc656a62728b3649b00c03ca8d05106015007", "index": 7949, "step-1": "<mask token>\n\n\nclass CategorySerializers(serializers.ModelSerializer):\n childcategories = RecursiveSerializer(many=True, read_only=True)\n\n\n class Meta:\n model = Category\n fields = 'id', 'name', 'parent', 'childcategories'\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = 'id', 'name', 'price', 'categories'\n", "step-2": "<mask token>\n\n\nclass RecursiveSerializer(serializers.Serializer):\n <mask token>\n\n\nclass CategorySerializers(serializers.ModelSerializer):\n childcategories = RecursiveSerializer(many=True, read_only=True)\n\n\n class Meta:\n model = Category\n fields = 'id', 'name', 'parent', 'childcategories'\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = 'id', 'name', 'price', 'categories'\n", "step-3": "<mask token>\n\n\nclass RecursiveSerializer(serializers.Serializer):\n\n def to_representation(self, value):\n serializer = self.parent.parent.__class__(value, context=self.context)\n return serializer.data\n\n\nclass CategorySerializers(serializers.ModelSerializer):\n childcategories = RecursiveSerializer(many=True, read_only=True)\n\n\n class Meta:\n model = Category\n fields = 'id', 'name', 'parent', 'childcategories'\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = 'id', 'name', 'price', 'categories'\n", "step-4": "from rest_framework import serializers\nfrom .models import Category, Product\n\n\nclass RecursiveSerializer(serializers.Serializer):\n\n def to_representation(self, value):\n serializer = self.parent.parent.__class__(value, context=self.context)\n return serializer.data\n\n\nclass CategorySerializers(serializers.ModelSerializer):\n childcategories = RecursiveSerializer(many=True, read_only=True)\n\n\n class Meta:\n model = Category\n fields = 'id', 'name', 'parent', 
'childcategories'\n\n\nclass ProductSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Product\n fields = 'id', 'name', 'price', 'categories'\n", "step-5": "from rest_framework import serializers\n#from rest_framework.response import Response\nfrom .models import Category, Product\n \nclass RecursiveSerializer(serializers.Serializer):\n def to_representation(self, value):\n serializer = self.parent.parent.__class__(value, context=self.context)\n return serializer.data\n\nclass CategorySerializers(serializers.ModelSerializer):\n childcategories = RecursiveSerializer(many=True, read_only=True)\n class Meta:\n model = Category\n fields = ('id', 'name', 'parent', 'childcategories',)\n\nclass ProductSerializer(serializers.ModelSerializer):\n class Meta:\n model = Product\n fields = ('id', 'name', 'price', 'categories')\n\n#class CategorySerializers(serializers.ModelSerializer):\n# class Meta:\n# model = Category\n# fields = ('id', 'name', 'parent')\n\n #def get_fields(self):\n # fields = super(CategorySerializers, self).get_fields()\n # #fields['childcategories'] = CategorySerializers(many=True, allow_null=True)\n # return fields\n\n\n#class CategorySerializers(serializers.ModelSerializer):\n# class Meta:\n# model = Category\n# fields = ('id', 'name', 'parent')", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
# 把函数视作对象 def factorial(n): """returns n!""" return 1 if n < 2 else n * factorial(n - 1) fact = factorial print(list(map(fact, range(6)))) # 构建 0! 和 5! 的一个阶乘列表。 print([fact(n) for n in range(6)]) # 使用列表推导执行相同的操作。 # filter() 函数用于过滤序列,过滤掉不符合条件的元素,返回一个迭代器对象,如果要转换为列表,可以使用 list() 来转换。 # 该接收两个参数,第一个为函数,第二个为序列,序列的每个元素作为参数传递给函数进行判,然后返回 True 或 False, # 最后将返回 True 的元素放到新列表中。 print(list(map(factorial, filter(lambda n: n % 2, range(6))))) # 使用 map 和 filter 计算直到 5! 的奇数阶乘列表。 print([factorial(n) for n in range(6) if n % 2]) # 使用列表推导做相同的工作,换掉 map 和 filter,并避免了使用 lambda 表达式。
normal
{ "blob_id": "4411c81351ac76d72512faaa6b498cd577815691", "index": 2572, "step-1": "<mask token>\n", "step-2": "def factorial(n):\n \"\"\"returns n!\"\"\"\n return 1 if n < 2 else n * factorial(n - 1)\n\n\n<mask token>\n", "step-3": "def factorial(n):\n \"\"\"returns n!\"\"\"\n return 1 if n < 2 else n * factorial(n - 1)\n\n\n<mask token>\nprint(list(map(fact, range(6))))\nprint([fact(n) for n in range(6)])\nprint(list(map(factorial, filter(lambda n: n % 2, range(6)))))\nprint([factorial(n) for n in range(6) if n % 2])\n", "step-4": "def factorial(n):\n \"\"\"returns n!\"\"\"\n return 1 if n < 2 else n * factorial(n - 1)\n\n\nfact = factorial\nprint(list(map(fact, range(6))))\nprint([fact(n) for n in range(6)])\nprint(list(map(factorial, filter(lambda n: n % 2, range(6)))))\nprint([factorial(n) for n in range(6) if n % 2])\n", "step-5": "# 把函数视作对象\r\ndef factorial(n):\r\n \"\"\"returns n!\"\"\"\r\n return 1 if n < 2 else n * factorial(n - 1)\r\n\r\n\r\nfact = factorial\r\n\r\nprint(list(map(fact, range(6)))) # 构建 0! 和 5! 的一个阶乘列表。\r\n\r\nprint([fact(n) for n in range(6)]) # 使用列表推导执行相同的操作。\r\n\r\n# filter() 函数用于过滤序列,过滤掉不符合条件的元素,返回一个迭代器对象,如果要转换为列表,可以使用 list() 来转换。\r\n# 该接收两个参数,第一个为函数,第二个为序列,序列的每个元素作为参数传递给函数进行判,然后返回 True 或 False,\r\n# 最后将返回 True 的元素放到新列表中。\r\nprint(list(map(factorial, filter(lambda n: n % 2, range(6))))) # 使用 map 和 filter 计算直到 5! 的奇数阶乘列表。\r\n\r\nprint([factorial(n) for n in range(6) if n % 2]) # 使用列表推导做相同的工作,换掉 map 和 filter,并避免了使用 lambda 表达式。\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume: int=50, payloadLength: int=-1): url = 'https://ggwave-to-file.ggerganov.com/' params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume, 'l': payloadLength} response = requests.get(url, params=params) if response == '': raise SyntaxError('Request failed') return response <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume: int=50, payloadLength: int=-1): url = 'https://ggwave-to-file.ggerganov.com/' params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume, 'l': payloadLength} response = requests.get(url, params=params) if response == '': raise SyntaxError('Request failed') return response <|reserved_special_token_0|> sys.stdout.buffer.write(result.content) <|reserved_special_token_1|> <|reserved_special_token_0|> def ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume: int=50, payloadLength: int=-1): url = 'https://ggwave-to-file.ggerganov.com/' params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume, 'l': payloadLength} response = requests.get(url, params=params) if response == '': raise SyntaxError('Request failed') return response result = ggwave('Hello world!') sys.stdout.buffer.write(result.content) <|reserved_special_token_1|> import sys import requests def ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume: int=50, payloadLength: int=-1): url = 'https://ggwave-to-file.ggerganov.com/' params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume, 'l': payloadLength} response = requests.get(url, params=params) if response == '': raise SyntaxError('Request failed') return response result = ggwave('Hello world!') sys.stdout.buffer.write(result.content) <|reserved_special_token_1|> import sys import requests def ggwave(message: str, protocolId: int = 1, 
sampleRate: float = 48000, volume: int = 50, payloadLength: int = -1): url = 'https://ggwave-to-file.ggerganov.com/' params = { 'm': message, # message to encode 'p': protocolId, # transmission protocol to use 's': sampleRate, # output sample rate 'v': volume, # output volume 'l': payloadLength, # if positive - use fixed-length encoding } response = requests.get(url, params=params) if response == '': raise SyntaxError('Request failed') return response result = ggwave("Hello world!") sys.stdout.buffer.write(result.content)
flexible
{ "blob_id": "f5d285b3a82151b5d7efdcd07d56cc5aaaac5836", "index": 7213, "step-1": "<mask token>\n\n\ndef ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume:\n int=50, payloadLength: int=-1):\n url = 'https://ggwave-to-file.ggerganov.com/'\n params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume,\n 'l': payloadLength}\n response = requests.get(url, params=params)\n if response == '':\n raise SyntaxError('Request failed')\n return response\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume:\n int=50, payloadLength: int=-1):\n url = 'https://ggwave-to-file.ggerganov.com/'\n params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume,\n 'l': payloadLength}\n response = requests.get(url, params=params)\n if response == '':\n raise SyntaxError('Request failed')\n return response\n\n\n<mask token>\nsys.stdout.buffer.write(result.content)\n", "step-3": "<mask token>\n\n\ndef ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume:\n int=50, payloadLength: int=-1):\n url = 'https://ggwave-to-file.ggerganov.com/'\n params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume,\n 'l': payloadLength}\n response = requests.get(url, params=params)\n if response == '':\n raise SyntaxError('Request failed')\n return response\n\n\nresult = ggwave('Hello world!')\nsys.stdout.buffer.write(result.content)\n", "step-4": "import sys\nimport requests\n\n\ndef ggwave(message: str, protocolId: int=1, sampleRate: float=48000, volume:\n int=50, payloadLength: int=-1):\n url = 'https://ggwave-to-file.ggerganov.com/'\n params = {'m': message, 'p': protocolId, 's': sampleRate, 'v': volume,\n 'l': payloadLength}\n response = requests.get(url, params=params)\n if response == '':\n raise SyntaxError('Request failed')\n return response\n\n\nresult = ggwave('Hello world!')\nsys.stdout.buffer.write(result.content)\n", "step-5": "import sys\nimport 
requests\n\ndef ggwave(message: str, protocolId: int = 1, sampleRate: float = 48000, volume: int = 50, payloadLength: int = -1):\n\n url = 'https://ggwave-to-file.ggerganov.com/'\n\n params = {\n 'm': message, # message to encode\n 'p': protocolId, # transmission protocol to use\n 's': sampleRate, # output sample rate\n 'v': volume, # output volume\n 'l': payloadLength, # if positive - use fixed-length encoding\n }\n\n response = requests.get(url, params=params)\n\n if response == '':\n raise SyntaxError('Request failed')\n\n return response\n\nresult = ggwave(\"Hello world!\")\n\nsys.stdout.buffer.write(result.content)\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
# -*- coding: utf-8 -*- from __future__ import absolute_import from .document import ParsedDocument,XmlDocument from .corenlp import StanfordCoreNLP from .annotation import KBPAnnMgr,ApfAnnMgr import os import codecs import sys from . import _list_files def _sequence_tag_bio(doc): outlines = u'' mentions= doc._annotate sentences = doc._text_spans for id,sentence in enumerate(sentences): tokens= sentence['tokens'] tok_num = len(tokens) mention_tags = ['O']* tok_num coref_tags = ['-']*tok_num for mention in mentions: if mention['sent_id'] != id: continue mention_tokens= mention['mention_tokens'] md_tag = mention['md_tag'] coref_tag = mention['coref_tag'] tokids=[] for token in mention_tokens: (sent, tok) = [int(d) for d in token.split('_')] if sent != id: print 'mention cross sentence at {}'.format(sentence['origin_text']) continue tokids.append(tok) for pos,tokid in enumerate(tokids): curr_md = md_tag curr_coref = coref_tag if pos ==0: curr_md = 'B-' + curr_md else: curr_md = 'I-' +curr_md if pos == 0: curr_coref = '(' + curr_coref if pos == len(tokids) -1: curr_coref = curr_coref + ')' if pos > 0 and pos < len(tokids) -1: curr_coref = '-' if mention_tags[tokid] == 'O': mention_tags[tokid] = curr_md coref_tags[tokid]= curr_coref source =[] target =[] for token,mention,coref in zip(tokens,mention_tags, coref_tags): token_feature= [token['word_lower'].replace(u'#',u'@'), token['word'].replace(u'#',u'@'), token['caps'].replace(u'#',u'@'), token['pos'].replace(u'#',u'@'), token['ner'].replace(u'#',u'@')] if token.has_key(u'comb-word'): token_feature.append( token[u'comb-word'].replace(u'#',u'@')) source.append('#'.join(token_feature)) target.append(mention) source = u' '.join(source) target = u' '.join(target) outlines += u'{}|||{} </s>\n'.format(source,target) return outlines def build_tree_tag(mentions, tok_num): mentions.sort(cmp = lambda x,y:cmp(x[0], y[0])) tag_out=[('X',[],[]) for i in range(tok_num)] for mention in mentions: (start,end, mtype)= mention 
tag_out[start][1].append('('+mtype) tag_out[end][2].append(')'+mtype) otags=[] for tag in tag_out: pre= ' '.join(tag[1]).strip() suc =' '.join(tag[2][::-1]).strip() if pre != '': otags.append(pre) otags.append(tag[0]) if suc != '': otags.append(suc) otags= ' '.join(otags) max_tag_num = max([len(x[1]) for x in tag_out]) if max_tag_num >1: print 'nested tag:{}'.format(otags) return otags def _sequence_tag_x(doc): outlines = u'' mentions= doc._annotate sentences = doc._text_spans for id,sentence in enumerate(sentences): tokens= sentence['tokens'] tok_num = len(tokens) curr_mentions = [] for mention in mentions: if mention['sent_id'] != id: continue mention_tokens= mention['mention_tokens'] md_tag = mention['md_tag'] tok_start= int(mention_tokens[0].split('_')[1]) tok_end = int(mention_tokens[-1].split('_')[1]) curr_mentions.append((tok_start,tok_end, md_tag)) target =build_tree_tag(curr_mentions, tok_num) source =[] for token in tokens: token_feature= [token['word_lower'].replace(u'#',u'@'), token['word'].replace(u'#',u'@'), token['caps'].replace(u'#',u'@'), token['pos'].replace(u'#',u'@'), token['ner'].replace(u'#',u'@')] if token.has_key(u'comb-word'): token_feature.append( token[u'comb-word'].replace(u'#',u'@')) source.append('#'.join(token_feature)) source = u' '.join(source) outlines += u'{}|||{} </s>\n'.format(source,target.decode('utf-8')) return outlines #in format 'BIO' will ignore all nested tags,in format 'XX' will build tree sequence def gen_sequence_tags(json_dir, outfile, fmt='BIO', encoding = 'utf-8'): fout= codecs.open(outfile, 'w', encoding= encoding) seqtag_func= None if fmt == 'BIO': seqtag_func= _sequence_tag_bio elif fmt =='XX': seqtag_func= _sequence_tag_x else: print 'unknown format {}'.format(fmt) return files = _list_files(json_dir, '.json') for f in files: print 'processing {}'.format(f) doc = ParsedDocument() doc.load(f) outlines = seqtag_func(doc) fout.write(outlines) fout.flush() fout.close()
normal
{ "blob_id": "80b8b77498f915a85185f829e8c7d5becdab8068", "index": 9286, "step-1": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom .document import ParsedDocument,XmlDocument\nfrom .corenlp import StanfordCoreNLP\nfrom .annotation import KBPAnnMgr,ApfAnnMgr\nimport os\nimport codecs\nimport sys\nfrom . import _list_files\ndef _sequence_tag_bio(doc):\n outlines = u''\n mentions= doc._annotate\n sentences = doc._text_spans\n for id,sentence in enumerate(sentences):\n tokens= sentence['tokens']\n tok_num = len(tokens)\n mention_tags = ['O']* tok_num\n coref_tags = ['-']*tok_num\n for mention in mentions:\n if mention['sent_id'] != id:\n continue\n mention_tokens= mention['mention_tokens']\n md_tag = mention['md_tag']\n coref_tag = mention['coref_tag']\n tokids=[]\n for token in mention_tokens:\n (sent, tok) = [int(d) for d in token.split('_')]\n if sent != id:\n print 'mention cross sentence at {}'.format(sentence['origin_text'])\n continue\n tokids.append(tok)\n for pos,tokid in enumerate(tokids):\n curr_md = md_tag\n curr_coref = coref_tag\n if pos ==0:\n curr_md = 'B-' + curr_md\n else:\n curr_md = 'I-' +curr_md\n if pos == 0:\n curr_coref = '(' + curr_coref\n if pos == len(tokids) -1:\n curr_coref = curr_coref + ')'\n if pos > 0 and pos < len(tokids) -1:\n curr_coref = '-'\n if mention_tags[tokid] == 'O':\n mention_tags[tokid] = curr_md\n coref_tags[tokid]= curr_coref\n source =[]\n target =[]\n for token,mention,coref in zip(tokens,mention_tags, coref_tags):\n token_feature= [token['word_lower'].replace(u'#',u'@'), token['word'].replace(u'#',u'@'),\n token['caps'].replace(u'#',u'@'), token['pos'].replace(u'#',u'@'),\n token['ner'].replace(u'#',u'@')]\n if token.has_key(u'comb-word'):\n token_feature.append( token[u'comb-word'].replace(u'#',u'@'))\n source.append('#'.join(token_feature))\n target.append(mention)\n source = u' '.join(source)\n target = u' '.join(target)\n outlines += u'{}|||{} </s>\\n'.format(source,target)\n return 
outlines\n\ndef build_tree_tag(mentions, tok_num):\n mentions.sort(cmp = lambda x,y:cmp(x[0], y[0]))\n tag_out=[('X',[],[]) for i in range(tok_num)]\n for mention in mentions:\n (start,end, mtype)= mention\n tag_out[start][1].append('('+mtype)\n tag_out[end][2].append(')'+mtype)\n otags=[]\n for tag in tag_out:\n pre= ' '.join(tag[1]).strip()\n suc =' '.join(tag[2][::-1]).strip()\n if pre != '':\n otags.append(pre)\n otags.append(tag[0])\n if suc != '':\n otags.append(suc)\n otags= ' '.join(otags)\n max_tag_num = max([len(x[1]) for x in tag_out])\n if max_tag_num >1:\n print 'nested tag:{}'.format(otags)\n \n return otags\n \ndef _sequence_tag_x(doc):\n outlines = u''\n mentions= doc._annotate\n sentences = doc._text_spans\n for id,sentence in enumerate(sentences):\n tokens= sentence['tokens']\n tok_num = len(tokens)\n curr_mentions = []\n for mention in mentions:\n if mention['sent_id'] != id:\n continue\n mention_tokens= mention['mention_tokens']\n md_tag = mention['md_tag']\n \n tok_start= int(mention_tokens[0].split('_')[1])\n tok_end = int(mention_tokens[-1].split('_')[1])\n curr_mentions.append((tok_start,tok_end, md_tag))\n \n target =build_tree_tag(curr_mentions, tok_num)\n source =[]\n for token in tokens:\n token_feature= [token['word_lower'].replace(u'#',u'@'), token['word'].replace(u'#',u'@'),\n token['caps'].replace(u'#',u'@'), token['pos'].replace(u'#',u'@'),\n token['ner'].replace(u'#',u'@')]\n if token.has_key(u'comb-word'):\n token_feature.append( token[u'comb-word'].replace(u'#',u'@'))\n source.append('#'.join(token_feature))\n source = u' '.join(source)\n \n outlines += u'{}|||{} </s>\\n'.format(source,target.decode('utf-8'))\n return outlines \n\n#in format 'BIO' will ignore all nested tags,in format 'XX' will build tree sequence\ndef gen_sequence_tags(json_dir, outfile, fmt='BIO', encoding = 'utf-8'):\n fout= codecs.open(outfile, 'w', encoding= encoding)\n seqtag_func= None\n if fmt == 'BIO':\n seqtag_func= _sequence_tag_bio\n elif fmt 
=='XX':\n seqtag_func= _sequence_tag_x\n else:\n print 'unknown format {}'.format(fmt)\n return\n files = _list_files(json_dir, '.json')\n for f in files:\n print 'processing {}'.format(f)\n doc = ParsedDocument()\n doc.load(f)\n outlines = seqtag_func(doc)\n fout.write(outlines)\n fout.flush()\n fout.close()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
''' python open() 函数用于打开一个文件,创建一个 file 对象,相关的方法才可以调用它进行读写。 更多文件操作可参考:Python 文件I/O。 函数语法 open(name[, mode[, buffering]]) 参数说明: name : 一个包含了你要访问的文件名称的字符串值。 mode : mode 决定了打开文件的模式:只读,写入,追加等。所有可取值见如下的完全列表。这个参数是非强制的,默认文件访问模式为只读(r)。 buffering : 如果 buffering 的值被设为 0,就不会有寄存。如果 buffering 的值取 1,访问文件时会寄存行。如果将 buffering 的值设为大于 1 的整数,表明了这就是的寄存区的缓冲大小。如果取负值,寄存区的缓冲大小则为系统默认。 不同模式打开文件的完全列表: 模式 描述 r 以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。 rb 以二进制格式打开一个文件用于只读。文件指针将会放在文件的开头。这是默认模式。 r+ 打开一个文件用于读写。文件指针将会放在文件的开头。 rb+ 以二进制格式打开一个文件用于读写。文件指针将会放在文件的开头。 w 打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。 wb 以二进制格式打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。 w+ 打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。 wb+ 以二进制格式打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。 a 打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。 ab 以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。 a+ 打开一个文件用于读写。如果该文件已存在,文件指针将会放在文件的结尾。文件打开时会是追加模式。如果该文件不存在,创建新文件用于读写。 ab+ 以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。如果该文件不存在,创建新文件用于读写。 file 对象方法 file.read([size]):size 未指定则返回整个文件,如果文件大小 >2 倍内存则有问题,f.read()读到文件尾时返回""(空字串)。 file.readline():返回一行。 file.readlines([size]) :返回包含size行的列表, size 未指定则返回全部行。 for line in f: print line :通过迭代器访问。 f.write("hello\n"):如果要写入字符串以外的数据,先将他转换为字符串。 f.tell():返回一个整数,表示当前文件指针的位置(就是到文件头的比特数)。 f.seek(偏移量,[起始位置]):用来移动文件指针。 偏移量: 单位为比特,可正可负 起始位置: 0 - 文件头, 默认值; 1 - 当前位置; 2 - 文件尾 f.close() 关闭文件 open(filename [, mode [, bufsize]]) 打开一个文件,返回一个file对象。 如果文件无法打开,将处罚IOError异常。 应该使用open()来代替直接使用file类型的构造函数打开文件。 参数filename表示将要被打开的文件的路径字符串; 参数mode表示打开的模式,最常用的模式有:'r'表示读文本,'w'表示写文本文件,'a'表示在文件中追加。 Mode的默认值是'r'。 当操作的是二进制文件时,只要在模式值上添加'b'。这样提高了程序的可移植性。 可选参数bufsize定义了文件缓冲区的大小。0表示不缓冲;1表示行缓冲;任何其他正数表示使用该大小的缓冲区; 负数表示使用系统默认缓冲区大小,对于tty设备它往往是行缓冲,而对于其他文件往往完全缓冲。如果参数值被省却。 使用系统默认值。 ''' f=open('1.txt','r',encoding='utf-8') print(f.read()) ''' 输出... ltf zhongguo shanxi yuncheng 男 20 ''' #参考博客 https://www.cnblogs.com/Devilf/p/8006663.html
normal
{ "blob_id": "3a65565af4c55fa5479e323a737c48f7f2fdb8ce", "index": 596, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(f.read())\n<mask token>\n", "step-3": "<mask token>\nf = open('1.txt', 'r', encoding='utf-8')\nprint(f.read())\n<mask token>\n", "step-4": "'''\npython open() 函数用于打开一个文件,创建一个 file 对象,相关的方法才可以调用它进行读写。\n更多文件操作可参考:Python 文件I/O。\n函数语法\nopen(name[, mode[, buffering]])\n参数说明:\nname : 一个包含了你要访问的文件名称的字符串值。\nmode : mode 决定了打开文件的模式:只读,写入,追加等。所有可取值见如下的完全列表。这个参数是非强制的,默认文件访问模式为只读(r)。\nbuffering : 如果 buffering 的值被设为 0,就不会有寄存。如果 buffering 的值取 1,访问文件时会寄存行。如果将 buffering 的值设为大于 1 的整数,表明了这就是的寄存区的缓冲大小。如果取负值,寄存区的缓冲大小则为系统默认。\n不同模式打开文件的完全列表:\n模式\n描述\nr\n以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。\nrb\n以二进制格式打开一个文件用于只读。文件指针将会放在文件的开头。这是默认模式。\nr+\n打开一个文件用于读写。文件指针将会放在文件的开头。\nrb+\n以二进制格式打开一个文件用于读写。文件指针将会放在文件的开头。\nw\n打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nwb\n以二进制格式打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nw+\n打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nwb+\n以二进制格式打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\na\n打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。\nab\n以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。\na+\n打开一个文件用于读写。如果该文件已存在,文件指针将会放在文件的结尾。文件打开时会是追加模式。如果该文件不存在,创建新文件用于读写。\nab+\n以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。如果该文件不存在,创建新文件用于读写。\nfile 对象方法\nfile.read([size]):size 未指定则返回整个文件,如果文件大小 >2 倍内存则有问题,f.read()读到文件尾时返回\"\"(空字串)。\nfile.readline():返回一行。\nfile.readlines([size]) :返回包含size行的列表, size 未指定则返回全部行。\nfor line in f: print line :通过迭代器访问。\nf.write(\"hello\\n\"):如果要写入字符串以外的数据,先将他转换为字符串。\nf.tell():返回一个整数,表示当前文件指针的位置(就是到文件头的比特数)。\nf.seek(偏移量,[起始位置]):用来移动文件指针。\n偏移量: 单位为比特,可正可负\n起始位置: 0 - 文件头, 默认值; 1 - 当前位置; 2 - 文件尾\nf.close() 关闭文件\n\n\nopen(filename [, mode [, bufsize]])\n打开一个文件,返回一个file对象。 
如果文件无法打开,将处罚IOError异常。\n应该使用open()来代替直接使用file类型的构造函数打开文件。\n参数filename表示将要被打开的文件的路径字符串;\n参数mode表示打开的模式,最常用的模式有:'r'表示读文本,'w'表示写文本文件,'a'表示在文件中追加。\nMode的默认值是'r'。\n当操作的是二进制文件时,只要在模式值上添加'b'。这样提高了程序的可移植性。\n可选参数bufsize定义了文件缓冲区的大小。0表示不缓冲;1表示行缓冲;任何其他正数表示使用该大小的缓冲区;\n负数表示使用系统默认缓冲区大小,对于tty设备它往往是行缓冲,而对于其他文件往往完全缓冲。如果参数值被省却。\n使用系统默认值。\n'''\n\nf=open('1.txt','r',encoding='utf-8')\nprint(f.read())\n'''\n输出...\nltf\nzhongguo\nshanxi\nyuncheng\n男\n20\n'''\n\n#参考博客 https://www.cnblogs.com/Devilf/p/8006663.html\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class Rational(object): <|reserved_special_token_0|> def __init__(self, num, den): """ simple constructor """ if den == 0: raise ZeroDivisionError('division by zero') if num == 0: self._num = 0 self._den = 1 else: sign = 1 if num * den < 0: sign = -1 abs_num = abs(num) abs_den = abs(den) divisor = _gcd(abs_num, abs_den) self._num = sign * abs_num // divisor self._den = abs_den // divisor def __add__(self, other): """ '+' operator """ if isinstance(other, int): return Rational(self.num + self.den * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.den + self.den * other.num, self. den * other.den) <|reserved_special_token_0|> def __sub__(self, other): """ '-' binary operator """ if isinstance(other, int): return Rational(self.num - self.den * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.den - self.den * other.num, self. den * other.den) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def __rfloordiv__(self, other): """ fallback of '//' operator """ return self.__rtruediv__(other) def __div__(self, other): """ '/' operator """ return self.__truediv__(other) <|reserved_special_token_0|> def __mod__(self, other): """ '%' operator """ if isinstance(other, int): if other == 0: raise ZeroDivisionError('division by zero') return Rational(0, 1) if not isinstance(other, Rational): return NotImplemented if other == 0: raise ZeroDivisionError('division by zero') return Rational(0, 1) def __rmod__(self, other): """ fallback of '%' operator """ if self == Rational(0, 1): raise ZeroDivisionError('division by zero') return Rational(0, 1) def __divmod__(self, other): """ 'divmod()' operation """ quot = self.__floordiv__(other) res = self.__mod__(other) if quot != NotImplemented and res != NotImplemented: return 
quot, res return NotImplemented def __rdivmod__(self, other): """ fallback of 'divmod()' operation """ quot = self.__rfloordiv__(other) res = self.__rmod__(other) if quot != NotImplemented and res != NotImplemented: return quot, res return NotImplemented def __pos__(self): """ '+' unary operator """ return self def __neg__(self): """ '-' unary operator """ return Rational(-1 * self.num, self.den) def __abs__(self): """ absolute value """ return Rational(abs(self.num), self.den) def __lt__(self, other): """ '<' operator """ if isinstance(other, int): return self.num - other * self.den < 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den < 0 <|reserved_special_token_0|> <|reserved_special_token_0|> def __ne__(self, other): """ '!=' or '<>' operator """ if isinstance(other, int): return self.num - other * self.den != 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den != 0 <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def __str__(self): """ 'informal' string representation """ ret = str(self.num) if self.den != 1: ret += '/' + str(self.den) return ret <|reserved_special_token_0|> def __bool__(self): """ 'bool()' operation """ return self.num != 0 <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Rational(object): <|reserved_special_token_0|> def __init__(self, num, den): """ simple constructor """ if den == 0: raise ZeroDivisionError('division by zero') if num == 0: self._num = 0 self._den = 1 else: sign = 1 if num * den < 0: sign = -1 abs_num = abs(num) abs_den = abs(den) divisor = _gcd(abs_num, abs_den) self._num = sign * abs_num // divisor self._den = abs_den // 
divisor def __add__(self, other): """ '+' operator """ if isinstance(other, int): return Rational(self.num + self.den * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.den + self.den * other.num, self. den * other.den) def __radd__(self, other): """ fallback of '+' operator """ if isinstance(other, int): return self.__add__(other) return NotImplemented def __sub__(self, other): """ '-' binary operator """ if isinstance(other, int): return Rational(self.num - self.den * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.den - self.den * other.num, self. den * other.den) def __rsub__(self, other): """ fallback of '-' binary operator """ if isinstance(other, int): return self.__neg__().__add__(-other) return NotImplemented def __mul__(self, other): """ '*' operator """ if isinstance(other, int): return Rational(self.num * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.num, self.den * other.den) def __rmul__(self, other): """ fallback of '*' operator """ return self.__mul__(other) <|reserved_special_token_0|> def __rtruediv__(self, other): """ fallback of '/' operator when '__future__.division' is in effect """ if isinstance(other, int): return Rational(self.den * other, self.num) return NotImplemented <|reserved_special_token_0|> def __rfloordiv__(self, other): """ fallback of '//' operator """ return self.__rtruediv__(other) def __div__(self, other): """ '/' operator """ return self.__truediv__(other) def __rdiv__(self, other): """ fallback of '/' operator """ return self.__rtruediv__(other) def __mod__(self, other): """ '%' operator """ if isinstance(other, int): if other == 0: raise ZeroDivisionError('division by zero') return Rational(0, 1) if not isinstance(other, Rational): return NotImplemented if other == 0: raise ZeroDivisionError('division by zero') return Rational(0, 1) def 
__rmod__(self, other): """ fallback of '%' operator """ if self == Rational(0, 1): raise ZeroDivisionError('division by zero') return Rational(0, 1) def __divmod__(self, other): """ 'divmod()' operation """ quot = self.__floordiv__(other) res = self.__mod__(other) if quot != NotImplemented and res != NotImplemented: return quot, res return NotImplemented def __rdivmod__(self, other): """ fallback of 'divmod()' operation """ quot = self.__rfloordiv__(other) res = self.__rmod__(other) if quot != NotImplemented and res != NotImplemented: return quot, res return NotImplemented def __pos__(self): """ '+' unary operator """ return self def __neg__(self): """ '-' unary operator """ return Rational(-1 * self.num, self.den) def __abs__(self): """ absolute value """ return Rational(abs(self.num), self.den) def __lt__(self, other): """ '<' operator """ if isinstance(other, int): return self.num - other * self.den < 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den < 0 def __le__(self, other): """ '<=' operator """ if isinstance(other, int): return self.num - other * self.den <= 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den <= 0 def __eq__(self, other): """ '==' operator """ if isinstance(other, int): return self.num - other * self.den == 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den == 0 def __ne__(self, other): """ '!=' or '<>' operator """ if isinstance(other, int): return self.num - other * self.den != 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den != 0 def __gt__(self, other): """ '>' operator """ if isinstance(other, int): return self.num - other * self.den > 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den > 0 def __ge__(self, other): """ '>=' operator """ if 
isinstance(other, int): return self.num - other * self.den >= 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den >= 0 <|reserved_special_token_0|> def __repr__(self): """ 'official' string representation """ return '<Rational: num=%d, den=%d>' % (self.num, self.den) def __str__(self): """ 'informal' string representation """ ret = str(self.num) if self.den != 1: ret += '/' + str(self.den) return ret <|reserved_special_token_0|> def __bool__(self): """ 'bool()' operation """ return self.num != 0 <|reserved_special_token_0|> def num(self): """ returns numerator of Rational """ return self.num def den(self): """ returns denominator of Rational """ return self.den <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Rational(object): <|reserved_special_token_0|> def __init__(self, num, den): """ simple constructor """ if den == 0: raise ZeroDivisionError('division by zero') if num == 0: self._num = 0 self._den = 1 else: sign = 1 if num * den < 0: sign = -1 abs_num = abs(num) abs_den = abs(den) divisor = _gcd(abs_num, abs_den) self._num = sign * abs_num // divisor self._den = abs_den // divisor def __add__(self, other): """ '+' operator """ if isinstance(other, int): return Rational(self.num + self.den * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.den + self.den * other.num, self. den * other.den) def __radd__(self, other): """ fallback of '+' operator """ if isinstance(other, int): return self.__add__(other) return NotImplemented def __sub__(self, other): """ '-' binary operator """ if isinstance(other, int): return Rational(self.num - self.den * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.den - self.den * other.num, self. 
den * other.den) def __rsub__(self, other): """ fallback of '-' binary operator """ if isinstance(other, int): return self.__neg__().__add__(-other) return NotImplemented def __mul__(self, other): """ '*' operator """ if isinstance(other, int): return Rational(self.num * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.num, self.den * other.den) def __rmul__(self, other): """ fallback of '*' operator """ return self.__mul__(other) <|reserved_special_token_0|> def __rtruediv__(self, other): """ fallback of '/' operator when '__future__.division' is in effect """ if isinstance(other, int): return Rational(self.den * other, self.num) return NotImplemented def __floordiv__(self, other): """ '//' operator """ return self.__truediv__(other) def __rfloordiv__(self, other): """ fallback of '//' operator """ return self.__rtruediv__(other) def __div__(self, other): """ '/' operator """ return self.__truediv__(other) def __rdiv__(self, other): """ fallback of '/' operator """ return self.__rtruediv__(other) def __mod__(self, other): """ '%' operator """ if isinstance(other, int): if other == 0: raise ZeroDivisionError('division by zero') return Rational(0, 1) if not isinstance(other, Rational): return NotImplemented if other == 0: raise ZeroDivisionError('division by zero') return Rational(0, 1) def __rmod__(self, other): """ fallback of '%' operator """ if self == Rational(0, 1): raise ZeroDivisionError('division by zero') return Rational(0, 1) def __divmod__(self, other): """ 'divmod()' operation """ quot = self.__floordiv__(other) res = self.__mod__(other) if quot != NotImplemented and res != NotImplemented: return quot, res return NotImplemented def __rdivmod__(self, other): """ fallback of 'divmod()' operation """ quot = self.__rfloordiv__(other) res = self.__rmod__(other) if quot != NotImplemented and res != NotImplemented: return quot, res return NotImplemented def __pos__(self): """ '+' unary operator """ 
return self def __neg__(self): """ '-' unary operator """ return Rational(-1 * self.num, self.den) def __abs__(self): """ absolute value """ return Rational(abs(self.num), self.den) def __lt__(self, other): """ '<' operator """ if isinstance(other, int): return self.num - other * self.den < 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den < 0 def __le__(self, other): """ '<=' operator """ if isinstance(other, int): return self.num - other * self.den <= 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den <= 0 def __eq__(self, other): """ '==' operator """ if isinstance(other, int): return self.num - other * self.den == 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den == 0 def __ne__(self, other): """ '!=' or '<>' operator """ if isinstance(other, int): return self.num - other * self.den != 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den != 0 def __gt__(self, other): """ '>' operator """ if isinstance(other, int): return self.num - other * self.den > 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den > 0 def __ge__(self, other): """ '>=' operator """ if isinstance(other, int): return self.num - other * self.den >= 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den >= 0 def __hash__(self): """ calc hash value """ return hash((self.num, self.den)) def __repr__(self): """ 'official' string representation """ return '<Rational: num=%d, den=%d>' % (self.num, self.den) def __str__(self): """ 'informal' string representation """ ret = str(self.num) if self.den != 1: ret += '/' + str(self.den) return ret def __bytes__(self): """ 'bytes()' operation """ return bytes(str(self), 'utf8') def __bool__(self): """ 'bool()' 
operation """ return self.num != 0 <|reserved_special_token_0|> def num(self): """ returns numerator of Rational """ return self.num def den(self): """ returns denominator of Rational """ return self.den <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Rational(object): <|reserved_special_token_0|> def __init__(self, num, den): """ simple constructor """ if den == 0: raise ZeroDivisionError('division by zero') if num == 0: self._num = 0 self._den = 1 else: sign = 1 if num * den < 0: sign = -1 abs_num = abs(num) abs_den = abs(den) divisor = _gcd(abs_num, abs_den) self._num = sign * abs_num // divisor self._den = abs_den // divisor def __add__(self, other): """ '+' operator """ if isinstance(other, int): return Rational(self.num + self.den * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.den + self.den * other.num, self. den * other.den) def __radd__(self, other): """ fallback of '+' operator """ if isinstance(other, int): return self.__add__(other) return NotImplemented def __sub__(self, other): """ '-' binary operator """ if isinstance(other, int): return Rational(self.num - self.den * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.den - self.den * other.num, self. 
den * other.den) def __rsub__(self, other): """ fallback of '-' binary operator """ if isinstance(other, int): return self.__neg__().__add__(-other) return NotImplemented def __mul__(self, other): """ '*' operator """ if isinstance(other, int): return Rational(self.num * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.num, self.den * other.den) def __rmul__(self, other): """ fallback of '*' operator """ return self.__mul__(other) <|reserved_special_token_0|> def __rtruediv__(self, other): """ fallback of '/' operator when '__future__.division' is in effect """ if isinstance(other, int): return Rational(self.den * other, self.num) return NotImplemented def __floordiv__(self, other): """ '//' operator """ return self.__truediv__(other) def __rfloordiv__(self, other): """ fallback of '//' operator """ return self.__rtruediv__(other) def __div__(self, other): """ '/' operator """ return self.__truediv__(other) def __rdiv__(self, other): """ fallback of '/' operator """ return self.__rtruediv__(other) def __mod__(self, other): """ '%' operator """ if isinstance(other, int): if other == 0: raise ZeroDivisionError('division by zero') return Rational(0, 1) if not isinstance(other, Rational): return NotImplemented if other == 0: raise ZeroDivisionError('division by zero') return Rational(0, 1) def __rmod__(self, other): """ fallback of '%' operator """ if self == Rational(0, 1): raise ZeroDivisionError('division by zero') return Rational(0, 1) def __divmod__(self, other): """ 'divmod()' operation """ quot = self.__floordiv__(other) res = self.__mod__(other) if quot != NotImplemented and res != NotImplemented: return quot, res return NotImplemented def __rdivmod__(self, other): """ fallback of 'divmod()' operation """ quot = self.__rfloordiv__(other) res = self.__rmod__(other) if quot != NotImplemented and res != NotImplemented: return quot, res return NotImplemented def __pos__(self): """ '+' unary operator """ 
return self def __neg__(self): """ '-' unary operator """ return Rational(-1 * self.num, self.den) def __abs__(self): """ absolute value """ return Rational(abs(self.num), self.den) def __lt__(self, other): """ '<' operator """ if isinstance(other, int): return self.num - other * self.den < 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den < 0 def __le__(self, other): """ '<=' operator """ if isinstance(other, int): return self.num - other * self.den <= 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den <= 0 def __eq__(self, other): """ '==' operator """ if isinstance(other, int): return self.num - other * self.den == 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den == 0 def __ne__(self, other): """ '!=' or '<>' operator """ if isinstance(other, int): return self.num - other * self.den != 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den != 0 def __gt__(self, other): """ '>' operator """ if isinstance(other, int): return self.num - other * self.den > 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den > 0 def __ge__(self, other): """ '>=' operator """ if isinstance(other, int): return self.num - other * self.den >= 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den >= 0 def __hash__(self): """ calc hash value """ return hash((self.num, self.den)) def __repr__(self): """ 'official' string representation """ return '<Rational: num=%d, den=%d>' % (self.num, self.den) def __str__(self): """ 'informal' string representation """ ret = str(self.num) if self.den != 1: ret += '/' + str(self.den) return ret def __bytes__(self): """ 'bytes()' operation """ return bytes(str(self), 'utf8') def __bool__(self): """ 'bool()' 
operation """ return self.num != 0 def isinteger(self): """ Does this Rational instance represent integer? """ return self.den == 1 def num(self): """ returns numerator of Rational """ return self.num def den(self): """ returns denominator of Rational """ return self.den @staticmethod def parse(string): """ parse string to Rational """ posslash = string.find('/') if posslash < 0: return Rational(int(string), 1) else: strs = string.split('/') return Rational(int(strs[0].strip()), int(strs[1].strip())) <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> """ module rational number """ def _gcd(num_a, num_b): """ gratest common divisor """ if num_a == 0 or num_b == 0: raise ArithmeticError('gcd of zero') var_p = num_a var_q = num_b if var_p < var_q: var_p = num_b var_q = num_a var_r = var_p % var_q while var_r != 0: var_p = var_q var_q = var_r var_r = var_p % var_q return var_q class Rational(object): """ representing rational number """ def __init__(self, num, den): """ simple constructor """ if den == 0: raise ZeroDivisionError('division by zero') if num == 0: self._num = 0 self._den = 1 else: sign = 1 if num * den < 0: sign = -1 abs_num = abs(num) abs_den = abs(den) divisor = _gcd(abs_num, abs_den) self._num = sign * abs_num // divisor self._den = abs_den // divisor # def __add__(self, other): """ '+' operator """ # supported type for operand except Rational if isinstance(other, int): return Rational(self.num + self.den * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.den + self.den * other.num, self.den * other.den) def __radd__(self, other): """ fallback of '+' operator """ if isinstance(other, int): return self.__add__(other) return NotImplemented # def __sub__(self, other): """ '-' binary operator """ # supported type for operand except Rational if isinstance(other, int): return Rational(self.num - self.den * other, self.den) if not 
isinstance(other, Rational): return NotImplemented return Rational(self.num * other.den - self.den * other.num, self.den * other.den) def __rsub__(self, other): """ fallback of '-' binary operator """ if isinstance(other, int): return self.__neg__().__add__(- other) return NotImplemented # def __mul__(self, other): """ '*' operator """ # supported type for operand except Rational if isinstance(other, int): return Rational(self.num * other, self.den) if not isinstance(other, Rational): return NotImplemented return Rational(self.num * other.num, self.den * other.den) def __rmul__(self, other): """ fallback of '*' operator """ return self.__mul__(other) # def __truediv__(self, other): """ '/' operator when '__future__.division' is in effect """ # supported type for operand except Rational if isinstance(other, int): if other == 0: raise ZeroDivisionError('division by zero') return Rational(self.num, self.den * other) if not isinstance(other, Rational): return NotImplemented if other == 0: raise ZeroDivisionError('division by zero') return Rational(self.num * other.den, self.den * other.num) def __rtruediv__(self, other): """ fallback of '/' operator when '__future__.division' is in effect """ if isinstance(other, int): return Rational(self.den * other, self.num) return NotImplemented # def __floordiv__(self, other): """ '//' operator """ return self.__truediv__(other) def __rfloordiv__(self, other): """ fallback of '//' operator """ return self.__rtruediv__(other) # def __div__(self, other): """ '/' operator """ return self.__truediv__(other) def __rdiv__(self, other): """ fallback of '/' operator """ return self.__rtruediv__(other) # def __mod__(self, other): """ '%' operator """ if isinstance(other, int): if other == 0: raise ZeroDivisionError('division by zero') return Rational(0, 1) if not isinstance(other, Rational): return NotImplemented if other == 0: raise ZeroDivisionError('division by zero') return Rational(0, 1) def __rmod__(self, other): """ fallback of '%' 
operator """ if self == Rational(0, 1): raise ZeroDivisionError('division by zero') return Rational(0, 1) # def __divmod__(self, other): """ 'divmod()' operation """ quot = self.__floordiv__(other) res = self.__mod__(other) if quot != NotImplemented and res != NotImplemented: return (quot, res) return NotImplemented def __rdivmod__(self, other): """ fallback of 'divmod()' operation """ quot = self.__rfloordiv__(other) res = self.__rmod__(other) if quot != NotImplemented and res != NotImplemented: return (quot, res) return NotImplemented # def __pos__(self): """ '+' unary operator """ return self # def __neg__(self): """ '-' unary operator """ return Rational(-1 * self.num, self.den) # def __abs__(self): """ absolute value """ return Rational(abs(self.num), self.den) # # "rich comparison" method def __lt__(self, other): """ '<' operator """ # supported type for operand except Rational if isinstance(other, int): return self.num - other * self.den < 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den < 0 # def __le__(self, other): """ '<=' operator """ # supported type for operand except Rational if isinstance(other, int): return self.num - other * self.den <= 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den <= 0 # def __eq__(self, other): """ '==' operator """ # supported type for operand except Rational if isinstance(other, int): return self.num - other * self.den == 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den == 0 # def __ne__(self, other): """ '!=' or '<>' operator """ # supported type for operand except Rational if isinstance(other, int): return self.num - other * self.den != 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den != 0 # def __gt__(self, other): """ '>' operator """ # supported type for operand except 
Rational if isinstance(other, int): return self.num - other * self.den > 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den > 0 # def __ge__(self, other): """ '>=' operator """ # supported type for operand except Rational if isinstance(other, int): return self.num - other * self.den >= 0 if not isinstance(other, Rational): return NotImplemented return self.num * other.den - other.num * self.den >= 0 # def __hash__(self): """ calc hash value """ return hash((self.num, self.den)) # def __repr__(self): """ 'official' string representation """ return '<Rational: num=%d, den=%d>' % (self.num, self.den) # def __str__(self): """ 'informal' string representation """ ret = str(self.num) if self.den != 1: ret += '/' + str(self.den) return ret # def __bytes__(self): """ 'bytes()' operation """ return bytes(str(self), 'utf8') # def __bool__(self): """ 'bool()' operation """ return self.num != 0 # def isinteger(self): """ Does this Rational instance represent integer? """ return self.den == 1 # def num(self): """ returns numerator of Rational """ return self.num # def den(self): """ returns denominator of Rational """ return self.den # @staticmethod def parse(string): """ parse string to Rational """ posslash = string.find('/') if posslash < 0: return Rational(int(string), 1) else: strs = string.split('/') return Rational(int(strs[0].strip()), int(strs[1].strip())) # ZERO = None ONE = None Rational.ZERO = Rational(0, 1) Rational.ONE = Rational(1, 1)
flexible
{ "blob_id": "b1ab28a99fdcce66f0a1e4e25821073673f531cf", "index": 657, "step-1": "<mask token>\n\n\nclass Rational(object):\n <mask token>\n\n def __init__(self, num, den):\n \"\"\"\n simple constructor\n \"\"\"\n if den == 0:\n raise ZeroDivisionError('division by zero')\n if num == 0:\n self._num = 0\n self._den = 1\n else:\n sign = 1\n if num * den < 0:\n sign = -1\n abs_num = abs(num)\n abs_den = abs(den)\n divisor = _gcd(abs_num, abs_den)\n self._num = sign * abs_num // divisor\n self._den = abs_den // divisor\n\n def __add__(self, other):\n \"\"\"\n '+' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num + self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den + self.den * other.num, self.\n den * other.den)\n <mask token>\n\n def __sub__(self, other):\n \"\"\"\n '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num - self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den - self.den * other.num, self.\n den * other.den)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __rfloordiv__(self, other):\n \"\"\"\n fallback of '//' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __div__(self, other):\n \"\"\"\n '/' operator\n \"\"\"\n return self.__truediv__(other)\n <mask token>\n\n def __mod__(self, other):\n \"\"\"\n '%' operator\n \"\"\"\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __rmod__(self, other):\n \"\"\"\n fallback of '%' operator\n \"\"\"\n if self == Rational(0, 1):\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __divmod__(self, other):\n 
\"\"\"\n 'divmod()' operation\n \"\"\"\n quot = self.__floordiv__(other)\n res = self.__mod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __rdivmod__(self, other):\n \"\"\"\n fallback of 'divmod()' operation\n \"\"\"\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __pos__(self):\n \"\"\"\n '+' unary operator\n \"\"\"\n return self\n\n def __neg__(self):\n \"\"\"\n '-' unary operator\n \"\"\"\n return Rational(-1 * self.num, self.den)\n\n def __abs__(self):\n \"\"\"\n absolute value\n \"\"\"\n return Rational(abs(self.num), self.den)\n\n def __lt__(self, other):\n \"\"\"\n '<' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den < 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den < 0\n <mask token>\n <mask token>\n\n def __ne__(self, other):\n \"\"\"\n '!=' or '<>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n \"\"\"\n 'informal' string representation\n \"\"\"\n ret = str(self.num)\n if self.den != 1:\n ret += '/' + str(self.den)\n return ret\n <mask token>\n\n def __bool__(self):\n \"\"\"\n 'bool()' operation\n \"\"\"\n return self.num != 0\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Rational(object):\n <mask token>\n\n def __init__(self, num, den):\n \"\"\"\n simple constructor\n \"\"\"\n if den == 0:\n raise ZeroDivisionError('division by zero')\n if num == 0:\n self._num = 0\n self._den = 1\n else:\n sign = 1\n if num * den < 0:\n 
sign = -1\n abs_num = abs(num)\n abs_den = abs(den)\n divisor = _gcd(abs_num, abs_den)\n self._num = sign * abs_num // divisor\n self._den = abs_den // divisor\n\n def __add__(self, other):\n \"\"\"\n '+' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num + self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den + self.den * other.num, self.\n den * other.den)\n\n def __radd__(self, other):\n \"\"\"\n fallback of '+' operator\n \"\"\"\n if isinstance(other, int):\n return self.__add__(other)\n return NotImplemented\n\n def __sub__(self, other):\n \"\"\"\n '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num - self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den - self.den * other.num, self.\n den * other.den)\n\n def __rsub__(self, other):\n \"\"\"\n fallback of '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return self.__neg__().__add__(-other)\n return NotImplemented\n\n def __mul__(self, other):\n \"\"\"\n '*' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.num, self.den * other.den)\n\n def __rmul__(self, other):\n \"\"\"\n fallback of '*' operator\n \"\"\"\n return self.__mul__(other)\n <mask token>\n\n def __rtruediv__(self, other):\n \"\"\"\n fallback of '/' operator when '__future__.division' is in effect\n \"\"\"\n if isinstance(other, int):\n return Rational(self.den * other, self.num)\n return NotImplemented\n <mask token>\n\n def __rfloordiv__(self, other):\n \"\"\"\n fallback of '//' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __div__(self, other):\n \"\"\"\n '/' operator\n \"\"\"\n return self.__truediv__(other)\n\n def __rdiv__(self, other):\n \"\"\"\n fallback of '/' operator\n 
\"\"\"\n return self.__rtruediv__(other)\n\n def __mod__(self, other):\n \"\"\"\n '%' operator\n \"\"\"\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __rmod__(self, other):\n \"\"\"\n fallback of '%' operator\n \"\"\"\n if self == Rational(0, 1):\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __divmod__(self, other):\n \"\"\"\n 'divmod()' operation\n \"\"\"\n quot = self.__floordiv__(other)\n res = self.__mod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __rdivmod__(self, other):\n \"\"\"\n fallback of 'divmod()' operation\n \"\"\"\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __pos__(self):\n \"\"\"\n '+' unary operator\n \"\"\"\n return self\n\n def __neg__(self):\n \"\"\"\n '-' unary operator\n \"\"\"\n return Rational(-1 * self.num, self.den)\n\n def __abs__(self):\n \"\"\"\n absolute value\n \"\"\"\n return Rational(abs(self.num), self.den)\n\n def __lt__(self, other):\n \"\"\"\n '<' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den < 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den < 0\n\n def __le__(self, other):\n \"\"\"\n '<=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den <= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den <= 0\n\n def __eq__(self, other):\n \"\"\"\n '==' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den == 0\n if not isinstance(other, Rational):\n return 
NotImplemented\n return self.num * other.den - other.num * self.den == 0\n\n def __ne__(self, other):\n \"\"\"\n '!=' or '<>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0\n\n def __gt__(self, other):\n \"\"\"\n '>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den > 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den > 0\n\n def __ge__(self, other):\n \"\"\"\n '>=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den >= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den >= 0\n <mask token>\n\n def __repr__(self):\n \"\"\"\n 'official' string representation\n \"\"\"\n return '<Rational: num=%d, den=%d>' % (self.num, self.den)\n\n def __str__(self):\n \"\"\"\n 'informal' string representation\n \"\"\"\n ret = str(self.num)\n if self.den != 1:\n ret += '/' + str(self.den)\n return ret\n <mask token>\n\n def __bool__(self):\n \"\"\"\n 'bool()' operation\n \"\"\"\n return self.num != 0\n <mask token>\n\n def num(self):\n \"\"\"\n returns numerator of Rational\n \"\"\"\n return self.num\n\n def den(self):\n \"\"\"\n returns denominator of Rational\n \"\"\"\n return self.den\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Rational(object):\n <mask token>\n\n def __init__(self, num, den):\n \"\"\"\n simple constructor\n \"\"\"\n if den == 0:\n raise ZeroDivisionError('division by zero')\n if num == 0:\n self._num = 0\n self._den = 1\n else:\n sign = 1\n if num * den < 0:\n sign = -1\n abs_num = abs(num)\n abs_den = abs(den)\n divisor = _gcd(abs_num, abs_den)\n self._num = sign * abs_num // divisor\n self._den = abs_den // divisor\n\n def __add__(self, other):\n 
\"\"\"\n '+' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num + self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den + self.den * other.num, self.\n den * other.den)\n\n def __radd__(self, other):\n \"\"\"\n fallback of '+' operator\n \"\"\"\n if isinstance(other, int):\n return self.__add__(other)\n return NotImplemented\n\n def __sub__(self, other):\n \"\"\"\n '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num - self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den - self.den * other.num, self.\n den * other.den)\n\n def __rsub__(self, other):\n \"\"\"\n fallback of '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return self.__neg__().__add__(-other)\n return NotImplemented\n\n def __mul__(self, other):\n \"\"\"\n '*' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.num, self.den * other.den)\n\n def __rmul__(self, other):\n \"\"\"\n fallback of '*' operator\n \"\"\"\n return self.__mul__(other)\n <mask token>\n\n def __rtruediv__(self, other):\n \"\"\"\n fallback of '/' operator when '__future__.division' is in effect\n \"\"\"\n if isinstance(other, int):\n return Rational(self.den * other, self.num)\n return NotImplemented\n\n def __floordiv__(self, other):\n \"\"\"\n '//' operator\n \"\"\"\n return self.__truediv__(other)\n\n def __rfloordiv__(self, other):\n \"\"\"\n fallback of '//' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __div__(self, other):\n \"\"\"\n '/' operator\n \"\"\"\n return self.__truediv__(other)\n\n def __rdiv__(self, other):\n \"\"\"\n fallback of '/' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __mod__(self, other):\n \"\"\"\n '%' operator\n \"\"\"\n if 
isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __rmod__(self, other):\n \"\"\"\n fallback of '%' operator\n \"\"\"\n if self == Rational(0, 1):\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __divmod__(self, other):\n \"\"\"\n 'divmod()' operation\n \"\"\"\n quot = self.__floordiv__(other)\n res = self.__mod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __rdivmod__(self, other):\n \"\"\"\n fallback of 'divmod()' operation\n \"\"\"\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __pos__(self):\n \"\"\"\n '+' unary operator\n \"\"\"\n return self\n\n def __neg__(self):\n \"\"\"\n '-' unary operator\n \"\"\"\n return Rational(-1 * self.num, self.den)\n\n def __abs__(self):\n \"\"\"\n absolute value\n \"\"\"\n return Rational(abs(self.num), self.den)\n\n def __lt__(self, other):\n \"\"\"\n '<' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den < 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den < 0\n\n def __le__(self, other):\n \"\"\"\n '<=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den <= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den <= 0\n\n def __eq__(self, other):\n \"\"\"\n '==' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den == 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den == 0\n\n def __ne__(self, other):\n \"\"\"\n 
'!=' or '<>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0\n\n def __gt__(self, other):\n \"\"\"\n '>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den > 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den > 0\n\n def __ge__(self, other):\n \"\"\"\n '>=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den >= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den >= 0\n\n def __hash__(self):\n \"\"\"\n calc hash value\n \"\"\"\n return hash((self.num, self.den))\n\n def __repr__(self):\n \"\"\"\n 'official' string representation\n \"\"\"\n return '<Rational: num=%d, den=%d>' % (self.num, self.den)\n\n def __str__(self):\n \"\"\"\n 'informal' string representation\n \"\"\"\n ret = str(self.num)\n if self.den != 1:\n ret += '/' + str(self.den)\n return ret\n\n def __bytes__(self):\n \"\"\"\n 'bytes()' operation\n \"\"\"\n return bytes(str(self), 'utf8')\n\n def __bool__(self):\n \"\"\"\n 'bool()' operation\n \"\"\"\n return self.num != 0\n <mask token>\n\n def num(self):\n \"\"\"\n returns numerator of Rational\n \"\"\"\n return self.num\n\n def den(self):\n \"\"\"\n returns denominator of Rational\n \"\"\"\n return self.den\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-4": "<mask token>\n\n\nclass Rational(object):\n <mask token>\n\n def __init__(self, num, den):\n \"\"\"\n simple constructor\n \"\"\"\n if den == 0:\n raise ZeroDivisionError('division by zero')\n if num == 0:\n self._num = 0\n self._den = 1\n else:\n sign = 1\n if num * den < 0:\n sign = -1\n abs_num = abs(num)\n abs_den = abs(den)\n divisor = _gcd(abs_num, abs_den)\n self._num = sign * abs_num // divisor\n self._den = 
abs_den // divisor\n\n def __add__(self, other):\n \"\"\"\n '+' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num + self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den + self.den * other.num, self.\n den * other.den)\n\n def __radd__(self, other):\n \"\"\"\n fallback of '+' operator\n \"\"\"\n if isinstance(other, int):\n return self.__add__(other)\n return NotImplemented\n\n def __sub__(self, other):\n \"\"\"\n '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num - self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den - self.den * other.num, self.\n den * other.den)\n\n def __rsub__(self, other):\n \"\"\"\n fallback of '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return self.__neg__().__add__(-other)\n return NotImplemented\n\n def __mul__(self, other):\n \"\"\"\n '*' operator\n \"\"\"\n if isinstance(other, int):\n return Rational(self.num * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.num, self.den * other.den)\n\n def __rmul__(self, other):\n \"\"\"\n fallback of '*' operator\n \"\"\"\n return self.__mul__(other)\n <mask token>\n\n def __rtruediv__(self, other):\n \"\"\"\n fallback of '/' operator when '__future__.division' is in effect\n \"\"\"\n if isinstance(other, int):\n return Rational(self.den * other, self.num)\n return NotImplemented\n\n def __floordiv__(self, other):\n \"\"\"\n '//' operator\n \"\"\"\n return self.__truediv__(other)\n\n def __rfloordiv__(self, other):\n \"\"\"\n fallback of '//' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def __div__(self, other):\n \"\"\"\n '/' operator\n \"\"\"\n return self.__truediv__(other)\n\n def __rdiv__(self, other):\n \"\"\"\n fallback of '/' operator\n \"\"\"\n return self.__rtruediv__(other)\n\n def 
__mod__(self, other):\n \"\"\"\n '%' operator\n \"\"\"\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __rmod__(self, other):\n \"\"\"\n fallback of '%' operator\n \"\"\"\n if self == Rational(0, 1):\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n\n def __divmod__(self, other):\n \"\"\"\n 'divmod()' operation\n \"\"\"\n quot = self.__floordiv__(other)\n res = self.__mod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __rdivmod__(self, other):\n \"\"\"\n fallback of 'divmod()' operation\n \"\"\"\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return quot, res\n return NotImplemented\n\n def __pos__(self):\n \"\"\"\n '+' unary operator\n \"\"\"\n return self\n\n def __neg__(self):\n \"\"\"\n '-' unary operator\n \"\"\"\n return Rational(-1 * self.num, self.den)\n\n def __abs__(self):\n \"\"\"\n absolute value\n \"\"\"\n return Rational(abs(self.num), self.den)\n\n def __lt__(self, other):\n \"\"\"\n '<' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den < 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den < 0\n\n def __le__(self, other):\n \"\"\"\n '<=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den <= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den <= 0\n\n def __eq__(self, other):\n \"\"\"\n '==' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den == 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - 
other.num * self.den == 0\n\n def __ne__(self, other):\n \"\"\"\n '!=' or '<>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0\n\n def __gt__(self, other):\n \"\"\"\n '>' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den > 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den > 0\n\n def __ge__(self, other):\n \"\"\"\n '>=' operator\n \"\"\"\n if isinstance(other, int):\n return self.num - other * self.den >= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den >= 0\n\n def __hash__(self):\n \"\"\"\n calc hash value\n \"\"\"\n return hash((self.num, self.den))\n\n def __repr__(self):\n \"\"\"\n 'official' string representation\n \"\"\"\n return '<Rational: num=%d, den=%d>' % (self.num, self.den)\n\n def __str__(self):\n \"\"\"\n 'informal' string representation\n \"\"\"\n ret = str(self.num)\n if self.den != 1:\n ret += '/' + str(self.den)\n return ret\n\n def __bytes__(self):\n \"\"\"\n 'bytes()' operation\n \"\"\"\n return bytes(str(self), 'utf8')\n\n def __bool__(self):\n \"\"\"\n 'bool()' operation\n \"\"\"\n return self.num != 0\n\n def isinteger(self):\n \"\"\"\n Does this Rational instance represent integer?\n \"\"\"\n return self.den == 1\n\n def num(self):\n \"\"\"\n returns numerator of Rational\n \"\"\"\n return self.num\n\n def den(self):\n \"\"\"\n returns denominator of Rational\n \"\"\"\n return self.den\n\n @staticmethod\n def parse(string):\n \"\"\"\n parse string to Rational\n \"\"\"\n posslash = string.find('/')\n if posslash < 0:\n return Rational(int(string), 1)\n else:\n strs = string.split('/')\n return Rational(int(strs[0].strip()), int(strs[1].strip()))\n <mask token>\n <mask token>\n\n\n<mask token>\n", "step-5": 
"\"\"\"\nmodule rational number\n\"\"\"\n\ndef _gcd(num_a, num_b):\n \"\"\"\n gratest common divisor\n \"\"\"\n if num_a == 0 or num_b == 0:\n raise ArithmeticError('gcd of zero')\n var_p = num_a\n var_q = num_b\n if var_p < var_q:\n var_p = num_b\n var_q = num_a\n var_r = var_p % var_q\n while var_r != 0:\n var_p = var_q\n var_q = var_r\n var_r = var_p % var_q\n return var_q\n\nclass Rational(object):\n \"\"\"\n representing rational number\n \"\"\"\n def __init__(self, num, den):\n \"\"\"\n simple constructor\n \"\"\"\n if den == 0:\n raise ZeroDivisionError('division by zero')\n if num == 0:\n self._num = 0\n self._den = 1\n else:\n sign = 1\n if num * den < 0:\n sign = -1\n abs_num = abs(num)\n abs_den = abs(den)\n divisor = _gcd(abs_num, abs_den)\n self._num = sign * abs_num // divisor\n self._den = abs_den // divisor\n #\n def __add__(self, other):\n \"\"\"\n '+' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return Rational(self.num + self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den + self.den * other.num, self.den * other.den)\n def __radd__(self, other):\n \"\"\"\n fallback of '+' operator\n \"\"\"\n if isinstance(other, int):\n return self.__add__(other)\n return NotImplemented\n #\n def __sub__(self, other):\n \"\"\"\n '-' binary operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return Rational(self.num - self.den * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.den - self.den * other.num, self.den * other.den)\n def __rsub__(self, other):\n \"\"\"\n fallback of '-' binary operator\n \"\"\"\n if isinstance(other, int):\n return self.__neg__().__add__(- other)\n return NotImplemented\n #\n def __mul__(self, other):\n \"\"\"\n '*' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, 
int):\n return Rational(self.num * other, self.den)\n if not isinstance(other, Rational):\n return NotImplemented\n return Rational(self.num * other.num, self.den * other.den)\n def __rmul__(self, other):\n \"\"\"\n fallback of '*' operator\n \"\"\"\n return self.__mul__(other)\n #\n def __truediv__(self, other):\n \"\"\"\n '/' operator when '__future__.division' is in effect\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(self.num, self.den * other)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(self.num * other.den, self.den * other.num)\n def __rtruediv__(self, other):\n \"\"\"\n fallback of '/' operator when '__future__.division' is in effect\n \"\"\"\n if isinstance(other, int):\n return Rational(self.den * other, self.num)\n return NotImplemented\n #\n def __floordiv__(self, other):\n \"\"\"\n '//' operator\n \"\"\"\n return self.__truediv__(other)\n def __rfloordiv__(self, other):\n \"\"\"\n fallback of '//' operator\n \"\"\"\n return self.__rtruediv__(other)\n #\n def __div__(self, other):\n \"\"\"\n '/' operator\n \"\"\"\n return self.__truediv__(other)\n def __rdiv__(self, other):\n \"\"\"\n fallback of '/' operator\n \"\"\"\n return self.__rtruediv__(other)\n #\n def __mod__(self, other):\n \"\"\"\n '%' operator\n \"\"\"\n if isinstance(other, int):\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n if not isinstance(other, Rational):\n return NotImplemented\n if other == 0:\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n def __rmod__(self, other):\n \"\"\"\n fallback of '%' operator\n \"\"\"\n if self == Rational(0, 1):\n raise ZeroDivisionError('division by zero')\n return Rational(0, 1)\n #\n def __divmod__(self, other):\n \"\"\"\n 'divmod()' operation\n \"\"\"\n quot = 
self.__floordiv__(other)\n res = self.__mod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return (quot, res)\n return NotImplemented\n def __rdivmod__(self, other):\n \"\"\"\n fallback of 'divmod()' operation\n \"\"\"\n quot = self.__rfloordiv__(other)\n res = self.__rmod__(other)\n if quot != NotImplemented and res != NotImplemented:\n return (quot, res)\n return NotImplemented\n #\n def __pos__(self):\n \"\"\"\n '+' unary operator\n \"\"\"\n return self\n #\n def __neg__(self):\n \"\"\"\n '-' unary operator\n \"\"\"\n return Rational(-1 * self.num, self.den)\n #\n def __abs__(self):\n \"\"\"\n absolute value\n \"\"\"\n return Rational(abs(self.num), self.den)\n #\n # \"rich comparison\" method\n def __lt__(self, other):\n \"\"\"\n '<' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den < 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den < 0\n #\n def __le__(self, other):\n \"\"\"\n '<=' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den <= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den <= 0\n #\n def __eq__(self, other):\n \"\"\"\n '==' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den == 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den == 0\n #\n def __ne__(self, other):\n \"\"\"\n '!=' or '<>' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den != 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den != 0\n #\n def __gt__(self, other):\n \"\"\"\n '>' operator\n 
\"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den > 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den > 0\n #\n def __ge__(self, other):\n \"\"\"\n '>=' operator\n \"\"\"\n # supported type for operand except Rational\n if isinstance(other, int):\n return self.num - other * self.den >= 0\n if not isinstance(other, Rational):\n return NotImplemented\n return self.num * other.den - other.num * self.den >= 0\n #\n def __hash__(self):\n \"\"\"\n calc hash value\n \"\"\"\n return hash((self.num, self.den))\n #\n def __repr__(self):\n \"\"\"\n 'official' string representation\n \"\"\"\n return '<Rational: num=%d, den=%d>' % (self.num, self.den)\n #\n def __str__(self):\n \"\"\"\n 'informal' string representation\n \"\"\"\n ret = str(self.num)\n if self.den != 1:\n ret += '/' + str(self.den)\n return ret\n #\n def __bytes__(self):\n \"\"\"\n 'bytes()' operation\n \"\"\"\n return bytes(str(self), 'utf8')\n #\n def __bool__(self):\n \"\"\"\n 'bool()' operation\n \"\"\"\n return self.num != 0\n #\n def isinteger(self):\n \"\"\"\n Does this Rational instance represent integer?\n \"\"\"\n return self.den == 1\n #\n def num(self):\n \"\"\"\n returns numerator of Rational\n \"\"\"\n return self.num\n #\n def den(self):\n \"\"\"\n returns denominator of Rational\n \"\"\"\n return self.den\n #\n @staticmethod\n def parse(string):\n \"\"\"\n parse string to Rational\n \"\"\"\n posslash = string.find('/')\n if posslash < 0:\n return Rational(int(string), 1)\n else:\n strs = string.split('/')\n return Rational(int(strs[0].strip()), int(strs[1].strip()))\n #\n ZERO = None\n ONE = None\n\nRational.ZERO = Rational(0, 1)\nRational.ONE = Rational(1, 1)\n", "step-ids": [ 17, 30, 33, 35, 41 ] }
[ 17, 30, 33, 35, 41 ]
<|reserved_special_token_0|> def fun1(): s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生', '亚一珠宝', '亚一金店']), ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟', '中国黄金', '明牌珠宝']), ('6bee32b2f0719ea45cc194847efd8917', ['周大福', '潮宏基', '东华美钻', '周大生'])] num = 1 city_code = ['上海'] for s_key, store_names in s_cut: for store in store_names: for code in city_code: params = {'keywords': store, 'types': '购物服务', 'city': code, 'citylimit': 'True', 'output': 'json', 'key': s_key, 'offset': 20, 'page': num} response = requests.get( 'https://restapi.amap.com/v3/place/text', params=params) map_results = json.loads(response.text) print(map_results) return map_results <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def fun1(): s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生', '亚一珠宝', '亚一金店']), ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟', '中国黄金', '明牌珠宝']), ('6bee32b2f0719ea45cc194847efd8917', ['周大福', '潮宏基', '东华美钻', '周大生'])] num = 1 city_code = ['上海'] for s_key, store_names in s_cut: for store in store_names: for code in city_code: params = {'keywords': store, 'types': '购物服务', 'city': code, 'citylimit': 'True', 'output': 'json', 'key': s_key, 'offset': 20, 'page': num} response = requests.get( 'https://restapi.amap.com/v3/place/text', params=params) map_results = json.loads(response.text) print(map_results) return map_results <|reserved_special_token_0|> print(json_text['pois']) print(len(json_text['pois'])) <|reserved_special_token_1|> <|reserved_special_token_0|> def fun1(): s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生', '亚一珠宝', '亚一金店']), ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟', '中国黄金', '明牌珠宝']), ('6bee32b2f0719ea45cc194847efd8917', ['周大福', '潮宏基', '东华美钻', '周大生'])] num = 1 city_code = ['上海'] for s_key, store_names in s_cut: for store in store_names: for code in city_code: params = {'keywords': store, 'types': '购物服务', 'city': code, 'citylimit': 'True', 'output': 'json', 
'key': s_key, 'offset': 20, 'page': num} response = requests.get( 'https://restapi.amap.com/v3/place/text', params=params) map_results = json.loads(response.text) print(map_results) return map_results json_text = fun1() print(json_text['pois']) print(len(json_text['pois'])) <|reserved_special_token_1|> import requests import json def fun1(): s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生', '亚一珠宝', '亚一金店']), ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟', '中国黄金', '明牌珠宝']), ('6bee32b2f0719ea45cc194847efd8917', ['周大福', '潮宏基', '东华美钻', '周大生'])] num = 1 city_code = ['上海'] for s_key, store_names in s_cut: for store in store_names: for code in city_code: params = {'keywords': store, 'types': '购物服务', 'city': code, 'citylimit': 'True', 'output': 'json', 'key': s_key, 'offset': 20, 'page': num} response = requests.get( 'https://restapi.amap.com/v3/place/text', params=params) map_results = json.loads(response.text) print(map_results) return map_results json_text = fun1() print(json_text['pois']) print(len(json_text['pois'])) <|reserved_special_token_1|> # -*- coding:utf-8 -*- import requests import json def fun1(): s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生', '亚一珠宝', '亚一金店']), ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟', '中国黄金', '明牌珠宝']), # yh ('6bee32b2f0719ea45cc194847efd8917', ['周大福', '潮宏基', '东华美钻', '周大生']), # zyy ] num = 1 city_code = ['上海'] for s_key, store_names in s_cut: for store in store_names: for code in city_code: params = {'keywords': store, 'types': '购物服务', 'city': code, 'citylimit': 'True', 'output': 'json', 'key': s_key, 'offset': 20, 'page': num} response = requests.get('https://restapi.amap.com/v3/place/text', params=params) map_results = json.loads(response.text) print(map_results) return map_results json_text = fun1() print(json_text['pois']) print(len(json_text['pois']))
flexible
{ "blob_id": "66f8fa5fc12dc80b8f46684c39781c2e4634de4a", "index": 3479, "step-1": "<mask token>\n\n\ndef fun1():\n s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生',\n '亚一珠宝', '亚一金店']), ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟',\n '中国黄金', '明牌珠宝']), ('6bee32b2f0719ea45cc194847efd8917', ['周大福',\n '潮宏基', '东华美钻', '周大生'])]\n num = 1\n city_code = ['上海']\n for s_key, store_names in s_cut:\n for store in store_names:\n for code in city_code:\n params = {'keywords': store, 'types': '购物服务', 'city': code,\n 'citylimit': 'True', 'output': 'json', 'key': s_key,\n 'offset': 20, 'page': num}\n response = requests.get(\n 'https://restapi.amap.com/v3/place/text', params=params)\n map_results = json.loads(response.text)\n print(map_results)\n return map_results\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef fun1():\n s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生',\n '亚一珠宝', '亚一金店']), ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟',\n '中国黄金', '明牌珠宝']), ('6bee32b2f0719ea45cc194847efd8917', ['周大福',\n '潮宏基', '东华美钻', '周大生'])]\n num = 1\n city_code = ['上海']\n for s_key, store_names in s_cut:\n for store in store_names:\n for code in city_code:\n params = {'keywords': store, 'types': '购物服务', 'city': code,\n 'citylimit': 'True', 'output': 'json', 'key': s_key,\n 'offset': 20, 'page': num}\n response = requests.get(\n 'https://restapi.amap.com/v3/place/text', params=params)\n map_results = json.loads(response.text)\n print(map_results)\n return map_results\n\n\n<mask token>\nprint(json_text['pois'])\nprint(len(json_text['pois']))\n", "step-3": "<mask token>\n\n\ndef fun1():\n s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生',\n '亚一珠宝', '亚一金店']), ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟',\n '中国黄金', '明牌珠宝']), ('6bee32b2f0719ea45cc194847efd8917', ['周大福',\n '潮宏基', '东华美钻', '周大生'])]\n num = 1\n city_code = ['上海']\n for s_key, store_names in s_cut:\n for store in store_names:\n for code in city_code:\n params = 
{'keywords': store, 'types': '购物服务', 'city': code,\n 'citylimit': 'True', 'output': 'json', 'key': s_key,\n 'offset': 20, 'page': num}\n response = requests.get(\n 'https://restapi.amap.com/v3/place/text', params=params)\n map_results = json.loads(response.text)\n print(map_results)\n return map_results\n\n\njson_text = fun1()\nprint(json_text['pois'])\nprint(len(json_text['pois']))\n", "step-4": "import requests\nimport json\n\n\ndef fun1():\n s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生',\n '亚一珠宝', '亚一金店']), ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟',\n '中国黄金', '明牌珠宝']), ('6bee32b2f0719ea45cc194847efd8917', ['周大福',\n '潮宏基', '东华美钻', '周大生'])]\n num = 1\n city_code = ['上海']\n for s_key, store_names in s_cut:\n for store in store_names:\n for code in city_code:\n params = {'keywords': store, 'types': '购物服务', 'city': code,\n 'citylimit': 'True', 'output': 'json', 'key': s_key,\n 'offset': 20, 'page': num}\n response = requests.get(\n 'https://restapi.amap.com/v3/place/text', params=params)\n map_results = json.loads(response.text)\n print(map_results)\n return map_results\n\n\njson_text = fun1()\nprint(json_text['pois'])\nprint(len(json_text['pois']))\n", "step-5": "# -*- coding:utf-8 -*-\n\nimport requests\nimport json\n\n\ndef fun1():\n s_cut = [('72af8ecf3609a546bac3150c20f70455', ['老凤祥', '六福珠宝', '周生生', '亚一珠宝', '亚一金店']),\n ('3e78397f7dbb88ffbd78ba52d0e925fa', ['老庙', '谢瑞麟', '中国黄金', '明牌珠宝']), # yh\n ('6bee32b2f0719ea45cc194847efd8917', ['周大福', '潮宏基', '东华美钻', '周大生']), # zyy\n ]\n num = 1\n city_code = ['上海']\n for s_key, store_names in s_cut:\n for store in store_names:\n for code in city_code:\n params = {'keywords': store,\n 'types': '购物服务',\n 'city': code,\n 'citylimit': 'True',\n 'output': 'json',\n 'key': s_key,\n 'offset': 20,\n 'page': num}\n response = requests.get('https://restapi.amap.com/v3/place/text', params=params)\n map_results = json.loads(response.text)\n print(map_results)\n return map_results\n\n\njson_text = 
fun1()\nprint(json_text['pois'])\n\nprint(len(json_text['pois']))\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import json import redis redis_client = redis.StrictRedis(host="redis", port=6379, db=1, password="pAssw0rd") def publish_data_on_redis(data, channel): redis_client.publish(channel, json.dumps(data))
normal
{ "blob_id": "d61024ecbd092852fc3396e6919d6d3c8aa554db", "index": 6178, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef publish_data_on_redis(data, channel):\n redis_client.publish(channel, json.dumps(data))\n", "step-3": "<mask token>\nredis_client = redis.StrictRedis(host='redis', port=6379, db=1, password=\n 'pAssw0rd')\n\n\ndef publish_data_on_redis(data, channel):\n redis_client.publish(channel, json.dumps(data))\n", "step-4": "import json\nimport redis\nredis_client = redis.StrictRedis(host='redis', port=6379, db=1, password=\n 'pAssw0rd')\n\n\ndef publish_data_on_redis(data, channel):\n redis_client.publish(channel, json.dumps(data))\n", "step-5": "import json\nimport redis\n\nredis_client = redis.StrictRedis(host=\"redis\", port=6379, db=1, password=\"pAssw0rd\")\n\n\ndef publish_data_on_redis(data, channel):\n redis_client.publish(channel, json.dumps(data))", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
/Users/sterlingbutters/anaconda3/lib/python3.6/encodings/cp037.py
normal
{ "blob_id": "85dfb30a380dc73f5a465c8f4be84decccfbcb59", "index": 1290, "step-1": "/Users/sterlingbutters/anaconda3/lib/python3.6/encodings/cp037.py", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> def test_components_to_conf_and_back(): for Component in comp_list: x = Component() y = x.to_conf().make() assert x == y <|reserved_special_token_0|> class TestEfConf: def test_conf_export(self): conf = EfConf(sources=[ParticleSource()], inner_regions=( InnerRegion(),)) s = conf.export_to_string() c1 = EfConf.from_string(s) assert c1 == conf def test_conf_repr(self): from numpy import array conf = EfConf(sources=[ParticleSource()], inner_regions=( InnerRegion(),)) s = repr(conf) c1 = eval(s) assert c1 == conf <|reserved_special_token_1|> <|reserved_special_token_0|> def test_components_to_conf_and_back(): for Component in comp_list: x = Component() y = x.to_conf().make() assert x == y def test_conf_to_configparser_and_back(): confs = [C().to_conf() for C in comp_list] parser = ConfigParser() for c in confs: c.add_section_to_parser(parser) conf2 = ConfigSection.parser_to_confs(parser) assert conf2 == confs <|reserved_special_token_0|> class TestEfConf: def test_conf_export(self): conf = EfConf(sources=[ParticleSource()], inner_regions=( InnerRegion(),)) s = conf.export_to_string() c1 = EfConf.from_string(s) assert c1 == conf def test_conf_repr(self): from numpy import array conf = EfConf(sources=[ParticleSource()], inner_regions=( InnerRegion(),)) s = repr(conf) c1 = eval(s) assert c1 == conf <|reserved_special_token_1|> <|reserved_special_token_0|> def test_components_to_conf_and_back(): for Component in comp_list: x = Component() y = x.to_conf().make() assert x == y def test_conf_to_configparser_and_back(): confs = [C().to_conf() for C in comp_list] parser = ConfigParser() for c in confs: c.add_section_to_parser(parser) conf2 = ConfigSection.parser_to_confs(parser) assert conf2 == confs def test_minimal_example(): parser = ConfigParser() parser.read('examples/minimal_working_example/minimal_conf.conf') components = [conf.make() for conf in ConfigSection.parser_to_confs(parser) ] assert components == [TimeGrid(1e-07, 1e-09, 1e-09), 
SpatialMesh((5, 5, 15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'), BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'), ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_', '.h5')] class TestEfConf: def test_conf_export(self): conf = EfConf(sources=[ParticleSource()], inner_regions=( InnerRegion(),)) s = conf.export_to_string() c1 = EfConf.from_string(s) assert c1 == conf def test_conf_repr(self): from numpy import array conf = EfConf(sources=[ParticleSource()], inner_regions=( InnerRegion(),)) s = repr(conf) c1 = eval(s) assert c1 == conf <|reserved_special_token_1|> from configparser import ConfigParser from ef.config.components import * from ef.config.efconf import EfConf from ef.config.section import ConfigSection comp_list = [BoundaryConditions, InnerRegion, OutputFile, ParticleInteractionModel, ParticleSource, SpatialMesh, TimeGrid, ExternalFieldUniform] def test_components_to_conf_and_back(): for Component in comp_list: x = Component() y = x.to_conf().make() assert x == y def test_conf_to_configparser_and_back(): confs = [C().to_conf() for C in comp_list] parser = ConfigParser() for c in confs: c.add_section_to_parser(parser) conf2 = ConfigSection.parser_to_confs(parser) assert conf2 == confs def test_minimal_example(): parser = ConfigParser() parser.read('examples/minimal_working_example/minimal_conf.conf') components = [conf.make() for conf in ConfigSection.parser_to_confs(parser) ] assert components == [TimeGrid(1e-07, 1e-09, 1e-09), SpatialMesh((5, 5, 15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'), BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'), ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_', '.h5')] class TestEfConf: def test_conf_export(self): conf = EfConf(sources=[ParticleSource()], inner_regions=( InnerRegion(),)) s = conf.export_to_string() c1 = EfConf.from_string(s) assert c1 == conf def test_conf_repr(self): from numpy import array conf = 
EfConf(sources=[ParticleSource()], inner_regions=( InnerRegion(),)) s = repr(conf) c1 = eval(s) assert c1 == conf <|reserved_special_token_1|> from configparser import ConfigParser from ef.config.components import * from ef.config.efconf import EfConf from ef.config.section import ConfigSection comp_list = [BoundaryConditions, InnerRegion, OutputFile, ParticleInteractionModel, ParticleSource, SpatialMesh, TimeGrid, ExternalFieldUniform] def test_components_to_conf_and_back(): for Component in comp_list: x = Component() y = x.to_conf().make() assert x == y def test_conf_to_configparser_and_back(): confs = [C().to_conf() for C in comp_list] parser = ConfigParser() for c in confs: c.add_section_to_parser(parser) conf2 = ConfigSection.parser_to_confs(parser) assert conf2 == confs def test_minimal_example(): parser = ConfigParser() parser.read("examples/minimal_working_example/minimal_conf.conf") components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)] assert components == [TimeGrid(1e-7, 1e-9, 1e-9), SpatialMesh((5, 5, 15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'), BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'), ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_', '.h5')] class TestEfConf: def test_conf_export(self): conf = EfConf(sources=[ParticleSource()], inner_regions=(InnerRegion(),)) s = conf.export_to_string() c1 = EfConf.from_string(s) assert c1 == conf def test_conf_repr(self): from numpy import array # for use in eval conf = EfConf(sources=[ParticleSource()], inner_regions=(InnerRegion(),)) s = repr(conf) c1 = eval(s) assert c1 == conf
flexible
{ "blob_id": "edcccc673994a8de281a683b747de52d2115f89e", "index": 347, "step-1": "<mask token>\n\n\ndef test_components_to_conf_and_back():\n for Component in comp_list:\n x = Component()\n y = x.to_conf().make()\n assert x == y\n\n\n<mask token>\n\n\nclass TestEfConf:\n\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "step-2": "<mask token>\n\n\ndef test_components_to_conf_and_back():\n for Component in comp_list:\n x = Component()\n y = x.to_conf().make()\n assert x == y\n\n\ndef test_conf_to_configparser_and_back():\n confs = [C().to_conf() for C in comp_list]\n parser = ConfigParser()\n for c in confs:\n c.add_section_to_parser(parser)\n conf2 = ConfigSection.parser_to_confs(parser)\n assert conf2 == confs\n\n\n<mask token>\n\n\nclass TestEfConf:\n\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "step-3": "<mask token>\n\n\ndef test_components_to_conf_and_back():\n for Component in comp_list:\n x = Component()\n y = x.to_conf().make()\n assert x == y\n\n\ndef test_conf_to_configparser_and_back():\n confs = [C().to_conf() for C in comp_list]\n parser = ConfigParser()\n for c in confs:\n c.add_section_to_parser(parser)\n conf2 = ConfigSection.parser_to_confs(parser)\n assert conf2 == confs\n\n\ndef test_minimal_example():\n parser = ConfigParser()\n 
parser.read('examples/minimal_working_example/minimal_conf.conf')\n components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)\n ]\n assert components == [TimeGrid(1e-07, 1e-09, 1e-09), SpatialMesh((5, 5,\n 15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'),\n BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'),\n ExternalFieldUniform('el_uni', 'electric'), OutputFile('example_',\n '.h5')]\n\n\nclass TestEfConf:\n\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "step-4": "from configparser import ConfigParser\nfrom ef.config.components import *\nfrom ef.config.efconf import EfConf\nfrom ef.config.section import ConfigSection\ncomp_list = [BoundaryConditions, InnerRegion, OutputFile,\n ParticleInteractionModel, ParticleSource, SpatialMesh, TimeGrid,\n ExternalFieldUniform]\n\n\ndef test_components_to_conf_and_back():\n for Component in comp_list:\n x = Component()\n y = x.to_conf().make()\n assert x == y\n\n\ndef test_conf_to_configparser_and_back():\n confs = [C().to_conf() for C in comp_list]\n parser = ConfigParser()\n for c in confs:\n c.add_section_to_parser(parser)\n conf2 = ConfigSection.parser_to_confs(parser)\n assert conf2 == confs\n\n\ndef test_minimal_example():\n parser = ConfigParser()\n parser.read('examples/minimal_working_example/minimal_conf.conf')\n components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)\n ]\n assert components == [TimeGrid(1e-07, 1e-09, 1e-09), SpatialMesh((5, 5,\n 15), (0.5, 0.5, 1.5)), ParticleInteractionModel('noninteracting'),\n BoundaryConditions(0), ExternalFieldUniform('mgn_uni', 'magnetic'),\n ExternalFieldUniform('el_uni', 
'electric'), OutputFile('example_',\n '.h5')]\n\n\nclass TestEfConf:\n\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array\n conf = EfConf(sources=[ParticleSource()], inner_regions=(\n InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "step-5": "from configparser import ConfigParser\n\nfrom ef.config.components import *\nfrom ef.config.efconf import EfConf\nfrom ef.config.section import ConfigSection\n\ncomp_list = [BoundaryConditions, InnerRegion, OutputFile, ParticleInteractionModel,\n ParticleSource, SpatialMesh, TimeGrid, ExternalFieldUniform]\n\n\ndef test_components_to_conf_and_back():\n for Component in comp_list:\n x = Component()\n y = x.to_conf().make()\n assert x == y\n\n\ndef test_conf_to_configparser_and_back():\n confs = [C().to_conf() for C in comp_list]\n parser = ConfigParser()\n for c in confs:\n c.add_section_to_parser(parser)\n conf2 = ConfigSection.parser_to_confs(parser)\n assert conf2 == confs\n\n\ndef test_minimal_example():\n parser = ConfigParser()\n parser.read(\"examples/minimal_working_example/minimal_conf.conf\")\n components = [conf.make() for conf in ConfigSection.parser_to_confs(parser)]\n assert components == [TimeGrid(1e-7, 1e-9, 1e-9), SpatialMesh((5, 5, 15), (0.5, 0.5, 1.5)),\n ParticleInteractionModel('noninteracting'), BoundaryConditions(0),\n ExternalFieldUniform('mgn_uni', 'magnetic'),\n ExternalFieldUniform('el_uni', 'electric'),\n OutputFile('example_', '.h5')]\n\n\nclass TestEfConf:\n def test_conf_export(self):\n conf = EfConf(sources=[ParticleSource()], inner_regions=(InnerRegion(),))\n s = conf.export_to_string()\n c1 = EfConf.from_string(s)\n assert c1 == conf\n\n def test_conf_repr(self):\n from numpy import array # for use in eval\n conf = EfConf(sources=[ParticleSource()], 
inner_regions=(InnerRegion(),))\n s = repr(conf)\n c1 = eval(s)\n assert c1 == conf\n", "step-ids": [ 4, 5, 6, 8, 9 ] }
[ 4, 5, 6, 8, 9 ]
#!/usr/bin/python # Find minimal distances between clouds in one bin, average these per bin # Compute geometric and arithmetical mean between all clouds per bin from netCDF4 import Dataset as NetCDFFile from matplotlib import pyplot as plt import numpy as np from numpy import ma from scipy import stats from haversine import haversine from scipy.spatial import distance from distance_methods import distances from CSD_fit import CSD_fit cusize = NetCDFFile( '/home/vanlaar/HDCP2data/TA_dom4/cusize_output_time41.nc') size = cusize.variables['size'] begin_time = 41 end_time = 48 D0_all = np.zeros((end_time-begin_time+1,len(size))) D1_all = np.zeros((end_time-begin_time+1,len(size))) nclouds_bin_all = np.zeros((end_time-begin_time+1,len(size))) mindistance_mean_all = np.zeros((end_time-begin_time+1,len(size))) mindistance_std_all = np.zeros((end_time-begin_time+1,len(size))) maxdistance_all = np.zeros((end_time-begin_time+1,len(size))) maxdistanceY_all = np.zeros((end_time-begin_time+1,len(size))) hn_normalized_all = np.zeros((end_time-begin_time+1,len(size))) for time in range(begin_time,end_time+1): print 'time:',time cusize = NetCDFFile( '/home/vanlaar/HDCP2data/TA_dom4/cusize_output_time'+str(time)+'.nc') cloudlon = cusize.variables['cloud_lon'][:] cloudlat = cusize.variables['cloud_lat'][:] nclouds_cusize = cusize.variables['nclouds'] #size = cusize.variables['size'] cloud_bin = cusize.variables['cloud_bin'][0,:] hn = cusize.variables['hn'] hn_normalized_loop = hn/nclouds_cusize[0] ncloud_bin = cusize.variables['ncloud_bin'] ncloudsint = int(nclouds_cusize[0]) cloud_lon = cloudlon[0,0:ncloudsint] cloud_lat = cloudlat[0,0:ncloudsint] filledbin=np.argmin(hn[0,:]) # last bin with clouds, rest is empty output_distances = distances(filledbin,cloud_lon,cloud_lat,cloud_bin,size,ncloudsint) D0_all[time-41] = output_distances[0] D1_all[time-41] = output_distances[1] mindistance_mean_all[time-41] = output_distances[2] mindistance_std_all[time-41] = output_distances[3] 
nclouds_bin_all[time-41] = output_distances[4] hn_normalized_all[time-41] = hn_normalized_loop mindistance_mean = np.mean(mindistance_mean_all,axis=0)/1000 mindistance_std = np.mean(mindistance_std_all,axis=0)/1000 D0 = np.mean(D0_all,axis=0) D1 = np.mean(D1_all,axis=0) nclouds = np.mean(nclouds_bin_all,axis=0) hn_normalized = np.mean(hn_normalized_all,axis=0) filledbin_all=np.argmin(hn_normalized[:]) fit = CSD_fit(hn_normalized[0:10],size[0:10]) logfit = fit[3] a = fit[0] b = fit[1] c = fit[2] print 'a, b, c:' print a, b, c print logfit sizelog = np.log10(size) hnlog = ma.filled(np.log10(ma.masked_equal(hn_normalized, 0)), np.nan) ncloudslog = ma.filled(np.log10(ma.masked_equal(nclouds, 0)), np.nan) #res = ma.filled(log2(ma.masked_equal(m, 0)), 0) mindistance_plus = mindistance_mean + mindistance_std mindistance_minus = mindistance_mean - mindistance_std filledbin = np.argmin(mindistance_mean) slope, intercept, r_value, p_value, std_err = stats.linregress(size[0:filledbin],mindistance_mean[0:filledbin]) print 'r-squared:',r_value**2 line = intercept + slope*size print 'slope:',slope print 'intercept:',intercept ################################################################## ### Plots threshold = 0.005*sum(nclouds) maxbin = np.min(np.where(nclouds <= threshold)) orange = (1.,0.38,0.01) blue = (0.53,0.81,1) plt.figure(figsize=(14,8)) plt.axis([0, 5500, 0, 120]) plt.xlabel('Cloud size [m]',fontsize=15) plt.ylabel('Nearest-neighbour distance [km]',fontsize=15) plt.fill_between(size,mindistance_plus,mindistance_minus,alpha=0.3,color=blue) plt.scatter(size,mindistance_mean,color='k') #plt.scatter(size,mindistance_plus,color='g') #plt.plot(size,line,color='black') ax = plt.gca() ax.axvspan(size[maxbin], 5500, alpha=0.2, color='grey') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.savefig('Figures/mindistance.pdf') plt.savefig('Figures/mindistance.png') plt.figure(figsize=(10,8)) #plt.axis([50000,220000, 50000, 220000]) plt.xlabel('D1') 
plt.ylabel('D0') plt.scatter(D1,D0,color='k') plt.savefig('Figures/D1-D0.pdf') plt.figure(figsize=(10,8)) plt.xlabel('log(l) [m]') plt.ylabel('log(N*(l)) [m-1]') plt.scatter(sizelog,hnlog) plt.scatter(sizelog[0:10],logfit[0:10]) plt.savefig('Figures/CSD.pdf') plt.figure(figsize=(10,8)) plt.xlabel('Cloud size') plt.ylabel('Ratio distance/size') plt.axis([0, 5500, 0, 0.02]) ax = plt.gca() ax.axvspan(size[maxbin], 5500, alpha=0.2, color='grey') ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.savefig('Figures/mindistance.pdf') plt.scatter(size[0:filledbin],mindistance_mean[0:filledbin]/size[0:filledbin]) plt.savefig('Figures/ratio_distance_size.pdf') plt.figure(figsize=(10,8)) plt.xlabel('Cloud size') plt.ylabel('Number of clouds') plt.axhline(y=threshold, c='black') ax = plt.gca() ax.spines['right'].set_visible(False) ax.spines['top'].set_visible(False) plt.scatter(size[0:filledbin],nclouds[0:filledbin]) plt.savefig('Figures/nclouds_size.pdf') plt.figure(figsize=(10,8)) plt.xlabel('size') plt.ylabel('nclouds') plt.scatter(size[0:filledbin],ncloudslog[0:filledbin]) plt.savefig('Figures/nclouds_size_log.pdf')
normal
{ "blob_id": "1c6e6394a6bd26b152b2f5ec87eb181a3387f794", "index": 5894, "step-1": "#!/usr/bin/python\n\n# Find minimal distances between clouds in one bin, average these per bin\n# Compute geometric and arithmetical mean between all clouds per bin\n\nfrom netCDF4 import Dataset as NetCDFFile\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom numpy import ma\nfrom scipy import stats \nfrom haversine import haversine\nfrom scipy.spatial import distance\nfrom distance_methods import distances\nfrom CSD_fit import CSD_fit\n\ncusize = NetCDFFile(\n '/home/vanlaar/HDCP2data/TA_dom4/cusize_output_time41.nc')\n\nsize = cusize.variables['size']\n\nbegin_time = 41\nend_time = 48\n\nD0_all = np.zeros((end_time-begin_time+1,len(size)))\nD1_all = np.zeros((end_time-begin_time+1,len(size)))\nnclouds_bin_all = np.zeros((end_time-begin_time+1,len(size)))\nmindistance_mean_all = np.zeros((end_time-begin_time+1,len(size)))\nmindistance_std_all = np.zeros((end_time-begin_time+1,len(size)))\nmaxdistance_all = np.zeros((end_time-begin_time+1,len(size)))\nmaxdistanceY_all = np.zeros((end_time-begin_time+1,len(size)))\nhn_normalized_all = np.zeros((end_time-begin_time+1,len(size)))\n\n\nfor time in range(begin_time,end_time+1):\n print 'time:',time\n\n cusize = NetCDFFile(\n '/home/vanlaar/HDCP2data/TA_dom4/cusize_output_time'+str(time)+'.nc')\n\n cloudlon = cusize.variables['cloud_lon'][:]\n cloudlat = cusize.variables['cloud_lat'][:]\n nclouds_cusize = cusize.variables['nclouds']\n #size = cusize.variables['size']\n cloud_bin = cusize.variables['cloud_bin'][0,:]\n hn = cusize.variables['hn']\n hn_normalized_loop = hn/nclouds_cusize[0]\n ncloud_bin = cusize.variables['ncloud_bin']\n\n ncloudsint = int(nclouds_cusize[0])\n cloud_lon = cloudlon[0,0:ncloudsint]\n cloud_lat = cloudlat[0,0:ncloudsint]\n filledbin=np.argmin(hn[0,:]) # last bin with clouds, rest is empty\n\n output_distances = distances(filledbin,cloud_lon,cloud_lat,cloud_bin,size,ncloudsint)\n\n D0_all[time-41] = 
output_distances[0]\n D1_all[time-41] = output_distances[1]\n mindistance_mean_all[time-41] = output_distances[2]\n mindistance_std_all[time-41] = output_distances[3]\n nclouds_bin_all[time-41] = output_distances[4]\n hn_normalized_all[time-41] = hn_normalized_loop\n\n\nmindistance_mean = np.mean(mindistance_mean_all,axis=0)/1000\nmindistance_std = np.mean(mindistance_std_all,axis=0)/1000\nD0 = np.mean(D0_all,axis=0)\nD1 = np.mean(D1_all,axis=0)\nnclouds = np.mean(nclouds_bin_all,axis=0)\nhn_normalized = np.mean(hn_normalized_all,axis=0)\n\nfilledbin_all=np.argmin(hn_normalized[:]) \nfit = CSD_fit(hn_normalized[0:10],size[0:10])\nlogfit = fit[3]\na = fit[0]\nb = fit[1]\nc = fit[2]\nprint 'a, b, c:'\nprint a, b, c\n\nprint logfit\n\nsizelog = np.log10(size)\nhnlog = ma.filled(np.log10(ma.masked_equal(hn_normalized, 0)), np.nan)\nncloudslog = ma.filled(np.log10(ma.masked_equal(nclouds, 0)), np.nan)\n\n#res = ma.filled(log2(ma.masked_equal(m, 0)), 0)\n\nmindistance_plus = mindistance_mean + mindistance_std\nmindistance_minus = mindistance_mean - mindistance_std\n\nfilledbin = np.argmin(mindistance_mean)\nslope, intercept, r_value, p_value, std_err = stats.linregress(size[0:filledbin],mindistance_mean[0:filledbin])\n\nprint 'r-squared:',r_value**2\nline = intercept + slope*size\n\nprint 'slope:',slope\nprint 'intercept:',intercept\n\n##################################################################\n### Plots\n\nthreshold = 0.005*sum(nclouds)\nmaxbin = np.min(np.where(nclouds <= threshold))\n\norange = (1.,0.38,0.01)\nblue = (0.53,0.81,1)\n\nplt.figure(figsize=(14,8))\nplt.axis([0, 5500, 0, 120])\nplt.xlabel('Cloud size [m]',fontsize=15)\nplt.ylabel('Nearest-neighbour distance [km]',fontsize=15)\nplt.fill_between(size,mindistance_plus,mindistance_minus,alpha=0.3,color=blue)\nplt.scatter(size,mindistance_mean,color='k')\n#plt.scatter(size,mindistance_plus,color='g')\n#plt.plot(size,line,color='black')\nax = plt.gca()\nax.axvspan(size[maxbin], 5500, alpha=0.2, 
color='grey')\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nplt.savefig('Figures/mindistance.pdf')\nplt.savefig('Figures/mindistance.png')\n\n\nplt.figure(figsize=(10,8))\n#plt.axis([50000,220000, 50000, 220000])\nplt.xlabel('D1')\nplt.ylabel('D0')\nplt.scatter(D1,D0,color='k')\nplt.savefig('Figures/D1-D0.pdf')\n\n\nplt.figure(figsize=(10,8))\nplt.xlabel('log(l) [m]')\nplt.ylabel('log(N*(l)) [m-1]')\nplt.scatter(sizelog,hnlog)\nplt.scatter(sizelog[0:10],logfit[0:10])\nplt.savefig('Figures/CSD.pdf')\n\n\nplt.figure(figsize=(10,8))\nplt.xlabel('Cloud size')\nplt.ylabel('Ratio distance/size')\nplt.axis([0, 5500, 0, 0.02])\nax = plt.gca()\nax.axvspan(size[maxbin], 5500, alpha=0.2, color='grey')\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nplt.savefig('Figures/mindistance.pdf')\nplt.scatter(size[0:filledbin],mindistance_mean[0:filledbin]/size[0:filledbin])\nplt.savefig('Figures/ratio_distance_size.pdf')\n\n\n\nplt.figure(figsize=(10,8))\nplt.xlabel('Cloud size')\nplt.ylabel('Number of clouds')\nplt.axhline(y=threshold, c='black')\nax = plt.gca()\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nplt.scatter(size[0:filledbin],nclouds[0:filledbin])\nplt.savefig('Figures/nclouds_size.pdf')\n\nplt.figure(figsize=(10,8))\nplt.xlabel('size')\nplt.ylabel('nclouds')\nplt.scatter(size[0:filledbin],ncloudslog[0:filledbin])\nplt.savefig('Figures/nclouds_size_log.pdf')\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
from discord.ext import commands def is_owner(): async def predicate(ctx): return ctx.author.id == 98208218022428672 return commands.check(predicate) class Staff(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command( name='stop', aliases=['shutdown'], description='This is a command for staff only to stop the bot' ) @is_owner() async def stop_bot(self, ctx): """Shutdown the bot""" await ctx.send('Oh, alright... I\'ll just shutup I guess.. :wave:') await self.bot.close()
normal
{ "blob_id": "23b2cc5b561a11ae7757a281a141491d5b7e23ca", "index": 2683, "step-1": "<mask token>\n\n\nclass Staff(commands.Cog):\n <mask token>\n\n @commands.command(name='stop', aliases=['shutdown'], description=\n 'This is a command for staff only to stop the bot')\n @is_owner()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send(\"Oh, alright... I'll just shutup I guess.. :wave:\")\n await self.bot.close()\n", "step-2": "<mask token>\n\n\nclass Staff(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='stop', aliases=['shutdown'], description=\n 'This is a command for staff only to stop the bot')\n @is_owner()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send(\"Oh, alright... I'll just shutup I guess.. :wave:\")\n await self.bot.close()\n", "step-3": "<mask token>\n\n\ndef is_owner():\n\n async def predicate(ctx):\n return ctx.author.id == 98208218022428672\n return commands.check(predicate)\n\n\nclass Staff(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='stop', aliases=['shutdown'], description=\n 'This is a command for staff only to stop the bot')\n @is_owner()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send(\"Oh, alright... I'll just shutup I guess.. :wave:\")\n await self.bot.close()\n", "step-4": "from discord.ext import commands\n\n\ndef is_owner():\n\n async def predicate(ctx):\n return ctx.author.id == 98208218022428672\n return commands.check(predicate)\n\n\nclass Staff(commands.Cog):\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='stop', aliases=['shutdown'], description=\n 'This is a command for staff only to stop the bot')\n @is_owner()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send(\"Oh, alright... I'll just shutup I guess.. 
:wave:\")\n await self.bot.close()\n", "step-5": "from discord.ext import commands\n\n\ndef is_owner():\n async def predicate(ctx):\n return ctx.author.id == 98208218022428672\n\n return commands.check(predicate)\n\n\nclass Staff(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(\n name='stop',\n aliases=['shutdown'],\n description='This is a command for staff only to stop the bot'\n )\n @is_owner()\n async def stop_bot(self, ctx):\n \"\"\"Shutdown the bot\"\"\"\n await ctx.send('Oh, alright... I\\'ll just shutup I guess.. :wave:')\n await self.bot.close()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#Opens the file that the user specifies fileopen = open(input("Please enter the name of the file that you wish to open."), 'r') #Reads the lines within the file and determines the length of the file lines = fileopen.readlines() count = len(lines) #Count is how long the file is, so number is the index values basically. #As long as the number variable is less than the amount of lines in the file (because one must be subtracted since the index starts at 0) the #number will be printed in front of the lines found in the file. number = 0 while number < count: print(number,".",lines[number]) number = number + 1 fileopen.close()
normal
{ "blob_id": "258b28153124ce42578c9eede429354069d8a7d6", "index": 2869, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile number < count:\n print(number, '.', lines[number])\n number = number + 1\nfileopen.close()\n", "step-3": "fileopen = open(input(\n 'Please enter the name of the file that you wish to open.'), 'r')\nlines = fileopen.readlines()\ncount = len(lines)\nnumber = 0\nwhile number < count:\n print(number, '.', lines[number])\n number = number + 1\nfileopen.close()\n", "step-4": "#Opens the file that the user specifies\nfileopen = open(input(\"Please enter the name of the file that you wish to open.\"), 'r')\n\n#Reads the lines within the file and determines the length of the file\nlines = fileopen.readlines()\ncount = len(lines)\n\n#Count is how long the file is, so number is the index values basically.\n#As long as the number variable is less than the amount of lines in the file (because one must be subtracted since the index starts at 0) the \n#number will be printed in front of the lines found in the file.\nnumber = 0\nwhile number < count:\n print(number,\".\",lines[number])\n number = number + 1\nfileopen.close()", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from job_description import JobDescription from resume import Resume from resume_manager import ResumeManager
normal
{ "blob_id": "a998433e45c1d5135749c5164e8ec1f2eb0e572a", "index": 1693, "step-1": "<mask token>\n", "step-2": "from job_description import JobDescription\nfrom resume import Resume\nfrom resume_manager import ResumeManager\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
<|reserved_special_token_0|> class TwitterWorker(Thread): <|reserved_special_token_0|> def run(self): streamListener = MyStreamListener() self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener ) self.stream.filter(track=['#HACKPSUHELPLINE']) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class TwitterWorker(Thread): def __init__(self): Thread.__init__(self) CONSUMER_KEY = 'IwZZeJHjLXq55ewwQwD0SogHU' CONSUMER_SECRET = '80kELQhDGNvLNFfNZ7qliIbzAoA3tsgQaAEnnMNWKIr6uMN6Ri' ACCESS_TOKEN = '857838183224139776-1HrWNTQk8pywtozedEAou6tr7CkB4Uu' ACCESS_TOKEN_SECRET = 'NkP6s5UZuoBmDSW31mhTzudNSQKpvxwwuE3pcWYcytWgU' self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) self.auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) self.api = tweepy.API(self.auth) def run(self): streamListener = MyStreamListener() self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener ) self.stream.filter(track=['#HACKPSUHELPLINE']) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class TwitterWorker(Thread): def __init__(self): Thread.__init__(self) CONSUMER_KEY = 'IwZZeJHjLXq55ewwQwD0SogHU' CONSUMER_SECRET = '80kELQhDGNvLNFfNZ7qliIbzAoA3tsgQaAEnnMNWKIr6uMN6Ri' ACCESS_TOKEN = '857838183224139776-1HrWNTQk8pywtozedEAou6tr7CkB4Uu' ACCESS_TOKEN_SECRET = 'NkP6s5UZuoBmDSW31mhTzudNSQKpvxwwuE3pcWYcytWgU' self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) self.auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) self.api = tweepy.API(self.auth) def run(self): streamListener = MyStreamListener() self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener ) self.stream.filter(track=['#HACKPSUHELPLINE']) if __name__ == '__main__': print('Starting Stream..') tw = TwitterWorker() tw.streaming() <|reserved_special_token_1|> from twitter.MyStreamListener import MyStreamListener import tweepy from threading import Thread class TwitterWorker(Thread): def 
__init__(self): Thread.__init__(self) CONSUMER_KEY = 'IwZZeJHjLXq55ewwQwD0SogHU' CONSUMER_SECRET = '80kELQhDGNvLNFfNZ7qliIbzAoA3tsgQaAEnnMNWKIr6uMN6Ri' ACCESS_TOKEN = '857838183224139776-1HrWNTQk8pywtozedEAou6tr7CkB4Uu' ACCESS_TOKEN_SECRET = 'NkP6s5UZuoBmDSW31mhTzudNSQKpvxwwuE3pcWYcytWgU' self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) self.auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) self.api = tweepy.API(self.auth) def run(self): streamListener = MyStreamListener() self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener ) self.stream.filter(track=['#HACKPSUHELPLINE']) if __name__ == '__main__': print('Starting Stream..') tw = TwitterWorker() tw.streaming() <|reserved_special_token_1|> from twitter.MyStreamListener import MyStreamListener import tweepy from threading import Thread class TwitterWorker(Thread): def __init__(self): Thread.__init__(self) CONSUMER_KEY = 'IwZZeJHjLXq55ewwQwD0SogHU' CONSUMER_SECRET = '80kELQhDGNvLNFfNZ7qliIbzAoA3tsgQaAEnnMNWKIr6uMN6Ri' ACCESS_TOKEN = '857838183224139776-1HrWNTQk8pywtozedEAou6tr7CkB4Uu' ACCESS_TOKEN_SECRET = 'NkP6s5UZuoBmDSW31mhTzudNSQKpvxwwuE3pcWYcytWgU' self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) self.auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) self.api = tweepy.API(self.auth) def run(self): streamListener = MyStreamListener() self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener) self.stream.filter(track=['#HACKPSUHELPLINE']) if __name__ == '__main__': print("Starting Stream..") tw = TwitterWorker() tw.streaming()
flexible
{ "blob_id": "c475e095571b211693e66583637442edbf72c260", "index": 7741, "step-1": "<mask token>\n\n\nclass TwitterWorker(Thread):\n <mask token>\n\n def run(self):\n streamListener = MyStreamListener()\n self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener\n )\n self.stream.filter(track=['#HACKPSUHELPLINE'])\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass TwitterWorker(Thread):\n\n def __init__(self):\n Thread.__init__(self)\n CONSUMER_KEY = 'IwZZeJHjLXq55ewwQwD0SogHU'\n CONSUMER_SECRET = '80kELQhDGNvLNFfNZ7qliIbzAoA3tsgQaAEnnMNWKIr6uMN6Ri'\n ACCESS_TOKEN = '857838183224139776-1HrWNTQk8pywtozedEAou6tr7CkB4Uu'\n ACCESS_TOKEN_SECRET = 'NkP6s5UZuoBmDSW31mhTzudNSQKpvxwwuE3pcWYcytWgU'\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n self.api = tweepy.API(self.auth)\n\n def run(self):\n streamListener = MyStreamListener()\n self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener\n )\n self.stream.filter(track=['#HACKPSUHELPLINE'])\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass TwitterWorker(Thread):\n\n def __init__(self):\n Thread.__init__(self)\n CONSUMER_KEY = 'IwZZeJHjLXq55ewwQwD0SogHU'\n CONSUMER_SECRET = '80kELQhDGNvLNFfNZ7qliIbzAoA3tsgQaAEnnMNWKIr6uMN6Ri'\n ACCESS_TOKEN = '857838183224139776-1HrWNTQk8pywtozedEAou6tr7CkB4Uu'\n ACCESS_TOKEN_SECRET = 'NkP6s5UZuoBmDSW31mhTzudNSQKpvxwwuE3pcWYcytWgU'\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n self.api = tweepy.API(self.auth)\n\n def run(self):\n streamListener = MyStreamListener()\n self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener\n )\n self.stream.filter(track=['#HACKPSUHELPLINE'])\n\n\nif __name__ == '__main__':\n print('Starting Stream..')\n tw = TwitterWorker()\n tw.streaming()\n", "step-4": "from twitter.MyStreamListener import MyStreamListener\nimport tweepy\nfrom 
threading import Thread\n\n\nclass TwitterWorker(Thread):\n\n def __init__(self):\n Thread.__init__(self)\n CONSUMER_KEY = 'IwZZeJHjLXq55ewwQwD0SogHU'\n CONSUMER_SECRET = '80kELQhDGNvLNFfNZ7qliIbzAoA3tsgQaAEnnMNWKIr6uMN6Ri'\n ACCESS_TOKEN = '857838183224139776-1HrWNTQk8pywtozedEAou6tr7CkB4Uu'\n ACCESS_TOKEN_SECRET = 'NkP6s5UZuoBmDSW31mhTzudNSQKpvxwwuE3pcWYcytWgU'\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n self.api = tweepy.API(self.auth)\n\n def run(self):\n streamListener = MyStreamListener()\n self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener\n )\n self.stream.filter(track=['#HACKPSUHELPLINE'])\n\n\nif __name__ == '__main__':\n print('Starting Stream..')\n tw = TwitterWorker()\n tw.streaming()\n", "step-5": "from twitter.MyStreamListener import MyStreamListener\nimport tweepy\nfrom threading import Thread\n\n\nclass TwitterWorker(Thread):\n def __init__(self):\n Thread.__init__(self)\n CONSUMER_KEY = 'IwZZeJHjLXq55ewwQwD0SogHU'\n CONSUMER_SECRET = '80kELQhDGNvLNFfNZ7qliIbzAoA3tsgQaAEnnMNWKIr6uMN6Ri'\n ACCESS_TOKEN = '857838183224139776-1HrWNTQk8pywtozedEAou6tr7CkB4Uu'\n ACCESS_TOKEN_SECRET = 'NkP6s5UZuoBmDSW31mhTzudNSQKpvxwwuE3pcWYcytWgU'\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n self.api = tweepy.API(self.auth)\n\n def run(self):\n streamListener = MyStreamListener()\n self.stream = tweepy.Stream(auth=self.api.auth, listener=streamListener)\n self.stream.filter(track=['#HACKPSUHELPLINE'])\n\n\nif __name__ == '__main__':\n print(\"Starting Stream..\")\n tw = TwitterWorker()\n tw.streaming()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> def main(): print(generatePassword()) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def generatePassword(): numLowerCase = numUpperCase = numSpecialCase = numNumber = 0 password = '' randomChars = '-|@.,?/!~#%^&*(){}[]\\=*' length = random.randint(10, 25) while (numSpecialCase < 1 or numNumber < 1 or numLowerCase < 1 or numUpperCase < 1): password = '' numLowerCase = numUpperCase = numSpecialCase = numNumber = 0 for i in range(length): charType = random.randint(0, 3) if charType == 0: password += chr(random.randint(97, 121)) numLowerCase += 1 elif charType == 1: password += chr(random.randint(65, 90)) numUpperCase += 1 elif charType == 2: password += chr(random.randint(48, 57)) numNumber += 1 else: password += randomChars[random.randint(0, len(randomChars) - 1) ] numSpecialCase += 1 return password def main(): print(generatePassword()) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def generatePassword(): numLowerCase = numUpperCase = numSpecialCase = numNumber = 0 password = '' randomChars = '-|@.,?/!~#%^&*(){}[]\\=*' length = random.randint(10, 25) while (numSpecialCase < 1 or numNumber < 1 or numLowerCase < 1 or numUpperCase < 1): password = '' numLowerCase = numUpperCase = numSpecialCase = numNumber = 0 for i in range(length): charType = random.randint(0, 3) if charType == 0: password += chr(random.randint(97, 121)) numLowerCase += 1 elif charType == 1: password += chr(random.randint(65, 90)) numUpperCase += 1 elif charType == 2: password += chr(random.randint(48, 57)) numNumber += 1 else: password += randomChars[random.randint(0, len(randomChars) - 1) ] numSpecialCase += 1 return password def main(): print(generatePassword()) main() <|reserved_special_token_1|> import random def generatePassword(): numLowerCase = numUpperCase = numSpecialCase = numNumber = 0 password = '' randomChars = '-|@.,?/!~#%^&*(){}[]\\=*' length = random.randint(10, 25) while 
(numSpecialCase < 1 or numNumber < 1 or numLowerCase < 1 or numUpperCase < 1): password = '' numLowerCase = numUpperCase = numSpecialCase = numNumber = 0 for i in range(length): charType = random.randint(0, 3) if charType == 0: password += chr(random.randint(97, 121)) numLowerCase += 1 elif charType == 1: password += chr(random.randint(65, 90)) numUpperCase += 1 elif charType == 2: password += chr(random.randint(48, 57)) numNumber += 1 else: password += randomChars[random.randint(0, len(randomChars) - 1) ] numSpecialCase += 1 return password def main(): print(generatePassword()) main() <|reserved_special_token_1|> import random def generatePassword (): numLowerCase = numUpperCase = numSpecialCase = numNumber = 0 password = "" randomChars = "-|@.,?/!~#%^&*(){}[]\=*" length = random.randint(10, 25) while(numSpecialCase < 1 or numNumber < 1 or numLowerCase < 1 or numUpperCase < 1): password = "" numLowerCase = numUpperCase = numSpecialCase = numNumber = 0 for i in range (length): charType = random.randint(0, 3) #lowercase letters if(charType == 0): password+= chr(random.randint(97, 121)) numLowerCase+=1 #uppercase letters elif(charType == 1): password+= chr(random.randint(65, 90)) numUpperCase+=1 #number letters elif(charType == 2): password+= chr(random.randint(48, 57)) numNumber+=1 #special characters else: password+= randomChars[random.randint(0, len(randomChars)-1)] numSpecialCase+=1 return password def main(): print(generatePassword()) main()
flexible
{ "blob_id": "3956d4cdb0a8654b6f107975ac003ce59ddd3de1", "index": 4485, "step-1": "<mask token>\n\n\ndef main():\n print(generatePassword())\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef generatePassword():\n numLowerCase = numUpperCase = numSpecialCase = numNumber = 0\n password = ''\n randomChars = '-|@.,?/!~#%^&*(){}[]\\\\=*'\n length = random.randint(10, 25)\n while (numSpecialCase < 1 or numNumber < 1 or numLowerCase < 1 or \n numUpperCase < 1):\n password = ''\n numLowerCase = numUpperCase = numSpecialCase = numNumber = 0\n for i in range(length):\n charType = random.randint(0, 3)\n if charType == 0:\n password += chr(random.randint(97, 121))\n numLowerCase += 1\n elif charType == 1:\n password += chr(random.randint(65, 90))\n numUpperCase += 1\n elif charType == 2:\n password += chr(random.randint(48, 57))\n numNumber += 1\n else:\n password += randomChars[random.randint(0, len(randomChars) - 1)\n ]\n numSpecialCase += 1\n return password\n\n\ndef main():\n print(generatePassword())\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef generatePassword():\n numLowerCase = numUpperCase = numSpecialCase = numNumber = 0\n password = ''\n randomChars = '-|@.,?/!~#%^&*(){}[]\\\\=*'\n length = random.randint(10, 25)\n while (numSpecialCase < 1 or numNumber < 1 or numLowerCase < 1 or \n numUpperCase < 1):\n password = ''\n numLowerCase = numUpperCase = numSpecialCase = numNumber = 0\n for i in range(length):\n charType = random.randint(0, 3)\n if charType == 0:\n password += chr(random.randint(97, 121))\n numLowerCase += 1\n elif charType == 1:\n password += chr(random.randint(65, 90))\n numUpperCase += 1\n elif charType == 2:\n password += chr(random.randint(48, 57))\n numNumber += 1\n else:\n password += randomChars[random.randint(0, len(randomChars) - 1)\n ]\n numSpecialCase += 1\n return password\n\n\ndef main():\n print(generatePassword())\n\n\nmain()\n", "step-4": "import random\n\n\ndef generatePassword():\n numLowerCase = numUpperCase = 
numSpecialCase = numNumber = 0\n password = ''\n randomChars = '-|@.,?/!~#%^&*(){}[]\\\\=*'\n length = random.randint(10, 25)\n while (numSpecialCase < 1 or numNumber < 1 or numLowerCase < 1 or \n numUpperCase < 1):\n password = ''\n numLowerCase = numUpperCase = numSpecialCase = numNumber = 0\n for i in range(length):\n charType = random.randint(0, 3)\n if charType == 0:\n password += chr(random.randint(97, 121))\n numLowerCase += 1\n elif charType == 1:\n password += chr(random.randint(65, 90))\n numUpperCase += 1\n elif charType == 2:\n password += chr(random.randint(48, 57))\n numNumber += 1\n else:\n password += randomChars[random.randint(0, len(randomChars) - 1)\n ]\n numSpecialCase += 1\n return password\n\n\ndef main():\n print(generatePassword())\n\n\nmain()\n", "step-5": "import random\n\ndef generatePassword ():\n numLowerCase = numUpperCase = numSpecialCase = numNumber = 0\n password = \"\"\n randomChars = \"-|@.,?/!~#%^&*(){}[]\\=*\"\n length = random.randint(10, 25)\n\n while(numSpecialCase < 1 or numNumber < 1 or numLowerCase < 1 or numUpperCase < 1):\n password = \"\"\n numLowerCase = numUpperCase = numSpecialCase = numNumber = 0\n \n for i in range (length):\n charType = random.randint(0, 3)\n \n #lowercase letters\n if(charType == 0):\n password+= chr(random.randint(97, 121))\n numLowerCase+=1\n \n #uppercase letters\n elif(charType == 1):\n password+= chr(random.randint(65, 90))\n numUpperCase+=1\n \n #number letters\n elif(charType == 2):\n password+= chr(random.randint(48, 57))\n numNumber+=1\n \n #special characters\n else:\n password+= randomChars[random.randint(0, len(randomChars)-1)]\n numSpecialCase+=1\n\n return password\n\ndef main():\n print(generatePassword())\n \nmain()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
#-*- coding: utf-8 -*- """ Django settings for HyperKitty + Postorius Pay attention to settings ALLOWED_HOSTS and DATABASES! """ from os.path import abspath, dirname, join as joinpath from ConfigParser import SafeConfigParser def read_cfg(path, section=None, option=None): config = SafeConfigParser() config.read(path) def get(section, option): return config.get(section, option) if config.has_option(section, option) else None return get(section, option) if section else get mailman_cfg = read_cfg('/etc/mailman.cfg') BASE_DIR = '/usr/lib/bundles/mailman-webui' CONF_DIR = '/etc/mailman-webui' DATA_DIR = '/var/lib/mailman-webui' LOG_DIR = '/var/log/mailman-webui' # Hosts/domain names that are valid for this site. # NOTE: You MUST add domain name of your instance of this application here! # See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['localhost'] # Mailman API credentials # NOTE: Replace with hard-coded values if Mailman is running on a different host. MAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001) MAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin' MAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass') MAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key') MAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1') # REST API REST_FRAMEWORK = { 'PAGE_SIZE': 10, } # Only display mailing-lists in HyperKitty from the same virtual host # as the webserver. 
FILTER_VHOST = False # # Application definition # SITE_ID = 1 INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'hyperkitty', 'rest_framework', 'django_gravatar', 'paintstore', 'compressor', 'haystack', 'django_extensions', 'postorius', 'django_mailman3', 'stronghold', # Uncomment the next line to enable integration with Sentry # and set DSN in RAVEN_CONFIG. #'raven.contrib.django.raven_compat', 'allauth', 'allauth.account', 'allauth.socialaccount', # Uncomment providers that you want to use, if any. #'allauth.socialaccount.providers.openid', #'allauth.socialaccount.providers.github', #'allauth.socialaccount.providers.gitlab', #'allauth.socialaccount.providers.google', #'allauth.socialaccount.providers.twitter', #'allauth.socialaccount.providers.stackexchange', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.middleware.locale.LocaleMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'django.middleware.security.SecurityMiddleware', 'django_mailman3.middleware.TimezoneMiddleware', 'postorius.middleware.PostoriusMiddleware', # Uncomment to require a user to be authenticated to view any page. #'stronghold.middleware.LoginRequiredMiddleware', ) # A string representing the full Python import path to your root URLconf. ROOT_URLCONF = 'urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ # Directory for templates override. 
joinpath(DATA_DIR, 'templates'), ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.i18n', 'django.template.context_processors.media', 'django.template.context_processors.static', 'django.template.context_processors.tz', 'django.template.context_processors.csrf', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django_mailman3.context_processors.common', 'hyperkitty.context_processors.common', 'postorius.context_processors.postorius', ], }, }, ] WSGI_APPLICATION = 'wsgi.application' # Using the cache infrastructure can significantly improve performance on a # production setup. This is an example with a local Memcached server. #CACHES = { # 'default': { # 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache', # 'LOCATION': '127.0.0.1:11211', # } #} # # Databases # See https://docs.djangoproject.com/en/1.9/ref/settings/#databases # DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': joinpath(DATA_DIR, 'db.sqlite3'), } # Remove the above lines and uncomment the below to use PostgreSQL. # 'default': { # 'ENGINE': 'django.db.backends.postgresql_psycopg2', # 'NAME': 'mailman_webui', # 'USER': 'mailman_webui', # 'PASSWORD': 'change-me', # # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP. # 'HOST': '127.0.0.1', # 'PORT': '', # } } # Full-text search engine HAYSTACK_CONNECTIONS = { 'default': { 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine', 'PATH': joinpath(DATA_DIR, 'fulltext_index'), }, } # # Outgoing mails # # NOTE: Replace with hard-coded values if Mailman is running on a different host. # The host and port of the SMTP server to use for sending email. 
EMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost' EMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25) # Username and password to use for the SMTP server defined above. EMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or '' EMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or '' # Whether to use a explicit TLS connection when talking to the SMTP server. EMAIL_USE_TLS = False # Whether to use an implicit TLS connection when talking to the SMTP server. EMAIL_USE_SSL = False # A tuple that lists people who get code error notifications. When DEBUG=False # and a view raises an exception, Django will email these people with the full # exception information. Each member of the tuple should be a tuple of (Full # name, email address). ADMINS = ( ('Mailman Admin', 'root@localhost'), ) # If you enable email reporting for error messages, this is where those emails # will appear to be coming from. Make sure you set a valid domain name, # otherwise the emails may get rejected. # https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL #SERVER_EMAIL = 'root@your-domain.org' # If you enable internal authentication, this is the address that the emails # will appear to be coming from. Make sure you set a valid domain name, # otherwise the emails may get rejected. # https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email #DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org' # # Security settings # # A secret key used for signing sessions, cookies, password reset tokens etc. 
SECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read() CSRF_COOKIE_SECURE = True CSRF_COOKIE_HTTPONLY = True SESSION_COOKIE_SECURE = True SECURE_CONTENT_TYPE_NOSNIFF = True SECURE_BROWSER_XSS_FILTER = True X_FRAME_OPTIONS = 'DENY' # If you're behind a proxy, use the X-Forwarded-Host header # See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host USE_X_FORWARDED_HOST = True # And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER # https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') #SECURE_SSL_REDIRECT = True # If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT # contains at least this line: #SECURE_REDIRECT_EXEMPT = [ # 'archives/api/mailman/.*', # Request from Mailman. #] # # Authentication # AUTHENTICATION_BACKENDS = ( 'django.contrib.auth.backends.ModelBackend', # Uncomment to next line to enable LDAP authentication. #'custom.LDAPBackend', 'allauth.account.auth_backends.AuthenticationBackend', ) LOGIN_URL = 'account_login' LOGIN_REDIRECT_URL = 'hk_root' LOGOUT_URL = 'account_logout' # Whether registration of new accounts is currently permitted. REGISTRATION_OPEN = True # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator' }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' }, ] # URLs which are ignored by LoginRequiredMiddleware, i.e. the middleware # does not *force* them to require authentication. 
STRONGHOLD_PUBLIC_URLS = ( r'^/accounts/.*', r'^/archives/api/mailman/.*', ) ## Django Allauth # Custom AccountAdapter for allauth that respects REGISTRATION_OPEN variable. ACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter' ACCOUNT_AUTHENTICATION_METHOD = 'username_email' ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_EMAIL_VERIFICATION = 'mandatory' ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https' ACCOUNT_UNIQUE_EMAIL = True # Whether to disable intermediate logout page. ACCOUNT_LOGOUT_ON_GET = False SOCIALACCOUNT_PROVIDERS = {} #SOCIALACCOUNT_PROVIDERS = { # 'openid': { # 'SERVERS': [ # { # 'id': 'yahoo', # 'name': 'Yahoo', # 'openid_url': 'http://me.yahoo.com' # } # ], # }, # 'google': { # 'SCOPE': ['profile', 'email'], # 'AUTH_PARAMS': {'access_type': 'online'}, # }, # 'facebook': { # 'METHOD': 'oauth2', # 'SCOPE': ['email'], # 'FIELDS': [ # 'email', # 'name', # 'first_name', # 'last_name', # 'locale', # 'timezone', # ], # 'VERSION': 'v2.4', # }, #} ## Django LDAP if 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS: import ldap from django_auth_ldap.config import LDAPSearch ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs') AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org' AUTH_LDAP_USER_SEARCH = LDAPSearch( 'ou=People,dc=example,dc=org', ldap.SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))' ) AUTH_LDAP_USER_ATTR_MAP = { 'first_name': 'givenName', 'last_name': 'sn', 'email': 'mail', } # # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ # LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ # # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. 
# Example: "/var/www/example.com/static/" STATIC_ROOT = joinpath(BASE_DIR, 'static') # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static". # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ) # django-compressor COMPRESS_OFFLINE = True # Compatibility with Bootstrap 3 from django.contrib.messages import constants as messages MESSAGE_TAGS = { messages.ERROR: 'danger' } # # Gravatar # https://github.com/twaddington/django-gravatar # # Gravatar base url. GRAVATAR_URL = 'http://cdn.libravatar.org/' # Gravatar base secure https url. GRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/' # Gravatar size in pixels. #GRAVATAR_DEFAULT_SIZE = '80' # An image url or one of the following: 'mm', 'identicon', 'monsterid', 'wavatar', 'retro'. GRAVATAR_DEFAULT_IMAGE = 'retro' # One of the following: 'g', 'pg', 'r', 'x'. #GRAVATAR_DEFAULT_RATING = 'g' # True to use https by default, False for plain http. GRAVATAR_DEFAULT_SECURE = True # # Logging # # A sample logging configuration. The only tangible logging performed by this # configuration is to send an email to the site admins on every HTTP 500 error # when DEBUG=False. See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. 
LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'formatter': 'simple', }, 'file':{ 'level': 'INFO', #'class': 'logging.handlers.RotatingFileHandler', 'class': 'logging.handlers.WatchedFileHandler', 'filename': joinpath(LOG_DIR, 'mailman-webui.log'), 'formatter': 'verbose', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, }, 'loggers': { #'django.request': { # 'handlers': ['mail_admins'], # 'level': 'ERROR', # 'propagate': True, #}, 'django.request': { 'handlers': ['file'], 'level': 'ERROR', 'propagate': True, }, 'django': { 'handlers': ['file'], 'level': 'ERROR', 'propagate': True, }, 'postorius': { 'handlers': ['file'], 'level': 'INFO', 'propagate': True, }, 'hyperkitty': { 'handlers': ['file'], 'level': 'INFO', 'propagate': True, }, }, 'formatters': { 'verbose': { 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s' }, 'simple': { 'format': '%(levelname)s %(message)s' }, }, 'root': { 'handlers': ['file'], 'level': 'INFO', }, } if 'raven.contrib.django.raven_compat' in INSTALLED_APPS: RAVEN_CONFIG = { 'dsn': 'https://<key>:<secret>@sentry.io/<project>', } LOGGING['handlers']['sentry'] = { 'level': 'ERROR', 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler', } LOGGING['loggers']['root']['handlers'].append('sentry') try: from settings_local import * except ImportError: pass
normal
{ "blob_id": "0dd17d8872b251fbc59a322bf3c695bd8079aba4", "index": 3338, "step-1": "<mask token>\n\n\ndef read_cfg(path, section=None, option=None):\n config = SafeConfigParser()\n config.read(path)\n\n def get(section, option):\n return config.get(section, option) if config.has_option(section, option\n ) else None\n return get(section, option) if section else get\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef read_cfg(path, section=None, option=None):\n config = SafeConfigParser()\n config.read(path)\n\n def get(section, option):\n return config.get(section, option) if config.has_option(section, option\n ) else None\n return get(section, option) if section else get\n\n\n<mask token>\nif 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:\n import ldap\n from django_auth_ldap.config import LDAPSearch\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')\n AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'\n AUTH_LDAP_USER_SEARCH = LDAPSearch('ou=People,dc=example,dc=org', ldap.\n SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))')\n AUTH_LDAP_USER_ATTR_MAP = {'first_name': 'givenName', 'last_name': 'sn',\n 'email': 'mail'}\n<mask token>\nif 'raven.contrib.django.raven_compat' in INSTALLED_APPS:\n RAVEN_CONFIG = {'dsn': 'https://<key>:<secret>@sentry.io/<project>'}\n LOGGING['handlers']['sentry'] = {'level': 'ERROR', 'class':\n 'raven.contrib.django.raven_compat.handlers.SentryHandler'}\n LOGGING['loggers']['root']['handlers'].append('sentry')\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n", "step-3": "<mask token>\n\n\ndef read_cfg(path, section=None, option=None):\n config = SafeConfigParser()\n config.read(path)\n\n def get(section, option):\n return config.get(section, option) if config.has_option(section, option\n ) else None\n return get(section, option) if section else get\n\n\nmailman_cfg = read_cfg('/etc/mailman.cfg')\nBASE_DIR = '/usr/lib/bundles/mailman-webui'\nCONF_DIR = '/etc/mailman-webui'\nDATA_DIR = 
'/var/lib/mailman-webui'\nLOG_DIR = '/var/log/mailman-webui'\nALLOWED_HOSTS = ['localhost']\nMAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice',\n 'port') or 8001)\nMAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'\nMAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')\nMAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general',\n 'api_key')\nMAILMAN_ARCHIVER_FROM = '127.0.0.1', '::1', '::ffff:127.0.0.1'\nREST_FRAMEWORK = {'PAGE_SIZE': 10}\nFILTER_VHOST = False\nSITE_ID = 1\nINSTALLED_APPS = ('django.contrib.admin', 'django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.sessions',\n 'django.contrib.sites', 'django.contrib.messages',\n 'django.contrib.staticfiles', 'hyperkitty', 'rest_framework',\n 'django_gravatar', 'paintstore', 'compressor', 'haystack',\n 'django_extensions', 'postorius', 'django_mailman3', 'stronghold',\n 'allauth', 'allauth.account', 'allauth.socialaccount')\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django_mailman3.middleware.TimezoneMiddleware',\n 'postorius.middleware.PostoriusMiddleware')\nROOT_URLCONF = 'urls'\nTEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [joinpath(DATA_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS':\n {'context_processors': ['django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 
'django.template.context_processors.tz',\n 'django.template.context_processors.csrf',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django_mailman3.context_processors.common',\n 'hyperkitty.context_processors.common',\n 'postorius.context_processors.postorius']}}]\nWSGI_APPLICATION = 'wsgi.application'\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME':\n joinpath(DATA_DIR, 'db.sqlite3')}}\nHAYSTACK_CONNECTIONS = {'default': {'ENGINE':\n 'haystack.backends.whoosh_backend.WhooshEngine', 'PATH': joinpath(\n DATA_DIR, 'fulltext_index')}}\nEMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'\nEMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)\nEMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''\nEMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''\nEMAIL_USE_TLS = False\nEMAIL_USE_SSL = False\nADMINS = ('Mailman Admin', 'root@localhost'),\nSECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nSESSION_COOKIE_SECURE = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nX_FRAME_OPTIONS = 'DENY'\nUSE_X_FORWARDED_HOST = True\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\nAUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend')\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = 'hk_root'\nLOGOUT_URL = 'account_logout'\nREGISTRATION_OPEN = True\nAUTH_PASSWORD_VALIDATORS = [{'NAME':\n 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'\n }, {'NAME':\n 'django.contrib.auth.password_validation.MinimumLengthValidator'}, {\n 'NAME':\n 'django.contrib.auth.password_validation.CommonPasswordValidator'}, {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}\n ]\nSTRONGHOLD_PUBLIC_URLS = '^/accounts/.*', 
'^/archives/api/mailman/.*'\nACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_LOGOUT_ON_GET = False\nSOCIALACCOUNT_PROVIDERS = {}\nif 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:\n import ldap\n from django_auth_ldap.config import LDAPSearch\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')\n AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'\n AUTH_LDAP_USER_SEARCH = LDAPSearch('ou=People,dc=example,dc=org', ldap.\n SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))')\n AUTH_LDAP_USER_ATTR_MAP = {'first_name': 'givenName', 'last_name': 'sn',\n 'email': 'mail'}\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_ROOT = joinpath(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = ()\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder')\nCOMPRESS_OFFLINE = True\n<mask token>\nMESSAGE_TAGS = {messages.ERROR: 'danger'}\nGRAVATAR_URL = 'http://cdn.libravatar.org/'\nGRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/'\nGRAVATAR_DEFAULT_IMAGE = 'retro'\nGRAVATAR_DEFAULT_SECURE = True\nLOGGING = {'version': 1, 'disable_existing_loggers': False, 'filters': {\n 'require_debug_false': {'()': 'django.utils.log.RequireDebugFalse'}},\n 'handlers': {'console': {'class': 'logging.StreamHandler', 'formatter':\n 'simple'}, 'file': {'level': 'INFO', 'class':\n 'logging.handlers.WatchedFileHandler', 'filename': joinpath(LOG_DIR,\n 'mailman-webui.log'), 'formatter': 'verbose'}, 'mail_admins': {'level':\n 'ERROR', 'filters': ['require_debug_false'], 'class':\n 'django.utils.log.AdminEmailHandler'}}, 'loggers': {'django.request': {\n 'handlers': ['file'], 'level': 'ERROR', 
'propagate': True}, 'django': {\n 'handlers': ['file'], 'level': 'ERROR', 'propagate': True}, 'postorius':\n {'handlers': ['file'], 'level': 'INFO', 'propagate': True},\n 'hyperkitty': {'handlers': ['file'], 'level': 'INFO', 'propagate': True\n }}, 'formatters': {'verbose': {'format':\n '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n }, 'simple': {'format': '%(levelname)s %(message)s'}}, 'root': {\n 'handlers': ['file'], 'level': 'INFO'}}\nif 'raven.contrib.django.raven_compat' in INSTALLED_APPS:\n RAVEN_CONFIG = {'dsn': 'https://<key>:<secret>@sentry.io/<project>'}\n LOGGING['handlers']['sentry'] = {'level': 'ERROR', 'class':\n 'raven.contrib.django.raven_compat.handlers.SentryHandler'}\n LOGGING['loggers']['root']['handlers'].append('sentry')\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n", "step-4": "<mask token>\nfrom os.path import abspath, dirname, join as joinpath\nfrom ConfigParser import SafeConfigParser\n\n\ndef read_cfg(path, section=None, option=None):\n config = SafeConfigParser()\n config.read(path)\n\n def get(section, option):\n return config.get(section, option) if config.has_option(section, option\n ) else None\n return get(section, option) if section else get\n\n\nmailman_cfg = read_cfg('/etc/mailman.cfg')\nBASE_DIR = '/usr/lib/bundles/mailman-webui'\nCONF_DIR = '/etc/mailman-webui'\nDATA_DIR = '/var/lib/mailman-webui'\nLOG_DIR = '/var/log/mailman-webui'\nALLOWED_HOSTS = ['localhost']\nMAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice',\n 'port') or 8001)\nMAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'\nMAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')\nMAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general',\n 'api_key')\nMAILMAN_ARCHIVER_FROM = '127.0.0.1', '::1', '::ffff:127.0.0.1'\nREST_FRAMEWORK = {'PAGE_SIZE': 10}\nFILTER_VHOST = False\nSITE_ID = 1\nINSTALLED_APPS = ('django.contrib.admin', 
'django.contrib.auth',\n 'django.contrib.contenttypes', 'django.contrib.sessions',\n 'django.contrib.sites', 'django.contrib.messages',\n 'django.contrib.staticfiles', 'hyperkitty', 'rest_framework',\n 'django_gravatar', 'paintstore', 'compressor', 'haystack',\n 'django_extensions', 'postorius', 'django_mailman3', 'stronghold',\n 'allauth', 'allauth.account', 'allauth.socialaccount')\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django_mailman3.middleware.TimezoneMiddleware',\n 'postorius.middleware.PostoriusMiddleware')\nROOT_URLCONF = 'urls'\nTEMPLATES = [{'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [joinpath(DATA_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS':\n {'context_processors': ['django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.csrf',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django_mailman3.context_processors.common',\n 'hyperkitty.context_processors.common',\n 'postorius.context_processors.postorius']}}]\nWSGI_APPLICATION = 'wsgi.application'\nDATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME':\n joinpath(DATA_DIR, 'db.sqlite3')}}\nHAYSTACK_CONNECTIONS = {'default': {'ENGINE':\n 
'haystack.backends.whoosh_backend.WhooshEngine', 'PATH': joinpath(\n DATA_DIR, 'fulltext_index')}}\nEMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'\nEMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)\nEMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''\nEMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''\nEMAIL_USE_TLS = False\nEMAIL_USE_SSL = False\nADMINS = ('Mailman Admin', 'root@localhost'),\nSECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nSESSION_COOKIE_SECURE = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nX_FRAME_OPTIONS = 'DENY'\nUSE_X_FORWARDED_HOST = True\nSECURE_PROXY_SSL_HEADER = 'HTTP_X_FORWARDED_PROTO', 'https'\nAUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend')\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = 'hk_root'\nLOGOUT_URL = 'account_logout'\nREGISTRATION_OPEN = True\nAUTH_PASSWORD_VALIDATORS = [{'NAME':\n 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'\n }, {'NAME':\n 'django.contrib.auth.password_validation.MinimumLengthValidator'}, {\n 'NAME':\n 'django.contrib.auth.password_validation.CommonPasswordValidator'}, {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator'}\n ]\nSTRONGHOLD_PUBLIC_URLS = '^/accounts/.*', '^/archives/api/mailman/.*'\nACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_LOGOUT_ON_GET = False\nSOCIALACCOUNT_PROVIDERS = {}\nif 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:\n import ldap\n from django_auth_ldap.config import LDAPSearch\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')\n AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'\n 
AUTH_LDAP_USER_SEARCH = LDAPSearch('ou=People,dc=example,dc=org', ldap.\n SCOPE_SUBTREE, '(&(mail=*)(uid=%(user)s))')\n AUTH_LDAP_USER_ATTR_MAP = {'first_name': 'givenName', 'last_name': 'sn',\n 'email': 'mail'}\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\nSTATIC_ROOT = joinpath(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = ()\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder')\nCOMPRESS_OFFLINE = True\nfrom django.contrib.messages import constants as messages\nMESSAGE_TAGS = {messages.ERROR: 'danger'}\nGRAVATAR_URL = 'http://cdn.libravatar.org/'\nGRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/'\nGRAVATAR_DEFAULT_IMAGE = 'retro'\nGRAVATAR_DEFAULT_SECURE = True\nLOGGING = {'version': 1, 'disable_existing_loggers': False, 'filters': {\n 'require_debug_false': {'()': 'django.utils.log.RequireDebugFalse'}},\n 'handlers': {'console': {'class': 'logging.StreamHandler', 'formatter':\n 'simple'}, 'file': {'level': 'INFO', 'class':\n 'logging.handlers.WatchedFileHandler', 'filename': joinpath(LOG_DIR,\n 'mailman-webui.log'), 'formatter': 'verbose'}, 'mail_admins': {'level':\n 'ERROR', 'filters': ['require_debug_false'], 'class':\n 'django.utils.log.AdminEmailHandler'}}, 'loggers': {'django.request': {\n 'handlers': ['file'], 'level': 'ERROR', 'propagate': True}, 'django': {\n 'handlers': ['file'], 'level': 'ERROR', 'propagate': True}, 'postorius':\n {'handlers': ['file'], 'level': 'INFO', 'propagate': True},\n 'hyperkitty': {'handlers': ['file'], 'level': 'INFO', 'propagate': True\n }}, 'formatters': {'verbose': {'format':\n '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n }, 'simple': {'format': '%(levelname)s %(message)s'}}, 'root': {\n 'handlers': ['file'], 'level': 'INFO'}}\nif 'raven.contrib.django.raven_compat' in INSTALLED_APPS:\n 
RAVEN_CONFIG = {'dsn': 'https://<key>:<secret>@sentry.io/<project>'}\n LOGGING['handlers']['sentry'] = {'level': 'ERROR', 'class':\n 'raven.contrib.django.raven_compat.handlers.SentryHandler'}\n LOGGING['loggers']['root']['handlers'].append('sentry')\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n", "step-5": "#-*- coding: utf-8 -*-\n\"\"\"\nDjango settings for HyperKitty + Postorius\n\nPay attention to settings ALLOWED_HOSTS and DATABASES!\n\"\"\"\nfrom os.path import abspath, dirname, join as joinpath\nfrom ConfigParser import SafeConfigParser\n\n\ndef read_cfg(path, section=None, option=None):\n config = SafeConfigParser()\n config.read(path)\n def get(section, option):\n return config.get(section, option) if config.has_option(section, option) else None\n return get(section, option) if section else get\n\nmailman_cfg = read_cfg('/etc/mailman.cfg')\n\n\nBASE_DIR = '/usr/lib/bundles/mailman-webui'\nCONF_DIR = '/etc/mailman-webui'\nDATA_DIR = '/var/lib/mailman-webui'\nLOG_DIR = '/var/log/mailman-webui'\n\n# Hosts/domain names that are valid for this site.\n# NOTE: You MUST add domain name of your instance of this application here!\n# See https://docs.djangoproject.com/en/1.9/ref/settings/#allowed-hosts\nALLOWED_HOSTS = ['localhost']\n\n# Mailman API credentials\n# NOTE: Replace with hard-coded values if Mailman is running on a different host.\nMAILMAN_REST_API_URL = 'http://localhost:%s' % (mailman_cfg('webservice', 'port') or 8001)\nMAILMAN_REST_API_USER = mailman_cfg('webservice', 'admin_user') or 'restadmin'\nMAILMAN_REST_API_PASS = mailman_cfg('webservice', 'admin_pass')\nMAILMAN_ARCHIVER_KEY = read_cfg('/etc/mailman.d/hyperkitty.cfg', 'general', 'api_key')\nMAILMAN_ARCHIVER_FROM = ('127.0.0.1', '::1', '::ffff:127.0.0.1')\n\n# REST API\nREST_FRAMEWORK = {\n 'PAGE_SIZE': 10,\n}\n\n# Only display mailing-lists in HyperKitty from the same virtual host\n# as the webserver.\nFILTER_VHOST = False\n\n\n#\n# Application definition\n#\n\nSITE_ID = 
1\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'hyperkitty',\n 'rest_framework',\n 'django_gravatar',\n 'paintstore',\n 'compressor',\n 'haystack',\n 'django_extensions',\n 'postorius',\n 'django_mailman3',\n 'stronghold',\n\n # Uncomment the next line to enable integration with Sentry\n # and set DSN in RAVEN_CONFIG.\n #'raven.contrib.django.raven_compat',\n\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n # Uncomment providers that you want to use, if any.\n #'allauth.socialaccount.providers.openid',\n #'allauth.socialaccount.providers.github',\n #'allauth.socialaccount.providers.gitlab',\n #'allauth.socialaccount.providers.google',\n #'allauth.socialaccount.providers.twitter',\n #'allauth.socialaccount.providers.stackexchange',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django_mailman3.middleware.TimezoneMiddleware',\n 'postorius.middleware.PostoriusMiddleware',\n\n # Uncomment to require a user to be authenticated to view any page.\n #'stronghold.middleware.LoginRequiredMiddleware',\n)\n\n# A string representing the full Python import path to your root URLconf.\nROOT_URLCONF = 'urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n # Directory for templates override.\n joinpath(DATA_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 
'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.csrf',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django_mailman3.context_processors.common',\n 'hyperkitty.context_processors.common',\n 'postorius.context_processors.postorius',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'wsgi.application'\n\n# Using the cache infrastructure can significantly improve performance on a\n# production setup. This is an example with a local Memcached server.\n#CACHES = {\n# 'default': {\n# 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',\n# 'LOCATION': '127.0.0.1:11211',\n# }\n#}\n\n\n#\n# Databases\n# See https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n#\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': joinpath(DATA_DIR, 'db.sqlite3'),\n }\n# Remove the above lines and uncomment the below to use PostgreSQL.\n# 'default': {\n# 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n# 'NAME': 'mailman_webui',\n# 'USER': 'mailman_webui',\n# 'PASSWORD': 'change-me',\n# # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.\n# 'HOST': '127.0.0.1',\n# 'PORT': '',\n# }\n}\n\n# Full-text search engine\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',\n 'PATH': joinpath(DATA_DIR, 'fulltext_index'),\n },\n}\n\n\n#\n# Outgoing mails\n#\n\n# NOTE: Replace with hard-coded values if Mailman is running on a different host.\n\n# The host and port of the SMTP server to use for sending email.\nEMAIL_HOST = mailman_cfg('mta', 'smtp_host') or 'localhost'\nEMAIL_PORT = int(mailman_cfg('mta', 'smtp_port') or 25)\n\n# 
Username and password to use for the SMTP server defined above.\nEMAIL_HOST_USER = mailman_cfg('mta', 'smtp_user') or ''\nEMAIL_HOST_PASSWORD = mailman_cfg('mta', 'smtp_pass') or ''\n\n# Whether to use a explicit TLS connection when talking to the SMTP server.\nEMAIL_USE_TLS = False\n\n# Whether to use an implicit TLS connection when talking to the SMTP server.\nEMAIL_USE_SSL = False\n\n# A tuple that lists people who get code error notifications. When DEBUG=False\n# and a view raises an exception, Django will email these people with the full\n# exception information. Each member of the tuple should be a tuple of (Full\n# name, email address).\nADMINS = (\n ('Mailman Admin', 'root@localhost'),\n)\n\n# If you enable email reporting for error messages, this is where those emails\n# will appear to be coming from. Make sure you set a valid domain name,\n# otherwise the emails may get rejected.\n# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-SERVER_EMAIL\n#SERVER_EMAIL = 'root@your-domain.org'\n\n# If you enable internal authentication, this is the address that the emails\n# will appear to be coming from. 
Make sure you set a valid domain name,\n# otherwise the emails may get rejected.\n# https://docs.djangoproject.com/en/1.9/ref/settings/#default-from-email\n#DEFAULT_FROM_EMAIL = 'mailing-lists@you-domain.org'\n\n\n#\n# Security settings\n#\n\n# A secret key used for signing sessions, cookies, password reset tokens etc.\nSECRET_KEY = open(joinpath(CONF_DIR, 'secret_key')).read()\n\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nSESSION_COOKIE_SECURE = True\nSECURE_CONTENT_TYPE_NOSNIFF = True\nSECURE_BROWSER_XSS_FILTER = True\nX_FRAME_OPTIONS = 'DENY'\n\n# If you're behind a proxy, use the X-Forwarded-Host header\n# See https://docs.djangoproject.com/en/1.9/ref/settings/#use-x-forwarded-host\nUSE_X_FORWARDED_HOST = True\n\n# And if your proxy does your SSL encoding for you, set SECURE_PROXY_SSL_HEADER\n# https://docs.djangoproject.com/en/1.9/ref/settings/#secure-proxy-ssl-header\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n#SECURE_SSL_REDIRECT = True\n\n# If you set SECURE_SSL_REDIRECT to True, make sure the SECURE_REDIRECT_EXEMPT\n# contains at least this line:\n#SECURE_REDIRECT_EXEMPT = [\n# 'archives/api/mailman/.*', # Request from Mailman.\n#]\n\n\n#\n# Authentication\n#\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n # Uncomment to next line to enable LDAP authentication.\n #'custom.LDAPBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nLOGIN_URL = 'account_login'\nLOGIN_REDIRECT_URL = 'hk_root'\nLOGOUT_URL = 'account_logout'\n\n# Whether registration of new accounts is currently permitted.\nREGISTRATION_OPEN = True\n\n# Password validation\n# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators\nAUTH_PASSWORD_VALIDATORS = [\n { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator' },\n { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator' },\n { 'NAME': 
'django.contrib.auth.password_validation.CommonPasswordValidator' },\n { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator' },\n]\n\n# URLs which are ignored by LoginRequiredMiddleware, i.e. the middleware\n# does not *force* them to require authentication.\nSTRONGHOLD_PUBLIC_URLS = (\n r'^/accounts/.*',\n r'^/archives/api/mailman/.*',\n)\n\n## Django Allauth\n\n# Custom AccountAdapter for allauth that respects REGISTRATION_OPEN variable.\nACCOUNT_ADAPTER = 'custom.CloseableRegistrationAccountAdapter'\n\nACCOUNT_AUTHENTICATION_METHOD = 'username_email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'\nACCOUNT_UNIQUE_EMAIL = True\n\n# Whether to disable intermediate logout page.\nACCOUNT_LOGOUT_ON_GET = False\n\nSOCIALACCOUNT_PROVIDERS = {}\n#SOCIALACCOUNT_PROVIDERS = {\n# 'openid': {\n# 'SERVERS': [\n# {\n# 'id': 'yahoo',\n# 'name': 'Yahoo',\n# 'openid_url': 'http://me.yahoo.com'\n# }\n# ],\n# },\n# 'google': {\n# 'SCOPE': ['profile', 'email'],\n# 'AUTH_PARAMS': {'access_type': 'online'},\n# },\n# 'facebook': {\n# 'METHOD': 'oauth2',\n# 'SCOPE': ['email'],\n# 'FIELDS': [\n# 'email',\n# 'name',\n# 'first_name',\n# 'last_name',\n# 'locale',\n# 'timezone',\n# ],\n# 'VERSION': 'v2.4',\n# },\n#}\n\n## Django LDAP\nif 'custom.LDAPBackend' in AUTHENTICATION_BACKENDS:\n import ldap\n from django_auth_ldap.config import LDAPSearch\n\n ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, '/etc/ssl/certs')\n\n AUTH_LDAP_SERVER_URI = 'ldaps://ldap.example.org'\n\n AUTH_LDAP_USER_SEARCH = LDAPSearch(\n 'ou=People,dc=example,dc=org',\n ldap.SCOPE_SUBTREE,\n '(&(mail=*)(uid=%(user)s))'\n )\n\n AUTH_LDAP_USER_ATTR_MAP = {\n 'first_name': 'givenName',\n 'last_name': 'sn',\n 'email': 'mail',\n }\n\n\n#\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n#\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\n\n#\n# Static files (CSS, 
JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n#\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = joinpath(BASE_DIR, 'static')\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\".\n # Don't forget to use absolute paths, not relative paths.\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'compressor.finders.CompressorFinder',\n)\n\n# django-compressor\nCOMPRESS_OFFLINE = True\n\n# Compatibility with Bootstrap 3\nfrom django.contrib.messages import constants as messages\nMESSAGE_TAGS = {\n messages.ERROR: 'danger'\n}\n\n\n#\n# Gravatar\n# https://github.com/twaddington/django-gravatar\n#\n\n# Gravatar base url.\nGRAVATAR_URL = 'http://cdn.libravatar.org/'\n# Gravatar base secure https url.\nGRAVATAR_SECURE_URL = 'https://seccdn.libravatar.org/'\n# Gravatar size in pixels.\n#GRAVATAR_DEFAULT_SIZE = '80'\n# An image url or one of the following: 'mm', 'identicon', 'monsterid', 'wavatar', 'retro'.\nGRAVATAR_DEFAULT_IMAGE = 'retro'\n# One of the following: 'g', 'pg', 'r', 'x'.\n#GRAVATAR_DEFAULT_RATING = 'g'\n# True to use https by default, False for plain http.\nGRAVATAR_DEFAULT_SECURE = True\n\n\n#\n# Logging\n#\n\n# A sample logging configuration. The only tangible logging performed by this\n# configuration is to send an email to the site admins on every HTTP 500 error\n# when DEBUG=False. 
See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'simple',\n },\n 'file':{\n 'level': 'INFO',\n #'class': 'logging.handlers.RotatingFileHandler',\n 'class': 'logging.handlers.WatchedFileHandler',\n 'filename': joinpath(LOG_DIR, 'mailman-webui.log'),\n 'formatter': 'verbose',\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n },\n },\n 'loggers': {\n #'django.request': {\n # 'handlers': ['mail_admins'],\n # 'level': 'ERROR',\n # 'propagate': True,\n #},\n 'django.request': {\n 'handlers': ['file'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'django': {\n 'handlers': ['file'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n 'postorius': {\n 'handlers': ['file'],\n 'level': 'INFO',\n 'propagate': True,\n },\n 'hyperkitty': {\n 'handlers': ['file'],\n 'level': 'INFO',\n 'propagate': True,\n },\n },\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n },\n 'root': {\n 'handlers': ['file'],\n 'level': 'INFO',\n },\n}\n\nif 'raven.contrib.django.raven_compat' in INSTALLED_APPS:\n RAVEN_CONFIG = {\n 'dsn': 'https://<key>:<secret>@sentry.io/<project>',\n }\n LOGGING['handlers']['sentry'] = {\n 'level': 'ERROR',\n 'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',\n }\n LOGGING['loggers']['root']['handlers'].append('sentry')\n\n\ntry:\n from settings_local import *\nexcept ImportError:\n pass\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> class Formater: <|reserved_special_token_0|> <|reserved_special_token_1|> class Formater: def clean_number(posible_number): sanitize_number = posible_number.replace(' ', '') number_of_dots = sanitize_number.count('.') if number_of_dots > 1: return None if number_of_dots == 1: dot_position = sanitize_number.index('.') try: sanitize_number.index(',', dot_position) except Exception: sanitize_number = sanitize_number.replace(',', '') else: return None finally: try: return float(sanitize_number) except Exception: return None if number_of_dots == 0: sanitize_number = sanitize_number.replace(',', '') try: return int(sanitize_number) except Exception: return None <|reserved_special_token_1|> class Formater(): def clean_number (posible_number): sanitize_number = posible_number.replace(' ', '') number_of_dots = sanitize_number.count('.') if number_of_dots > 1: return None if number_of_dots == 1: dot_position = sanitize_number.index('.') try: sanitize_number.index(',', dot_position) except Exception: sanitize_number = sanitize_number.replace(',', '') else: return None finally: try: return float(sanitize_number) except Exception: return None if number_of_dots == 0: sanitize_number = sanitize_number.replace(',', '') try: return int(sanitize_number) except Exception: return None
flexible
{ "blob_id": "02c32cf04529ff8b5edddf4e4117f8c4fdf27da9", "index": 8612, "step-1": "<mask token>\n", "step-2": "class Formater:\n <mask token>\n", "step-3": "class Formater:\n\n def clean_number(posible_number):\n sanitize_number = posible_number.replace(' ', '')\n number_of_dots = sanitize_number.count('.')\n if number_of_dots > 1:\n return None\n if number_of_dots == 1:\n dot_position = sanitize_number.index('.')\n try:\n sanitize_number.index(',', dot_position)\n except Exception:\n sanitize_number = sanitize_number.replace(',', '')\n else:\n return None\n finally:\n try:\n return float(sanitize_number)\n except Exception:\n return None\n if number_of_dots == 0:\n sanitize_number = sanitize_number.replace(',', '')\n try:\n return int(sanitize_number)\n except Exception:\n return None\n", "step-4": "class Formater():\n def clean_number (posible_number):\n sanitize_number = posible_number.replace(' ', '')\n number_of_dots = sanitize_number.count('.')\n\n if number_of_dots > 1:\n return None\n if number_of_dots == 1:\n dot_position = sanitize_number.index('.')\n try:\n sanitize_number.index(',', dot_position)\n except Exception:\n sanitize_number = sanitize_number.replace(',', '')\n else:\n return None\n finally:\n try:\n return float(sanitize_number)\n except Exception:\n return None\n if number_of_dots == 0:\n sanitize_number = sanitize_number.replace(',', '')\n try:\n return int(sanitize_number)\n except Exception:\n return None", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> model.add(Dense(5, input_dim=1, activation='relu')) model.add(Dense(3)) model.add(Dense(1)) model.summary() <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) x2 = np.array([11, 12, 13, 14, 15]) model = Sequential() model.add(Dense(5, input_dim=1, activation='relu')) model.add(Dense(3)) model.add(Dense(1)) model.summary() <|reserved_special_token_0|> <|reserved_special_token_1|> from keras.models import Sequential from keras.layers import Dense import numpy as np x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) x2 = np.array([11, 12, 13, 14, 15]) model = Sequential() model.add(Dense(5, input_dim=1, activation='relu')) model.add(Dense(3)) model.add(Dense(1)) model.summary() <|reserved_special_token_0|> <|reserved_special_token_1|> from keras.models import Sequential from keras.layers import Dense import numpy as np x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) x2 = np.array([11, 12, 13, 14, 15]) model = Sequential() model.add(Dense(5, input_dim=1, activation='relu')) model.add(Dense(3)) model.add(Dense(1)) model.summary() ''' model.compile(loss='mse', optimizer='adam', metrics=['accuracy']) model.fit(x, y, epochs=100) loss, acc = model.evaluate(x, y) print("acc : ", acc) print("loss : ", loss) y_predict = model.predict(x2) print(y_predict) '''
flexible
{ "blob_id": "43d9edd9120351ce5065eb266d482ccaa2e56177", "index": 2416, "step-1": "<mask token>\n", "step-2": "<mask token>\nmodel.add(Dense(5, input_dim=1, activation='relu'))\nmodel.add(Dense(3))\nmodel.add(Dense(1))\nmodel.summary()\n<mask token>\n", "step-3": "<mask token>\nx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\ny = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\nx2 = np.array([11, 12, 13, 14, 15])\nmodel = Sequential()\nmodel.add(Dense(5, input_dim=1, activation='relu'))\nmodel.add(Dense(3))\nmodel.add(Dense(1))\nmodel.summary()\n<mask token>\n", "step-4": "from keras.models import Sequential\nfrom keras.layers import Dense\nimport numpy as np\nx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\ny = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\nx2 = np.array([11, 12, 13, 14, 15])\nmodel = Sequential()\nmodel.add(Dense(5, input_dim=1, activation='relu'))\nmodel.add(Dense(3))\nmodel.add(Dense(1))\nmodel.summary()\n<mask token>\n", "step-5": "from keras.models import Sequential\nfrom keras.layers import Dense\n\nimport numpy as np\nx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\ny = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\nx2 = np.array([11, 12, 13, 14, 15])\n\nmodel = Sequential()\nmodel.add(Dense(5, input_dim=1, activation='relu'))\nmodel.add(Dense(3))\nmodel.add(Dense(1))\n\nmodel.summary()\n\n'''\nmodel.compile(loss='mse', optimizer='adam',\n metrics=['accuracy'])\nmodel.fit(x, y, epochs=100)\n\nloss, acc = model.evaluate(x, y)\nprint(\"acc : \", acc)\nprint(\"loss : \", loss)\n\ny_predict = model.predict(x2)\nprint(y_predict)\n'''\n\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/env python
# Integration tests for the ``ksconf combine`` CLI subcommand (layered
# ``default.d/*`` directory merging).
from __future__ import absolute_import, print_function, unicode_literals

import os
import sys
import unittest

# Allow interactive execution from CLI,  cd tests; ./test_cli.py
if __package__ is None:
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from ksconf.conf.parser import PARSECONF_LOOSE, parse_conf
from ksconf.consts import EXIT_CODE_COMBINE_MARKER_MISSING, EXIT_CODE_SUCCESS
from tests.cli_helper import TestWorkDir, ksconf_cli


class CliKsconfCombineTestCase(unittest.TestCase):
    """Exercise ``ksconf combine`` against a layered Splunk app fixture."""

    def build_test01(self, twd):
        """Populate *twd* with a layered ``Splunk_TA_aws`` app fixture.

        Writes three layers under ``default.d/`` whose names encode merge
        priority (higher prefix wins for conflicting keys):

        * ``10-upstream`` -- baseline props.conf, alert_actions.conf, nav XML
        * ``20-corp``     -- overrides ``TZ`` and ``ANNOTATE_PUNCT``
        * ``60-dept``     -- overrides ``TRUNCATE``, alert account, nav XML

        :param twd: test working directory; only its ``write_file()`` method
                    is used here (presumably a ``TestWorkDir`` -- the callers
                    in this class pass one in).
        """
        twd.write_file("etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf", r"""
[aws:config]
SHOULD_LINEMERGE = false
TRUNCATE = 8388608
TIME_PREFIX = configurationItemCaptureTime"\s*:\s*"
TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ
TZ = GMT
MAX_TIMESTAMP_LOOKAHEAD = 28
KV_MODE = json
ANNOTATE_PUNCT = false
FIELDALIAS-dest = resourceType AS dest
FIELDALIAS-object = resourceId AS object
FIELDALIAS-object_id = ARN AS object_id
EVAL-change_type = "configuration"
EVAL-dvc = "AWS Config"
EVAL-status="success"
LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action
LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category
# unify account ID field
FIELDALIAS-aws-account-id = awsAccountId as aws_account_id
FIELDALIAS-region-for-aws-config = awsRegion AS region
""")
        twd.write_file("etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml", """
<nav search_view="search" color="#65A637">
<view name="Inputs" default="true" label="Inputs" />
<view name="Configuration" default="false" label="Configuration" />
<view name="search" default="false" label="Search" />
</nav>
""")
        # In the future there will be a more efficient way to handle the global 'ANNOTATE_PUNCT' scenario
        twd.write_file("etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf", """
[aws:config]
TZ = UTC
# Corp want's punct to be enabled globally
ANNOTATE_PUNCT = true
""")
        twd.write_file("etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf", """
[aws:config]
# Our config is bigger than yours!
TRUNCATE = 9999999
""")
        twd.write_file("etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf", """
[aws_sns_modular_alert]
is_custom = 1
label = AWS SNS Alert
description = Publish search result to AWS SNS
payload_format = json
icon_path = appIcon.png
""")
        twd.write_file("etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf", """
[aws_sns_modular_alert]
param.account = DeptAwsAccount
""")
        # Dept layer adds a "My custom view" entry to the upstream nav XML
        twd.write_file("etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml", """
<nav search_view="search" color="#65A637">
<view name="My custom view" />
<view name="Inputs" default="true" label="Inputs" />
<view name="Configuration" default="false" label="Configuration" />
<view name="search" default="false" label="Search" />
</nav>
""")

    def test_combine_3dir(self):
        """Combine three ``default.d`` layers and verify highest layer wins.

        Runs ``combine`` twice (dry-run, then real) over the fixture from
        :meth:`build_test01`, then layers on a ``99-theforce`` override plus
        same/different text and binary files and combines again.
        """
        # Note that this test tests the old school version of '*.d' processing.  But we must preserve this behavior.
        # Be aware that we pass in 'default.d/*' as a string, and expand the glob vs allowing the shell to handle this
        # and this is _normal_ behavior when dealing with Windows.
        twd = TestWorkDir()
        self.build_test01(twd)
        default = twd.get_path("etc/apps/Splunk_TA_aws/default")
        with ksconf_cli:
            ko = ksconf_cli("combine", "--dry-run", "--target", default, default + ".d/*")
            # Q:  Why do we run this once, but not check anything about it?  (To ensure dry-run has no side effects?)
            ko = ksconf_cli("combine", "--target", default, default + ".d/*")
            self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
            cfg = parse_conf(twd.get_path("etc/apps/Splunk_TA_aws/default/props.conf"))
            self.assertIn("aws:config", cfg)
            # 20-corp's ANNOTATE_PUNCT override beats 10-upstream's "false"
            self.assertEqual(cfg["aws:config"]["ANNOTATE_PUNCT"], "true")
            self.assertEqual(cfg["aws:config"]["EVAL-change_type"], '"configuration"')
            # 60-dept's TRUNCATE override beats 10-upstream's 8388608
            self.assertEqual(cfg["aws:config"]["TRUNCATE"], '9999999')
            nav_content = twd.read_file("etc/apps/Splunk_TA_aws/default/data/ui/nav/default.xml")
            # Non-conf (XML) files are taken from the highest layer (60-dept)
            self.assertIn("My custom view", nav_content)
        # Second pass: add a higher-priority 99-theforce layer plus identical,
        # differing, and binary files to exercise the dry-run diff output.
        twd.write_conf("etc/apps/Splunk_TA_aws/default.d/99-theforce/props.conf", {
            "aws:config": {"TIME_FORMAT": "%Y-%m-%dT%H:%M:%S.%6NZ"}
        })
        twd.write_file("etc/apps/Splunk_TA_aws/default.d/99-theforce/data/ui/nav/default.xml", """
<nav search_view="search" color="#65A637">
<view name="My custom view" />
<view name="Inputs" default="true" label="Inputs" />
<view name="Configuration" default="false" label="Configuration" />
</nav>
""")
        twd.write_file("etc/apps/Splunk_TA_aws/default/data/dead.conf", "# File to remove")
        twd.write_file("etc/apps/Splunk_TA_aws/default/data/tags.conf", "# Locally created file")
        twd.write_file("etc/apps/Splunk_TA_aws/default.d/99-blah/same.txt", "SAME TEXT")
        twd.write_file("etc/apps/Splunk_TA_aws/default/same.txt", "SAME TEXT")
        twd.write_file("etc/apps/Splunk_TA_aws/default.d/99-blah/binary.bin", b"#BINARY \xff \x00")
        twd.write_file("etc/apps/Splunk_TA_aws/default/binary.bin", b"#BINARY NEW \x00 \xff \xFB")
        with ksconf_cli:
            ko = ksconf_cli("combine", "--dry-run", "--target", default, default + ".d/*")
            self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)
            # Dry-run diff should show the 'search' view being removed ...
            self.assertRegex(ko.stdout, r'[\r\n][-]\s*<view name="search"')
            self.assertRegex(ko.stdout, r'[\r\n][-] ?[\r\n]')  # Remove empty lines from nav
            # ... and the TIME_FORMAT being upgraded to %6N precision
            self.assertRegex(ko.stdout, r"[\r\n][+]TIME_FORMAT = [^\r\n]+%6N")
        with ksconf_cli:
            ko = ksconf_cli("combine", "--target", default, default + ".d/*")

    def test_sort_order(self):
        "Confirm that single input files are
copied as-is" twd = TestWorkDir() default = twd.get_path("input") target = twd.get_path("output") unique_conf = [ "z = 1", " b=? ", "a = 9"] twd.write_file("input/unique.conf", "\n".join(unique_conf)) with ksconf_cli: ko = ksconf_cli("combine", "--layer-method", "disable", "--banner", "", "--target", target, default) self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS) data = twd.read_file("output/unique.conf").splitlines() self.assertListEqual(unique_conf, data) def test_combine_dird(self): twd = TestWorkDir() self.build_test01(twd) default = twd.get_path("etc/apps/Splunk_TA_aws") target = twd.get_path("etc/apps/Splunk_TA_aws-OUTPUT") with ksconf_cli: ko = ksconf_cli("combine", "--layer-method", "dir.d", "--dry-run", "--target", target, default) ko = ksconf_cli("combine", "--layer-method", "dir.d", "--target", target, default) self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS) cfg = parse_conf(target + "/default/props.conf") self.assertIn("aws:config", cfg) self.assertEqual(cfg["aws:config"]["ANNOTATE_PUNCT"], "true") self.assertEqual(cfg["aws:config"]["EVAL-change_type"], '"configuration"') self.assertEqual(cfg["aws:config"]["TRUNCATE"], '9999999') nav_content = twd.read_file("etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml") self.assertIn("My custom view", nav_content) alert_action = twd.read_conf("etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf") self.assertIn("aws_sns_modular_alert", alert_action) self.assertEqual(alert_action["aws_sns_modular_alert"]["param.account"], "DeptAwsAccount") # layer 10 self.assertEqual(alert_action["aws_sns_modular_alert"]["label"], "AWS SNS Alert") # layer 60 def test_keep_existing_ds_local_app(self): twd = TestWorkDir() src = twd.get_path("repo/apps/Splunk_TA_nix") target = twd.get_path("etc/deployment-apps/Splunk_TA_nix") twd.write_file("repo/apps/Splunk_TA_nix/default/app.conf", r""" [install] allows_disable = false is_configured = true state = enabled [launcher] author = Splunk description = The app is 
Splunk version = 7.0.0 """) # Make partent diretories os.makedirs(twd.get_path("etc/deployment-apps")) # First run (creates maker file) with ksconf_cli: ko = ksconf_cli("combine", "--keep-existing", "local/app.conf", "--target", target, src) self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS) # Local folder hasn't been created yet self.assertFalse(os.path.isdir(twd.get_path("etc/deployment-apps/Splunk_TA_nix/local"))) # Simulate a 'splunk reload deploy-server' twd.write_file("etc/deployment-apps/Splunk_TA_nix/local/app.conf", "# Autogenerated file") with ksconf_cli: ko = ksconf_cli("combine", "--keep-existing", "local/app.conf", "--target", target, src) self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS) cfg = parse_conf(os.path.join(target, "default/app.conf")) self.assertIn("install", cfg) self.assertEqual(cfg["launcher"]["version"], "7.0.0") self.assertEqual(twd.read_file("etc/deployment-apps/Splunk_TA_nix/local/app.conf"), "# Autogenerated file") # This time the file will be removed ko = ksconf_cli("combine", "--target", target, src) self.assertFalse(os.path.isfile(twd.get_path("etc/deployment-apps/Splunk_TA_nix/local/app.conf")), "local/app.conf should have been removed.") def test_combine_conf_spec(self): twd = TestWorkDir() self.build_test01(twd) twd.write_file("etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec", r""" [<stanza_type1>] important_field = <str> * Some notes about the important field. * Required! 
disabled = <bool> """) twd.write_file("etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec", r""" [bookmark::<prefixed_stanza_type>] resource = <url> category = <str> * Label for organization disabled = <bool> """) default = twd.get_path("etc/apps/Splunk_TA_aws") target = twd.get_path("etc/apps/Splunk_TA_aws-OUTPUT") with ksconf_cli: ko = ksconf_cli("combine", "--layer-method", "dir.d", "--target", target, default) self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS) spec_file = twd.get_path("etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec") spec = parse_conf(spec_file, profile=PARSECONF_LOOSE) self.assertIn("bookmark::<prefixed_stanza_type>", spec) self.assertIn("<stanza_type1>", spec) def test_require_arg(self): with ksconf_cli: ko = ksconf_cli("combine", "source-dir") self.assertRegex(ko.stderr, "Must provide [^\r\n]+--target") def test_missing_marker(self): twd = TestWorkDir() twd.write_file("source-dir/someapp/default/blah.conf", "[entry]\nboring=yes\n") twd.write_file("dest-dir/someapp/default/blah.conf", "[entry]\nboring=yes\n") ko = ksconf_cli("combine", twd.get_path("source-dir"), "--target", twd.get_path("dest-dir")) self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING) self.assertRegex(ko.stderr, r".*Marker file missing\b.*") if __name__ == '__main__': # pragma: no cover unittest.main()
normal
{ "blob_id": "1bb953b665f48638691986e2fcae73b10a1c2ce0", "index": 7729, "step-1": "<mask token>\n\n\nclass CliKsconfCombineTestCase(unittest.TestCase):\n\n def build_test01(self, twd):\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf',\n \"\"\"\n [aws:config]\n SHOULD_LINEMERGE = false\n TRUNCATE = 8388608\n TIME_PREFIX = configurationItemCaptureTime\"\\\\s*:\\\\s*\"\n TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ\n TZ = GMT\n MAX_TIMESTAMP_LOOKAHEAD = 28\n KV_MODE = json\n ANNOTATE_PUNCT = false\n\n FIELDALIAS-dest = resourceType AS dest\n FIELDALIAS-object = resourceId AS object\n FIELDALIAS-object_id = ARN AS object_id\n EVAL-change_type = \"configuration\"\n EVAL-dvc = \"AWS Config\"\n EVAL-status=\"success\"\n LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action\n LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category\n\n # unify account ID field\n FIELDALIAS-aws-account-id = awsAccountId as aws_account_id\n FIELDALIAS-region-for-aws-config = awsRegion AS region\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf',\n \"\"\"\n [aws:config]\n TZ = UTC\n # Corp want's punct to be enabled globally\n ANNOTATE_PUNCT = true\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf',\n \"\"\"\n [aws:config]\n # Our config is bigger than yours!\n TRUNCATE = 9999999\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n is_custom = 1\n label = AWS SNS Alert\n 
description = Publish search result to AWS SNS\n payload_format = json\n icon_path = appIcon.png\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n param.account = DeptAwsAccount\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml',\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n <mask token>\n <mask token>\n\n def test_combine_dird(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--dry-run', '--target', target, default)\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(target + '/default/props.conf')\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml'\n )\n self.assertIn('My custom view', nav_content)\n alert_action = twd.read_conf(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf')\n self.assertIn('aws_sns_modular_alert', alert_action)\n self.assertEqual(alert_action['aws_sns_modular_alert'][\n 'param.account'], 'DeptAwsAccount')\n self.assertEqual(alert_action['aws_sns_modular_alert']['label'],\n 'AWS SNS Alert')\n\n def test_keep_existing_ds_local_app(self):\n twd = TestWorkDir()\n 
src = twd.get_path('repo/apps/Splunk_TA_nix')\n target = twd.get_path('etc/deployment-apps/Splunk_TA_nix')\n twd.write_file('repo/apps/Splunk_TA_nix/default/app.conf',\n \"\"\"\n [install]\n allows_disable = false\n is_configured = true\n state = enabled\n\n [launcher]\n author = Splunk\n description = The app is Splunk\n version = 7.0.0\n \"\"\"\n )\n os.makedirs(twd.get_path('etc/deployment-apps'))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertFalse(os.path.isdir(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local')))\n twd.write_file('etc/deployment-apps/Splunk_TA_nix/local/app.conf',\n '# Autogenerated file')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(os.path.join(target, 'default/app.conf'))\n self.assertIn('install', cfg)\n self.assertEqual(cfg['launcher']['version'], '7.0.0')\n self.assertEqual(twd.read_file(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf'),\n '# Autogenerated file')\n ko = ksconf_cli('combine', '--target', target, src)\n self.assertFalse(os.path.isfile(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf')),\n 'local/app.conf should have been removed.')\n\n def test_combine_conf_spec(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec'\n ,\n \"\"\"\n [<stanza_type1>]\n important_field = <str>\n * Some notes about the important field.\n * Required!\n disabled = <bool>\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec',\n \"\"\"\n [bookmark::<prefixed_stanza_type>]\n resource = <url>\n category = <str>\n * Label for organization\n disabled = <bool>\n \"\"\"\n )\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n 
target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n spec_file = twd.get_path(\n 'etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec')\n spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)\n self.assertIn('bookmark::<prefixed_stanza_type>', spec)\n self.assertIn('<stanza_type1>', spec)\n\n def test_require_arg(self):\n with ksconf_cli:\n ko = ksconf_cli('combine', 'source-dir')\n self.assertRegex(ko.stderr, 'Must provide [^\\r\\n]+--target')\n\n def test_missing_marker(self):\n twd = TestWorkDir()\n twd.write_file('source-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n twd.write_file('dest-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n ko = ksconf_cli('combine', twd.get_path('source-dir'), '--target',\n twd.get_path('dest-dir'))\n self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)\n self.assertRegex(ko.stderr, '.*Marker file missing\\\\b.*')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass CliKsconfCombineTestCase(unittest.TestCase):\n\n def build_test01(self, twd):\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf',\n \"\"\"\n [aws:config]\n SHOULD_LINEMERGE = false\n TRUNCATE = 8388608\n TIME_PREFIX = configurationItemCaptureTime\"\\\\s*:\\\\s*\"\n TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ\n TZ = GMT\n MAX_TIMESTAMP_LOOKAHEAD = 28\n KV_MODE = json\n ANNOTATE_PUNCT = false\n\n FIELDALIAS-dest = resourceType AS dest\n FIELDALIAS-object = resourceId AS object\n FIELDALIAS-object_id = ARN AS object_id\n EVAL-change_type = \"configuration\"\n EVAL-dvc = \"AWS Config\"\n EVAL-status=\"success\"\n LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action\n LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category\n\n # unify account ID field\n 
FIELDALIAS-aws-account-id = awsAccountId as aws_account_id\n FIELDALIAS-region-for-aws-config = awsRegion AS region\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf',\n \"\"\"\n [aws:config]\n TZ = UTC\n # Corp want's punct to be enabled globally\n ANNOTATE_PUNCT = true\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf',\n \"\"\"\n [aws:config]\n # Our config is bigger than yours!\n TRUNCATE = 9999999\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n is_custom = 1\n label = AWS SNS Alert\n description = Publish search result to AWS SNS\n payload_format = json\n icon_path = appIcon.png\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n param.account = DeptAwsAccount\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml',\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n <mask token>\n\n def test_sort_order(self):\n \"\"\"Confirm that single input files are copied as-is\"\"\"\n twd = TestWorkDir()\n default = twd.get_path('input')\n target = twd.get_path('output')\n unique_conf = ['z = 1', ' b=? 
', 'a = 9']\n twd.write_file('input/unique.conf', '\\n'.join(unique_conf))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'disable',\n '--banner', '', '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n data = twd.read_file('output/unique.conf').splitlines()\n self.assertListEqual(unique_conf, data)\n\n def test_combine_dird(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--dry-run', '--target', target, default)\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(target + '/default/props.conf')\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml'\n )\n self.assertIn('My custom view', nav_content)\n alert_action = twd.read_conf(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf')\n self.assertIn('aws_sns_modular_alert', alert_action)\n self.assertEqual(alert_action['aws_sns_modular_alert'][\n 'param.account'], 'DeptAwsAccount')\n self.assertEqual(alert_action['aws_sns_modular_alert']['label'],\n 'AWS SNS Alert')\n\n def test_keep_existing_ds_local_app(self):\n twd = TestWorkDir()\n src = twd.get_path('repo/apps/Splunk_TA_nix')\n target = twd.get_path('etc/deployment-apps/Splunk_TA_nix')\n twd.write_file('repo/apps/Splunk_TA_nix/default/app.conf',\n \"\"\"\n [install]\n allows_disable = false\n is_configured = true\n state = enabled\n\n [launcher]\n author = Splunk\n description = The app is Splunk\n version = 7.0.0\n \"\"\"\n )\n 
os.makedirs(twd.get_path('etc/deployment-apps'))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertFalse(os.path.isdir(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local')))\n twd.write_file('etc/deployment-apps/Splunk_TA_nix/local/app.conf',\n '# Autogenerated file')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(os.path.join(target, 'default/app.conf'))\n self.assertIn('install', cfg)\n self.assertEqual(cfg['launcher']['version'], '7.0.0')\n self.assertEqual(twd.read_file(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf'),\n '# Autogenerated file')\n ko = ksconf_cli('combine', '--target', target, src)\n self.assertFalse(os.path.isfile(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf')),\n 'local/app.conf should have been removed.')\n\n def test_combine_conf_spec(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec'\n ,\n \"\"\"\n [<stanza_type1>]\n important_field = <str>\n * Some notes about the important field.\n * Required!\n disabled = <bool>\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec',\n \"\"\"\n [bookmark::<prefixed_stanza_type>]\n resource = <url>\n category = <str>\n * Label for organization\n disabled = <bool>\n \"\"\"\n )\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n spec_file = twd.get_path(\n 'etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec')\n spec = parse_conf(spec_file, 
profile=PARSECONF_LOOSE)\n self.assertIn('bookmark::<prefixed_stanza_type>', spec)\n self.assertIn('<stanza_type1>', spec)\n\n def test_require_arg(self):\n with ksconf_cli:\n ko = ksconf_cli('combine', 'source-dir')\n self.assertRegex(ko.stderr, 'Must provide [^\\r\\n]+--target')\n\n def test_missing_marker(self):\n twd = TestWorkDir()\n twd.write_file('source-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n twd.write_file('dest-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n ko = ksconf_cli('combine', twd.get_path('source-dir'), '--target',\n twd.get_path('dest-dir'))\n self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)\n self.assertRegex(ko.stderr, '.*Marker file missing\\\\b.*')\n\n\n<mask token>\n", "step-3": "<mask token>\nif __package__ is None:\n sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n )\n<mask token>\n\n\nclass CliKsconfCombineTestCase(unittest.TestCase):\n\n def build_test01(self, twd):\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf',\n \"\"\"\n [aws:config]\n SHOULD_LINEMERGE = false\n TRUNCATE = 8388608\n TIME_PREFIX = configurationItemCaptureTime\"\\\\s*:\\\\s*\"\n TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ\n TZ = GMT\n MAX_TIMESTAMP_LOOKAHEAD = 28\n KV_MODE = json\n ANNOTATE_PUNCT = false\n\n FIELDALIAS-dest = resourceType AS dest\n FIELDALIAS-object = resourceId AS object\n FIELDALIAS-object_id = ARN AS object_id\n EVAL-change_type = \"configuration\"\n EVAL-dvc = \"AWS Config\"\n EVAL-status=\"success\"\n LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action\n LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category\n\n # unify account ID field\n FIELDALIAS-aws-account-id = awsAccountId as aws_account_id\n FIELDALIAS-region-for-aws-config = awsRegion AS region\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml'\n 
,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf',\n \"\"\"\n [aws:config]\n TZ = UTC\n # Corp want's punct to be enabled globally\n ANNOTATE_PUNCT = true\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf',\n \"\"\"\n [aws:config]\n # Our config is bigger than yours!\n TRUNCATE = 9999999\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n is_custom = 1\n label = AWS SNS Alert\n description = Publish search result to AWS SNS\n payload_format = json\n icon_path = appIcon.png\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n param.account = DeptAwsAccount\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml',\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n\n def test_combine_3dir(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws/default')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--dry-run', '--target', default, \n default + '.d/*')\n ko = ksconf_cli('combine', '--target', default, default + '.d/*')\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(twd.get_path(\n 'etc/apps/Splunk_TA_aws/default/props.conf'))\n self.assertIn('aws:config', cfg)\n 
self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws/default/data/ui/nav/default.xml')\n self.assertIn('My custom view', nav_content)\n twd.write_conf(\n 'etc/apps/Splunk_TA_aws/default.d/99-theforce/props.conf', {\n 'aws:config': {'TIME_FORMAT': '%Y-%m-%dT%H:%M:%S.%6NZ'}})\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/99-theforce/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default/data/dead.conf',\n '# File to remove')\n twd.write_file('etc/apps/Splunk_TA_aws/default/data/tags.conf',\n '# Locally created file')\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/99-blah/same.txt',\n 'SAME TEXT')\n twd.write_file('etc/apps/Splunk_TA_aws/default/same.txt', 'SAME TEXT')\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/99-blah/binary.bin',\n b'#BINARY \\xff \\x00')\n twd.write_file('etc/apps/Splunk_TA_aws/default/binary.bin',\n b'#BINARY NEW \\x00 \\xff \\xfb')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--dry-run', '--target', default, \n default + '.d/*')\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertRegex(ko.stdout, '[\\\\r\\\\n][-]\\\\s*<view name=\"search\"')\n self.assertRegex(ko.stdout, '[\\\\r\\\\n][-] ?[\\\\r\\\\n]')\n self.assertRegex(ko.stdout,\n '[\\\\r\\\\n][+]TIME_FORMAT = [^\\\\r\\\\n]+%6N')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--target', default, default + '.d/*')\n\n def test_sort_order(self):\n \"\"\"Confirm that single input files are copied as-is\"\"\"\n twd = TestWorkDir()\n default = twd.get_path('input')\n target = 
twd.get_path('output')\n unique_conf = ['z = 1', ' b=? ', 'a = 9']\n twd.write_file('input/unique.conf', '\\n'.join(unique_conf))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'disable',\n '--banner', '', '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n data = twd.read_file('output/unique.conf').splitlines()\n self.assertListEqual(unique_conf, data)\n\n def test_combine_dird(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--dry-run', '--target', target, default)\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(target + '/default/props.conf')\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml'\n )\n self.assertIn('My custom view', nav_content)\n alert_action = twd.read_conf(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf')\n self.assertIn('aws_sns_modular_alert', alert_action)\n self.assertEqual(alert_action['aws_sns_modular_alert'][\n 'param.account'], 'DeptAwsAccount')\n self.assertEqual(alert_action['aws_sns_modular_alert']['label'],\n 'AWS SNS Alert')\n\n def test_keep_existing_ds_local_app(self):\n twd = TestWorkDir()\n src = twd.get_path('repo/apps/Splunk_TA_nix')\n target = twd.get_path('etc/deployment-apps/Splunk_TA_nix')\n twd.write_file('repo/apps/Splunk_TA_nix/default/app.conf',\n \"\"\"\n [install]\n allows_disable = false\n is_configured = true\n state = enabled\n\n [launcher]\n author = Splunk\n description = 
The app is Splunk\n version = 7.0.0\n \"\"\"\n )\n os.makedirs(twd.get_path('etc/deployment-apps'))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertFalse(os.path.isdir(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local')))\n twd.write_file('etc/deployment-apps/Splunk_TA_nix/local/app.conf',\n '# Autogenerated file')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(os.path.join(target, 'default/app.conf'))\n self.assertIn('install', cfg)\n self.assertEqual(cfg['launcher']['version'], '7.0.0')\n self.assertEqual(twd.read_file(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf'),\n '# Autogenerated file')\n ko = ksconf_cli('combine', '--target', target, src)\n self.assertFalse(os.path.isfile(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf')),\n 'local/app.conf should have been removed.')\n\n def test_combine_conf_spec(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec'\n ,\n \"\"\"\n [<stanza_type1>]\n important_field = <str>\n * Some notes about the important field.\n * Required!\n disabled = <bool>\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec',\n \"\"\"\n [bookmark::<prefixed_stanza_type>]\n resource = <url>\n category = <str>\n * Label for organization\n disabled = <bool>\n \"\"\"\n )\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n spec_file = twd.get_path(\n 
'etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec')\n spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)\n self.assertIn('bookmark::<prefixed_stanza_type>', spec)\n self.assertIn('<stanza_type1>', spec)\n\n def test_require_arg(self):\n with ksconf_cli:\n ko = ksconf_cli('combine', 'source-dir')\n self.assertRegex(ko.stderr, 'Must provide [^\\r\\n]+--target')\n\n def test_missing_marker(self):\n twd = TestWorkDir()\n twd.write_file('source-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n twd.write_file('dest-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n ko = ksconf_cli('combine', twd.get_path('source-dir'), '--target',\n twd.get_path('dest-dir'))\n self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)\n self.assertRegex(ko.stderr, '.*Marker file missing\\\\b.*')\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-4": "from __future__ import absolute_import, print_function, unicode_literals\nimport os\nimport sys\nimport unittest\nif __package__ is None:\n sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n )\nfrom ksconf.conf.parser import PARSECONF_LOOSE, parse_conf\nfrom ksconf.consts import EXIT_CODE_COMBINE_MARKER_MISSING, EXIT_CODE_SUCCESS\nfrom tests.cli_helper import TestWorkDir, ksconf_cli\n\n\nclass CliKsconfCombineTestCase(unittest.TestCase):\n\n def build_test01(self, twd):\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf',\n \"\"\"\n [aws:config]\n SHOULD_LINEMERGE = false\n TRUNCATE = 8388608\n TIME_PREFIX = configurationItemCaptureTime\"\\\\s*:\\\\s*\"\n TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ\n TZ = GMT\n MAX_TIMESTAMP_LOOKAHEAD = 28\n KV_MODE = json\n ANNOTATE_PUNCT = false\n\n FIELDALIAS-dest = resourceType AS dest\n FIELDALIAS-object = resourceId AS object\n FIELDALIAS-object_id = ARN AS object_id\n EVAL-change_type = \"configuration\"\n EVAL-dvc = \"AWS Config\"\n EVAL-status=\"success\"\n LOOKUP-action= aws_config_action_lookup 
status AS configurationItemStatus OUTPUT action\n LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category\n\n # unify account ID field\n FIELDALIAS-aws-account-id = awsAccountId as aws_account_id\n FIELDALIAS-region-for-aws-config = awsRegion AS region\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf',\n \"\"\"\n [aws:config]\n TZ = UTC\n # Corp want's punct to be enabled globally\n ANNOTATE_PUNCT = true\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf',\n \"\"\"\n [aws:config]\n # Our config is bigger than yours!\n TRUNCATE = 9999999\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n is_custom = 1\n label = AWS SNS Alert\n description = Publish search result to AWS SNS\n payload_format = json\n icon_path = appIcon.png\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf',\n \"\"\"\n [aws_sns_modular_alert]\n param.account = DeptAwsAccount\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml',\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\"\n )\n\n def test_combine_3dir(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = 
twd.get_path('etc/apps/Splunk_TA_aws/default')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--dry-run', '--target', default, \n default + '.d/*')\n ko = ksconf_cli('combine', '--target', default, default + '.d/*')\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(twd.get_path(\n 'etc/apps/Splunk_TA_aws/default/props.conf'))\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws/default/data/ui/nav/default.xml')\n self.assertIn('My custom view', nav_content)\n twd.write_conf(\n 'etc/apps/Splunk_TA_aws/default.d/99-theforce/props.conf', {\n 'aws:config': {'TIME_FORMAT': '%Y-%m-%dT%H:%M:%S.%6NZ'}})\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/default.d/99-theforce/data/ui/nav/default.xml'\n ,\n \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n </nav>\n \"\"\"\n )\n twd.write_file('etc/apps/Splunk_TA_aws/default/data/dead.conf',\n '# File to remove')\n twd.write_file('etc/apps/Splunk_TA_aws/default/data/tags.conf',\n '# Locally created file')\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/99-blah/same.txt',\n 'SAME TEXT')\n twd.write_file('etc/apps/Splunk_TA_aws/default/same.txt', 'SAME TEXT')\n twd.write_file('etc/apps/Splunk_TA_aws/default.d/99-blah/binary.bin',\n b'#BINARY \\xff \\x00')\n twd.write_file('etc/apps/Splunk_TA_aws/default/binary.bin',\n b'#BINARY NEW \\x00 \\xff \\xfb')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--dry-run', '--target', default, \n default + '.d/*')\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertRegex(ko.stdout, '[\\\\r\\\\n][-]\\\\s*<view name=\"search\"')\n 
self.assertRegex(ko.stdout, '[\\\\r\\\\n][-] ?[\\\\r\\\\n]')\n self.assertRegex(ko.stdout,\n '[\\\\r\\\\n][+]TIME_FORMAT = [^\\\\r\\\\n]+%6N')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--target', default, default + '.d/*')\n\n def test_sort_order(self):\n \"\"\"Confirm that single input files are copied as-is\"\"\"\n twd = TestWorkDir()\n default = twd.get_path('input')\n target = twd.get_path('output')\n unique_conf = ['z = 1', ' b=? ', 'a = 9']\n twd.write_file('input/unique.conf', '\\n'.join(unique_conf))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'disable',\n '--banner', '', '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n data = twd.read_file('output/unique.conf').splitlines()\n self.assertListEqual(unique_conf, data)\n\n def test_combine_dird(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--dry-run', '--target', target, default)\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(target + '/default/props.conf')\n self.assertIn('aws:config', cfg)\n self.assertEqual(cfg['aws:config']['ANNOTATE_PUNCT'], 'true')\n self.assertEqual(cfg['aws:config']['EVAL-change_type'],\n '\"configuration\"')\n self.assertEqual(cfg['aws:config']['TRUNCATE'], '9999999')\n nav_content = twd.read_file(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml'\n )\n self.assertIn('My custom view', nav_content)\n alert_action = twd.read_conf(\n 'etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf')\n self.assertIn('aws_sns_modular_alert', alert_action)\n self.assertEqual(alert_action['aws_sns_modular_alert'][\n 'param.account'], 'DeptAwsAccount')\n self.assertEqual(alert_action['aws_sns_modular_alert']['label'],\n 
'AWS SNS Alert')\n\n def test_keep_existing_ds_local_app(self):\n twd = TestWorkDir()\n src = twd.get_path('repo/apps/Splunk_TA_nix')\n target = twd.get_path('etc/deployment-apps/Splunk_TA_nix')\n twd.write_file('repo/apps/Splunk_TA_nix/default/app.conf',\n \"\"\"\n [install]\n allows_disable = false\n is_configured = true\n state = enabled\n\n [launcher]\n author = Splunk\n description = The app is Splunk\n version = 7.0.0\n \"\"\"\n )\n os.makedirs(twd.get_path('etc/deployment-apps'))\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertFalse(os.path.isdir(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local')))\n twd.write_file('etc/deployment-apps/Splunk_TA_nix/local/app.conf',\n '# Autogenerated file')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--keep-existing', 'local/app.conf',\n '--target', target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(os.path.join(target, 'default/app.conf'))\n self.assertIn('install', cfg)\n self.assertEqual(cfg['launcher']['version'], '7.0.0')\n self.assertEqual(twd.read_file(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf'),\n '# Autogenerated file')\n ko = ksconf_cli('combine', '--target', target, src)\n self.assertFalse(os.path.isfile(twd.get_path(\n 'etc/deployment-apps/Splunk_TA_nix/local/app.conf')),\n 'local/app.conf should have been removed.')\n\n def test_combine_conf_spec(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec'\n ,\n \"\"\"\n [<stanza_type1>]\n important_field = <str>\n * Some notes about the important field.\n * Required!\n disabled = <bool>\n \"\"\"\n )\n twd.write_file(\n 'etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec',\n \"\"\"\n [bookmark::<prefixed_stanza_type>]\n resource = <url>\n category = <str>\n * Label for 
organization\n disabled = <bool>\n \"\"\"\n )\n default = twd.get_path('etc/apps/Splunk_TA_aws')\n target = twd.get_path('etc/apps/Splunk_TA_aws-OUTPUT')\n with ksconf_cli:\n ko = ksconf_cli('combine', '--layer-method', 'dir.d',\n '--target', target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n spec_file = twd.get_path(\n 'etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec')\n spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)\n self.assertIn('bookmark::<prefixed_stanza_type>', spec)\n self.assertIn('<stanza_type1>', spec)\n\n def test_require_arg(self):\n with ksconf_cli:\n ko = ksconf_cli('combine', 'source-dir')\n self.assertRegex(ko.stderr, 'Must provide [^\\r\\n]+--target')\n\n def test_missing_marker(self):\n twd = TestWorkDir()\n twd.write_file('source-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n twd.write_file('dest-dir/someapp/default/blah.conf',\n '[entry]\\nboring=yes\\n')\n ko = ksconf_cli('combine', twd.get_path('source-dir'), '--target',\n twd.get_path('dest-dir'))\n self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)\n self.assertRegex(ko.stderr, '.*Marker file missing\\\\b.*')\n\n\nif __name__ == '__main__':\n unittest.main()\n", "step-5": "#!/usr/bin/env python\nfrom __future__ import absolute_import, print_function, unicode_literals\n\nimport os\nimport sys\nimport unittest\n\n# Allow interactive execution from CLI, cd tests; ./test_cli.py\nif __package__ is None:\n sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom ksconf.conf.parser import PARSECONF_LOOSE, parse_conf\nfrom ksconf.consts import EXIT_CODE_COMBINE_MARKER_MISSING, EXIT_CODE_SUCCESS\nfrom tests.cli_helper import TestWorkDir, ksconf_cli\n\n\nclass CliKsconfCombineTestCase(unittest.TestCase):\n\n def build_test01(self, twd):\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/10-upstream/props.conf\", r\"\"\"\n [aws:config]\n SHOULD_LINEMERGE = false\n TRUNCATE = 8388608\n TIME_PREFIX = 
configurationItemCaptureTime\"\\s*:\\s*\"\n TIME_FORMAT = %Y-%m-%dT%H:%M:%S.%3NZ\n TZ = GMT\n MAX_TIMESTAMP_LOOKAHEAD = 28\n KV_MODE = json\n ANNOTATE_PUNCT = false\n\n FIELDALIAS-dest = resourceType AS dest\n FIELDALIAS-object = resourceId AS object\n FIELDALIAS-object_id = ARN AS object_id\n EVAL-change_type = \"configuration\"\n EVAL-dvc = \"AWS Config\"\n EVAL-status=\"success\"\n LOOKUP-action= aws_config_action_lookup status AS configurationItemStatus OUTPUT action\n LOOKUP-object_category = aws_config_object_category_lookup type AS resourceType OUTPUT object_category\n\n # unify account ID field\n FIELDALIAS-aws-account-id = awsAccountId as aws_account_id\n FIELDALIAS-region-for-aws-config = awsRegion AS region\n \"\"\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/10-upstream/data/ui/nav/default.xml\", \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\")\n # In the future there will be a more efficient way to handle the global 'ANNOTATE_PUCT' scenario\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/20-corp/props.conf\", \"\"\"\n [aws:config]\n TZ = UTC\n # Corp want's punct to be enabled globally\n ANNOTATE_PUNCT = true\n \"\"\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/60-dept/props.conf\", \"\"\"\n [aws:config]\n # Our config is bigger than yours!\n TRUNCATE = 9999999\n \"\"\")\n\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/10-upstream/alert_actions.conf\", \"\"\"\n [aws_sns_modular_alert]\n is_custom = 1\n label = AWS SNS Alert\n description = Publish search result to AWS SNS\n payload_format = json\n icon_path = appIcon.png\n \"\"\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/60-dept/alert_actions.conf\", \"\"\"\n [aws_sns_modular_alert]\n param.account = DeptAwsAccount\n \"\"\")\n\n 
twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/60-dept/data/ui/nav/default.xml\", \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n\n <view name=\"My custom view\" />\n <view name=\"Inputs\" default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n <view name=\"search\" default=\"false\" label=\"Search\" />\n\n </nav>\n \"\"\")\n\n def test_combine_3dir(self):\n # Note that this test tests the old shool version of '*.d' processing. But we must preserve this behavior.\n # Be aware that we pass in 'default.d/*' as a string, and expand the glob vs allowing the shell to handle this\n # and this is _normal_ behavior when dealing with Windows.\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path(\"etc/apps/Splunk_TA_aws/default\")\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--dry-run\", \"--target\", default, default + \".d/*\")\n # Q: Why do we run this once, but not check anything about it? (To ensure dry-run has no side effects?)\n ko = ksconf_cli(\"combine\", \"--target\", default, default + \".d/*\")\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(twd.get_path(\"etc/apps/Splunk_TA_aws/default/props.conf\"))\n self.assertIn(\"aws:config\", cfg)\n self.assertEqual(cfg[\"aws:config\"][\"ANNOTATE_PUNCT\"], \"true\")\n self.assertEqual(cfg[\"aws:config\"][\"EVAL-change_type\"], '\"configuration\"')\n self.assertEqual(cfg[\"aws:config\"][\"TRUNCATE\"], '9999999')\n nav_content = twd.read_file(\"etc/apps/Splunk_TA_aws/default/data/ui/nav/default.xml\")\n self.assertIn(\"My custom view\", nav_content)\n\n twd.write_conf(\"etc/apps/Splunk_TA_aws/default.d/99-theforce/props.conf\", {\n \"aws:config\": {\"TIME_FORMAT\": \"%Y-%m-%dT%H:%M:%S.%6NZ\"}\n })\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/99-theforce/data/ui/nav/default.xml\", \"\"\"\n <nav search_view=\"search\" color=\"#65A637\">\n <view name=\"My custom view\" />\n <view name=\"Inputs\" 
default=\"true\" label=\"Inputs\" />\n <view name=\"Configuration\" default=\"false\" label=\"Configuration\" />\n </nav>\n \"\"\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default/data/dead.conf\", \"# File to remove\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default/data/tags.conf\", \"# Locally created file\")\n\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/99-blah/same.txt\", \"SAME TEXT\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default/same.txt\", \"SAME TEXT\")\n\n twd.write_file(\"etc/apps/Splunk_TA_aws/default.d/99-blah/binary.bin\", b\"#BINARY \\xff \\x00\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/default/binary.bin\", b\"#BINARY NEW \\x00 \\xff \\xFB\")\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--dry-run\", \"--target\", default, default + \".d/*\")\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n self.assertRegex(ko.stdout, r'[\\r\\n][-]\\s*<view name=\"search\"')\n self.assertRegex(ko.stdout, r'[\\r\\n][-] ?[\\r\\n]') # Remove empty lines from nav\n self.assertRegex(ko.stdout, r\"[\\r\\n][+]TIME_FORMAT = [^\\r\\n]+%6N\")\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--target\", default, default + \".d/*\")\n\n def test_sort_order(self):\n \"Confirm that single input files are copied as-is\"\n twd = TestWorkDir()\n default = twd.get_path(\"input\")\n target = twd.get_path(\"output\")\n unique_conf = [\n \"z = 1\",\n \" b=? 
\",\n \"a = 9\"]\n twd.write_file(\"input/unique.conf\",\n \"\\n\".join(unique_conf))\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--layer-method\", \"disable\", \"--banner\", \"\",\n \"--target\", target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n data = twd.read_file(\"output/unique.conf\").splitlines()\n self.assertListEqual(unique_conf, data)\n\n def test_combine_dird(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n default = twd.get_path(\"etc/apps/Splunk_TA_aws\")\n target = twd.get_path(\"etc/apps/Splunk_TA_aws-OUTPUT\")\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--layer-method\", \"dir.d\", \"--dry-run\", \"--target\", target, default)\n ko = ksconf_cli(\"combine\", \"--layer-method\", \"dir.d\", \"--target\", target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(target + \"/default/props.conf\")\n self.assertIn(\"aws:config\", cfg)\n self.assertEqual(cfg[\"aws:config\"][\"ANNOTATE_PUNCT\"], \"true\")\n self.assertEqual(cfg[\"aws:config\"][\"EVAL-change_type\"], '\"configuration\"')\n self.assertEqual(cfg[\"aws:config\"][\"TRUNCATE\"], '9999999')\n nav_content = twd.read_file(\"etc/apps/Splunk_TA_aws-OUTPUT/default/data/ui/nav/default.xml\")\n self.assertIn(\"My custom view\", nav_content)\n\n alert_action = twd.read_conf(\"etc/apps/Splunk_TA_aws-OUTPUT/default/alert_actions.conf\")\n self.assertIn(\"aws_sns_modular_alert\", alert_action)\n self.assertEqual(alert_action[\"aws_sns_modular_alert\"][\"param.account\"], \"DeptAwsAccount\") # layer 10\n self.assertEqual(alert_action[\"aws_sns_modular_alert\"][\"label\"], \"AWS SNS Alert\") # layer 60\n\n def test_keep_existing_ds_local_app(self):\n twd = TestWorkDir()\n src = twd.get_path(\"repo/apps/Splunk_TA_nix\")\n target = twd.get_path(\"etc/deployment-apps/Splunk_TA_nix\")\n\n twd.write_file(\"repo/apps/Splunk_TA_nix/default/app.conf\", r\"\"\"\n [install]\n allows_disable = false\n is_configured = true\n state = enabled\n\n 
[launcher]\n author = Splunk\n description = The app is Splunk\n version = 7.0.0\n \"\"\")\n # Make partent diretories\n os.makedirs(twd.get_path(\"etc/deployment-apps\"))\n\n # First run (creates maker file)\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--keep-existing\", \"local/app.conf\",\n \"--target\", target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n # Local folder hasn't been created yet\n self.assertFalse(os.path.isdir(twd.get_path(\"etc/deployment-apps/Splunk_TA_nix/local\")))\n\n # Simulate a 'splunk reload deploy-server'\n twd.write_file(\"etc/deployment-apps/Splunk_TA_nix/local/app.conf\", \"# Autogenerated file\")\n\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--keep-existing\", \"local/app.conf\",\n \"--target\", target, src)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n cfg = parse_conf(os.path.join(target, \"default/app.conf\"))\n self.assertIn(\"install\", cfg)\n self.assertEqual(cfg[\"launcher\"][\"version\"], \"7.0.0\")\n\n self.assertEqual(twd.read_file(\"etc/deployment-apps/Splunk_TA_nix/local/app.conf\"),\n \"# Autogenerated file\")\n\n # This time the file will be removed\n ko = ksconf_cli(\"combine\", \"--target\", target, src)\n self.assertFalse(os.path.isfile(twd.get_path(\"etc/deployment-apps/Splunk_TA_nix/local/app.conf\")),\n \"local/app.conf should have been removed.\")\n\n def test_combine_conf_spec(self):\n twd = TestWorkDir()\n self.build_test01(twd)\n\n twd.write_file(\"etc/apps/Splunk_TA_aws/README.d/10-upstream/custom_config.conf.spec\", r\"\"\"\n [<stanza_type1>]\n important_field = <str>\n * Some notes about the important field.\n * Required!\n disabled = <bool>\n \"\"\")\n twd.write_file(\"etc/apps/Splunk_TA_aws/README.d/60-dept/custom_config.conf.spec\", r\"\"\"\n [bookmark::<prefixed_stanza_type>]\n resource = <url>\n category = <str>\n * Label for organization\n disabled = <bool>\n \"\"\")\n\n default = twd.get_path(\"etc/apps/Splunk_TA_aws\")\n target = 
twd.get_path(\"etc/apps/Splunk_TA_aws-OUTPUT\")\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"--layer-method\", \"dir.d\", \"--target\", target, default)\n self.assertEqual(ko.returncode, EXIT_CODE_SUCCESS)\n\n spec_file = twd.get_path(\"etc/apps/Splunk_TA_aws-OUTPUT/README/custom_config.conf.spec\")\n spec = parse_conf(spec_file, profile=PARSECONF_LOOSE)\n\n self.assertIn(\"bookmark::<prefixed_stanza_type>\", spec)\n self.assertIn(\"<stanza_type1>\", spec)\n\n def test_require_arg(self):\n with ksconf_cli:\n ko = ksconf_cli(\"combine\", \"source-dir\")\n self.assertRegex(ko.stderr, \"Must provide [^\\r\\n]+--target\")\n\n def test_missing_marker(self):\n twd = TestWorkDir()\n twd.write_file(\"source-dir/someapp/default/blah.conf\", \"[entry]\\nboring=yes\\n\")\n twd.write_file(\"dest-dir/someapp/default/blah.conf\", \"[entry]\\nboring=yes\\n\")\n\n ko = ksconf_cli(\"combine\", twd.get_path(\"source-dir\"), \"--target\", twd.get_path(\"dest-dir\"))\n self.assertEqual(ko.returncode, EXIT_CODE_COMBINE_MARKER_MISSING)\n self.assertRegex(ko.stderr, r\".*Marker file missing\\b.*\")\n\n\nif __name__ == '__main__': # pragma: no cover\n unittest.main()\n", "step-ids": [ 7, 8, 10, 11, 12 ] }
[ 7, 8, 10, 11, 12 ]
<|reserved_special_token_0|> def isheadless(): if len(argv) > 1: if argv[1] == 'head': return False else: raise ValueError("optional arg must be : 'head'") return True <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def execute_script(InstaBot): InstaBot.get_unfollowers() def isheadless(): if len(argv) > 1: if argv[1] == 'head': return False else: raise ValueError("optional arg must be : 'head'") return True <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def execute_script(InstaBot): InstaBot.get_unfollowers() def isheadless(): if len(argv) > 1: if argv[1] == 'head': return False else: raise ValueError("optional arg must be : 'head'") return True if __name__ == '__main__': bot = None headless = isheadless() if headless: bot = InstaBot(username, pw, True) else: bot = InstaBot(username, pw) if bot.legal: execute_script(bot) bot.close_session() <|reserved_special_token_1|> from igbot import InstaBot from settings import username, pw from sys import argv def execute_script(InstaBot): InstaBot.get_unfollowers() def isheadless(): if len(argv) > 1: if argv[1] == 'head': return False else: raise ValueError("optional arg must be : 'head'") return True if __name__ == '__main__': bot = None headless = isheadless() if headless: bot = InstaBot(username, pw, True) else: bot = InstaBot(username, pw) if bot.legal: execute_script(bot) bot.close_session() <|reserved_special_token_1|> from igbot import InstaBot from settings import username, pw from sys import argv def execute_script(InstaBot): InstaBot.get_unfollowers() #InstaBot.unfollow() #InstaBot.follow() #InstaBot.remove_followers() def isheadless(): if len(argv) > 1: if argv[1] == 'head': return False else: raise ValueError("optional arg must be : 'head'") return True if __name__ == '__main__': bot = None headless = isheadless() if headless: bot = InstaBot(username, pw, True) else: bot = InstaBot(username, pw) if bot.legal: execute_script(bot) 
bot.close_session()
flexible
{ "blob_id": "f379092cefe83a0a449789fbc09af490081b00a4", "index": 3818, "step-1": "<mask token>\n\n\ndef isheadless():\n if len(argv) > 1:\n if argv[1] == 'head':\n return False\n else:\n raise ValueError(\"optional arg must be : 'head'\")\n return True\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef execute_script(InstaBot):\n InstaBot.get_unfollowers()\n\n\ndef isheadless():\n if len(argv) > 1:\n if argv[1] == 'head':\n return False\n else:\n raise ValueError(\"optional arg must be : 'head'\")\n return True\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef execute_script(InstaBot):\n InstaBot.get_unfollowers()\n\n\ndef isheadless():\n if len(argv) > 1:\n if argv[1] == 'head':\n return False\n else:\n raise ValueError(\"optional arg must be : 'head'\")\n return True\n\n\nif __name__ == '__main__':\n bot = None\n headless = isheadless()\n if headless:\n bot = InstaBot(username, pw, True)\n else:\n bot = InstaBot(username, pw)\n if bot.legal:\n execute_script(bot)\n bot.close_session()\n", "step-4": "from igbot import InstaBot\nfrom settings import username, pw\nfrom sys import argv\n\n\ndef execute_script(InstaBot):\n InstaBot.get_unfollowers()\n\n\ndef isheadless():\n if len(argv) > 1:\n if argv[1] == 'head':\n return False\n else:\n raise ValueError(\"optional arg must be : 'head'\")\n return True\n\n\nif __name__ == '__main__':\n bot = None\n headless = isheadless()\n if headless:\n bot = InstaBot(username, pw, True)\n else:\n bot = InstaBot(username, pw)\n if bot.legal:\n execute_script(bot)\n bot.close_session()\n", "step-5": "from igbot import InstaBot\nfrom settings import username, pw\nfrom sys import argv\n\ndef execute_script(InstaBot):\n\tInstaBot.get_unfollowers()\n\t#InstaBot.unfollow()\n\t#InstaBot.follow()\n\t#InstaBot.remove_followers()\n\ndef isheadless():\n\tif len(argv) > 1:\n\t\tif argv[1] == 'head':\n\t\t\treturn False\n\t\telse:\n\t\t\traise ValueError(\"optional arg must be : 'head'\")\n\treturn True\n\nif __name__ == 
'__main__':\n\tbot = None\n\theadless = isheadless()\n\tif headless:\n\t\tbot = InstaBot(username, pw, True)\n\telse:\n\t\tbot = InstaBot(username, pw)\n\n\tif bot.legal:\n\t\texecute_script(bot)\n\t\tbot.close_session()", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> def _build(_input, *nodes): x = _input for node in nodes: if callable(node): x = node(x) elif isinstance(node, list): x = [_build(x, branch) for branch in node] elif isinstance(node, tuple): x = _build(x, *node) else: x = node return x <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def _build(_input, *nodes): x = _input for node in nodes: if callable(node): x = node(x) elif isinstance(node, list): x = [_build(x, branch) for branch in node] elif isinstance(node, tuple): x = _build(x, *node) else: x = node return x <|reserved_special_token_0|> model.summary() <|reserved_special_token_1|> <|reserved_special_token_0|> os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' classes = ['normal cells', 'blasts', 'blasts_highSSC_granulocytes', 'blasts_highSSC_middle_ugly', 'blasts_highSSC_upper_dead'] num_classes = len(classes) image_size = 66 imagefiles = np.load('imagefiles_supplementary.npz') X_train = imagefiles['X_train'] X_test = imagefiles['X_test'] y_train = imagefiles['y_train'] y_test = imagefiles['y_test'] X_train = X_train.reshape((-1, image_size, image_size, 1)) X_test = X_test.reshape((-1, image_size, image_size, 1)) X_train = X_train.astype('float32') X_test = X_test.astype('float32') y_train = np_utils.to_categorical(y_train, num_classes) y_test = np_utils.to_categorical(y_test, num_classes) def _build(_input, *nodes): x = _input for node in nodes: if callable(node): x = node(x) elif isinstance(node, list): x = [_build(x, branch) for branch in node] elif isinstance(node, tuple): x = _build(x, *node) else: x = node return x _input = Input(X_train.shape[1:]) output = _build(_input, [(Conv2D(96, (3, 3), strides=(2, 2)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, 
gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3, 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3, 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3, 3), strides=(2, 2))], Add(), [(Conv2D(112, (1, 1), strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu')), (Conv2D(48, (3, 3), strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon= 0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu'))], Add(), [MaxPooling2D(pool_size=(2, 2), strides= None, padding='valid', data_format=None), Flatten(), Dense(256, activation='relu'), Dropout(0.5), Dense(num_classes, activation='softmax')] ) model = Model(_input, output) model.summary() <|reserved_special_token_1|> import os import numpy as np from keras.models import Sequential, Model from keras.layers 
import Dense, Dropout, Flatten, concatenate from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation from keras.layers.normalization import BatchNormalization from keras.optimizers import SGD from keras.utils import np_utils import matplotlib.pyplot as plt from sklearn.metrics import confusion_matrix, f1_score import seaborn as sns from keras.layers import Input, Dense, Add, Multiply os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' classes = ['normal cells', 'blasts', 'blasts_highSSC_granulocytes', 'blasts_highSSC_middle_ugly', 'blasts_highSSC_upper_dead'] num_classes = len(classes) image_size = 66 imagefiles = np.load('imagefiles_supplementary.npz') X_train = imagefiles['X_train'] X_test = imagefiles['X_test'] y_train = imagefiles['y_train'] y_test = imagefiles['y_test'] X_train = X_train.reshape((-1, image_size, image_size, 1)) X_test = X_test.reshape((-1, image_size, image_size, 1)) X_train = X_train.astype('float32') X_test = X_test.astype('float32') y_train = np_utils.to_categorical(y_train, num_classes) y_test = np_utils.to_categorical(y_test, num_classes) def _build(_input, *nodes): x = _input for node in nodes: if callable(node): x = node(x) elif isinstance(node, list): x = [_build(x, branch) for branch in node] elif isinstance(node, tuple): x = _build(x, *node) else: x = node return x _input = Input(X_train.shape[1:]) output = _build(_input, [(Conv2D(96, (3, 3), strides=(2, 2)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3, 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', 
moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3, 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3, 3), strides=(2, 2))], Add(), [(Conv2D(112, (1, 1), strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu')), (Conv2D(48, (3, 3), strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon= 0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu'))], Add(), [MaxPooling2D(pool_size=(2, 2), strides= None, padding='valid', data_format=None), Flatten(), Dense(256, activation='relu'), Dropout(0.5), Dense(num_classes, activation='softmax')] ) model = Model(_input, output) model.summary() <|reserved_special_token_1|> import os import numpy as np from keras.models import Sequential, Model from keras.layers import Dense, Dropout, Flatten, concatenate from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation from keras.layers.normalization import BatchNormalization from keras.optimizers import SGD from keras.utils import np_utils import matplotlib.pyplot as 
plt from sklearn.metrics import confusion_matrix, f1_score import seaborn as sns from keras.layers import Input, Dense, Add, Multiply # macOS特有の警告文を非表示(GPUがないからCPUでやるときに出る) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # パラメータの初期化 classes = [ "normal cells", "blasts", "blasts_highSSC_granulocytes", "blasts_highSSC_middle_ugly", "blasts_highSSC_upper_dead", ] num_classes = len(classes) image_size = 66 # データの読み込み imagefiles = np.load("imagefiles_supplementary.npz") X_train = imagefiles['X_train'] X_test = imagefiles['X_test'] y_train = imagefiles['y_train'] y_test = imagefiles['y_test'] # グレースケール画像をCNNに入力するための次元操作 X_train = X_train.reshape((-1, image_size, image_size, 1)) X_test = X_test.reshape((-1, image_size, image_size, 1)) # データの正規化 X_train = X_train.astype("float32") X_test = X_test.astype("float32") # OneHotVector化する(正解ラベルの位置に1がつく) y_train = np_utils.to_categorical(y_train, num_classes) y_test = np_utils.to_categorical(y_test, num_classes) def _build(_input, *nodes): x = _input for node in nodes: if callable(node): x = node(x) elif isinstance(node, list): x = [_build(x, branch) for branch in node] elif isinstance(node, tuple): x = _build(x, *node) else: x = node return x _input = Input(X_train.shape[1:]) output = _build( _input, # Reduction dual-path module×3の定義 # --------------------------- # 畳み込み層の追加(96:フィルタ数) # バッチ正規化 # 活性化関数:ReLu # --------------------------- # MaxPooling # --------------------------- # Reduction dual-path module1 [(Conv2D(96, (3, 3), strides=(2, 2)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3, 3), strides=(2, 2))], # Reduction dual-path module2 Add(), [(Conv2D(96, (3, 3), strides=(2, 2)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, 
center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3, 3), strides=(2, 2))], # Reduction dual-path module3 Add(), [(Conv2D(96, (3, 3), strides=(2, 2)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3, 3), strides=(2, 2))], # Dual-path modules×10の定義 # --------------------------- # 畳み込み層の追加(112:フィルタ数) # バッチ正規化 # 活性化関数:ReLu # --------------------------- # Dual-path modules2の定義 # 畳み込み層の追加(48:フィルタ数) # バッチ正規化 # 活性化関数:ReLu # --------------------------- # Dual-path modules1 Add(), [(Conv2D(112, (1, 1), strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu'), ), (Conv2D(48, (3, 3), strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None), Activation('relu'), )], # # Dual-path modules2 # Add(), # [(Conv2D(112, (1, 1), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', 
moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu')), # (Conv2D(48, (3, 3), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu'))], # # Dual-path modules3 # Add(), # [(Conv2D(112, (1, 1), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu')), # (Conv2D(48, (3, 3), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu'))], # # Dual-path modules4 # Add(), # [(Conv2D(112, (1, 1), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu')), # (Conv2D(48, (3, 3), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, 
beta_constraint=None, # gamma_constraint=None), # Activation('relu'))], # # Dual-path modules5 # Add(), # [(Conv2D(112, (1, 1), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu')), # (Conv2D(48, (3, 3), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu'))], # # Dual-path modules6 # Add(), # [(Conv2D(112, (1, 1), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu')), # (Conv2D(48, (3, 3), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu'))], # # Dual-path modules7 # Add(), # [(Conv2D(112, (1, 1), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # 
Activation('relu')), # (Conv2D(48, (3, 3), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu'))], # # Dual-path modules8 # Add(), # [(Conv2D(112, (1, 1), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu')), # (Conv2D(48, (3, 3), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu'))], # # Dual-path modules9 # Add(), # [(Conv2D(112, (1, 1), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu')), # (Conv2D(48, (3, 3), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu'))], # # Dual-path modules10 # Add(), # [(Conv2D(112, (1, 1), 
strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu')), # (Conv2D(48, (3, 3), strides=(1, 1)), # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True, # beta_initializer='zeros', gamma_initializer='ones', # moving_mean_initializer='zeros', moving_variance_initializer='ones', # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, # gamma_constraint=None), # Activation('relu'))], # 全結合 Add(), [MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None), Flatten(), Dense(256, activation='relu'), Dropout(0.5), Dense(num_classes, activation='softmax') ] ) model = Model(_input, output) model.summary() # # 損失関数の設定 # opt = SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False) # model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy']) # # # トレーニングの実施 # # 学習 # print("start training") # hist = model.fit(X_train, y_train, batch_size=32, epochs=30, validation_data=(X_test, y_test)) # # 評価 # print("start eval") # score = model.evaluate(X_test, y_test, batch_size=32, verbose=1) # verbose:途中結果表示 # print('Test Loss: ', score[0]) # print('Test Accuracy: ', score[1]) # # model.save('leukemia_cnn_supplementary.h5') # # # 学習の様子をグラフへ描画 # # 正解率の推移をプロット # fig = plt.figure() # plt.plot(hist.history['accuracy']) # plt.plot(hist.history['val_accuracy']) # plt.title('Accuracy') # plt.legend(['train', 'test'], loc='upper left') # fig.savefig('result/cnn_supplementary/cnn_accuracy_supplementary.png') # plt.close() # # ロスの推移をプロット # fig = plt.figure() # plt.plot(hist.history['loss']) # plt.plot(hist.history['val_loss']) # plt.title('Loss') # plt.legend(['train', 'test'], loc='upper left') # 
fig.savefig('result/cnn_supplementary/cnn_loss_supplementary.png') # plt.close() # # Confusion matrix作成 # plt.figure() # y_pred = model.predict(X_test) # y_test = imagefiles['y_test'] # one hot vector化されているのでロードし直す # cm = confusion_matrix(y_test, np.argmax(y_pred, axis=1)) # ticklabels = ["blasts_highSSC_granulocytes", # "blasts_highSSC_middle_ugly", # "blasts", # "normal cells", # "blasts_highSSC_upper_dead"] # sns.heatmap(cm, annot=True, cmap='Blues', yticklabels=ticklabels, xticklabels=ticklabels) # plt.ylabel("Correct") # plt.xlabel("Prediction") # plt.tight_layout() # plt.savefig('result/cnn_supplementary/confusion_matrix_cnn_supplementary.png') # plt.close() # # # F1 micro/macro # f1_macro = f1_score(y_test, np.argmax(y_pred, axis=1), average="macro") # f1_micro = f1_score(y_test, np.argmax(y_pred, axis=1), average="micro") # print(f"f1_macro:{f1_macro}") # print(f"f1_miro:{f1_micro}")
flexible
{ "blob_id": "ebc050544da69837cc2b8977f347380b94474bab", "index": 576, "step-1": "<mask token>\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n<mask token>\nmodel.summary()\n", "step-3": "<mask token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nclasses = ['normal cells', 'blasts', 'blasts_highSSC_granulocytes',\n 'blasts_highSSC_middle_ugly', 'blasts_highSSC_upper_dead']\nnum_classes = len(classes)\nimage_size = 66\nimagefiles = np.load('imagefiles_supplementary.npz')\nX_train = imagefiles['X_train']\nX_test = imagefiles['X_test']\ny_train = imagefiles['y_train']\ny_test = imagefiles['y_test']\nX_train = X_train.reshape((-1, image_size, image_size, 1))\nX_test = X_test.reshape((-1, image_size, image_size, 1))\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n_input = Input(X_train.shape[1:])\noutput = _build(_input, [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', 
moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(112, (1, 1), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), (Conv2D(48, (3, 3),\n strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon=\n 0.001, center=True, scale=True, beta_initializer='zeros',\n gamma_initializer='ones', moving_mean_initializer='zeros',\n moving_variance_initializer='ones', beta_regularizer=None,\n gamma_regularizer=None, beta_constraint=None, gamma_constraint=None),\n Activation('relu'))], Add(), [MaxPooling2D(pool_size=(2, 2), strides=\n None, padding='valid', data_format=None), Flatten(), Dense(256,\n activation='relu'), Dropout(0.5), Dense(num_classes, activation='softmax')]\n )\nmodel 
= Model(_input, output)\nmodel.summary()\n", "step-4": "import os\nimport numpy as np\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten, concatenate\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, f1_score\nimport seaborn as sns\nfrom keras.layers import Input, Dense, Add, Multiply\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nclasses = ['normal cells', 'blasts', 'blasts_highSSC_granulocytes',\n 'blasts_highSSC_middle_ugly', 'blasts_highSSC_upper_dead']\nnum_classes = len(classes)\nimage_size = 66\nimagefiles = np.load('imagefiles_supplementary.npz')\nX_train = imagefiles['X_train']\nX_test = imagefiles['X_test']\ny_train = imagefiles['y_train']\ny_test = imagefiles['y_test']\nX_train = X_train.reshape((-1, image_size, image_size, 1))\nX_test = X_test.reshape((-1, image_size, image_size, 1))\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n_input = Input(X_train.shape[1:])\noutput = _build(_input, [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), 
strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(112, (1, 1), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), (Conv2D(48, (3, 3),\n strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon=\n 0.001, center=True, scale=True, beta_initializer='zeros',\n gamma_initializer='ones', moving_mean_initializer='zeros',\n moving_variance_initializer='ones', beta_regularizer=None,\n gamma_regularizer=None, beta_constraint=None, gamma_constraint=None),\n Activation('relu'))], Add(), [MaxPooling2D(pool_size=(2, 2), strides=\n None, padding='valid', data_format=None), Flatten(), Dense(256,\n activation='relu'), Dropout(0.5), Dense(num_classes, activation='softmax')]\n )\nmodel = Model(_input, output)\nmodel.summary()\n", "step-5": "import os\n\n\nimport numpy as np\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten, 
concatenate\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, f1_score\nimport seaborn as sns\nfrom keras.layers import Input, Dense, Add, Multiply\n\n# macOS特有の警告文を非表示(GPUがないからCPUでやるときに出る)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# パラメータの初期化\nclasses = [\n \"normal cells\",\n \"blasts\",\n \"blasts_highSSC_granulocytes\",\n \"blasts_highSSC_middle_ugly\",\n \"blasts_highSSC_upper_dead\",\n]\nnum_classes = len(classes)\nimage_size = 66\n\n# データの読み込み\nimagefiles = np.load(\"imagefiles_supplementary.npz\")\nX_train = imagefiles['X_train']\nX_test = imagefiles['X_test']\ny_train = imagefiles['y_train']\ny_test = imagefiles['y_test']\n# グレースケール画像をCNNに入力するための次元操作\nX_train = X_train.reshape((-1, image_size, image_size, 1))\nX_test = X_test.reshape((-1, image_size, image_size, 1))\n# データの正規化\nX_train = X_train.astype(\"float32\")\nX_test = X_test.astype(\"float32\")\n# OneHotVector化する(正解ラベルの位置に1がつく)\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n_input = Input(X_train.shape[1:])\noutput = _build(\n _input,\n # Reduction dual-path module×3の定義\n # ---------------------------\n # 畳み込み層の追加(96:フィルタ数)\n # バッチ正規化\n # 活性化関数:ReLu\n # ---------------------------\n # MaxPooling\n # ---------------------------\n # Reduction dual-path module1\n [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', 
gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu')),\n MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],\n # Reduction dual-path module2\n Add(),\n [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu')),\n MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],\n # Reduction dual-path module3\n Add(),\n [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu')),\n MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],\n\n # Dual-path modules×10の定義\n # ---------------------------\n # 畳み込み層の追加(112:フィルタ数)\n # バッチ正規化\n # 活性化関数:ReLu\n # ---------------------------\n # Dual-path modules2の定義\n # 畳み込み層の追加(48:フィルタ数)\n # バッチ正規化\n # 活性化関数:ReLu\n # ---------------------------\n # Dual-path modules1\n Add(),\n [(Conv2D(112, (1, 1), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu'),\n ),\n (Conv2D(48, (3, 3), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n 
beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu'),\n )],\n # # Dual-path modules2\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules3\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules4\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, 
center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules5\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules6\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, 
center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules7\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules8\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules9\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # 
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules10\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # 全結合\n Add(),\n [MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None),\n Flatten(),\n Dense(256, activation='relu'),\n Dropout(0.5),\n Dense(num_classes, activation='softmax')\n ]\n)\nmodel = Model(_input, output)\nmodel.summary()\n\n# # 損失関数の設定\n# opt = SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)\n# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n#\n# # トレーニングの実施\n# # 学習\n# print(\"start 
training\")\n# hist = model.fit(X_train, y_train, batch_size=32, epochs=30, validation_data=(X_test, y_test))\n# # 評価\n# print(\"start eval\")\n# score = model.evaluate(X_test, y_test, batch_size=32, verbose=1) # verbose:途中結果表示\n# print('Test Loss: ', score[0])\n# print('Test Accuracy: ', score[1])\n#\n# model.save('leukemia_cnn_supplementary.h5')\n#\n# # 学習の様子をグラフへ描画\n# # 正解率の推移をプロット\n# fig = plt.figure()\n# plt.plot(hist.history['accuracy'])\n# plt.plot(hist.history['val_accuracy'])\n# plt.title('Accuracy')\n# plt.legend(['train', 'test'], loc='upper left')\n# fig.savefig('result/cnn_supplementary/cnn_accuracy_supplementary.png')\n# plt.close()\n# # ロスの推移をプロット\n# fig = plt.figure()\n# plt.plot(hist.history['loss'])\n# plt.plot(hist.history['val_loss'])\n# plt.title('Loss')\n# plt.legend(['train', 'test'], loc='upper left')\n# fig.savefig('result/cnn_supplementary/cnn_loss_supplementary.png')\n# plt.close()\n# # Confusion matrix作成\n# plt.figure()\n# y_pred = model.predict(X_test)\n# y_test = imagefiles['y_test'] # one hot vector化されているのでロードし直す\n# cm = confusion_matrix(y_test, np.argmax(y_pred, axis=1))\n# ticklabels = [\"blasts_highSSC_granulocytes\",\n# \"blasts_highSSC_middle_ugly\",\n# \"blasts\",\n# \"normal cells\",\n# \"blasts_highSSC_upper_dead\"]\n# sns.heatmap(cm, annot=True, cmap='Blues', yticklabels=ticklabels, xticklabels=ticklabels)\n# plt.ylabel(\"Correct\")\n# plt.xlabel(\"Prediction\")\n# plt.tight_layout()\n# plt.savefig('result/cnn_supplementary/confusion_matrix_cnn_supplementary.png')\n# plt.close()\n#\n# # F1 micro/macro\n# f1_macro = f1_score(y_test, np.argmax(y_pred, axis=1), average=\"macro\")\n# f1_micro = f1_score(y_test, np.argmax(y_pred, axis=1), average=\"micro\")\n# print(f\"f1_macro:{f1_macro}\")\n# print(f\"f1_miro:{f1_micro}\")\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> setup(name='gromacsplotter', version='0.1', description= 'Read xvg files created with gromacs for plotting with matplotlib', url ='', author='Ilyas Kuhlemann', author_email='ilyasp.ku@gmail.com', license='MIT', packages=['gromacsplotter'], scripts=[], install_requires=['numpy', 'matplotlib'], entry_points={ 'console_scripts': [ 'gromacsplotter = gromacsplotter.plot_xvg_data:main']}, zip_safe=False) <|reserved_special_token_1|> <|reserved_special_token_0|> from setuptools import setup setup(name='gromacsplotter', version='0.1', description= 'Read xvg files created with gromacs for plotting with matplotlib', url ='', author='Ilyas Kuhlemann', author_email='ilyasp.ku@gmail.com', license='MIT', packages=['gromacsplotter'], scripts=[], install_requires=['numpy', 'matplotlib'], entry_points={ 'console_scripts': [ 'gromacsplotter = gromacsplotter.plot_xvg_data:main']}, zip_safe=False) <|reserved_special_token_1|> """ USAGE: o install in develop mode: navigate to the folder containing this file, and type 'python setup.py develop --user'. (ommit '--user' if you want to install for all users) """ from setuptools import setup setup(name='gromacsplotter', version='0.1', description='Read xvg files created with gromacs for plotting with matplotlib', url='', author='Ilyas Kuhlemann', author_email='ilyasp.ku@gmail.com', license='MIT', packages=["gromacsplotter"], scripts=[], install_requires=['numpy', "matplotlib"], entry_points = { 'console_scripts': ["gromacsplotter = gromacsplotter.plot_xvg_data:main"] }, zip_safe=False)
flexible
{ "blob_id": "cfa862988edf9d70aa5e975cca58b4e61a4de847", "index": 759, "step-1": "<mask token>\n", "step-2": "<mask token>\nsetup(name='gromacsplotter', version='0.1', description=\n 'Read xvg files created with gromacs for plotting with matplotlib', url\n ='', author='Ilyas Kuhlemann', author_email='ilyasp.ku@gmail.com',\n license='MIT', packages=['gromacsplotter'], scripts=[],\n install_requires=['numpy', 'matplotlib'], entry_points={\n 'console_scripts': [\n 'gromacsplotter = gromacsplotter.plot_xvg_data:main']}, zip_safe=False)\n", "step-3": "<mask token>\nfrom setuptools import setup\nsetup(name='gromacsplotter', version='0.1', description=\n 'Read xvg files created with gromacs for plotting with matplotlib', url\n ='', author='Ilyas Kuhlemann', author_email='ilyasp.ku@gmail.com',\n license='MIT', packages=['gromacsplotter'], scripts=[],\n install_requires=['numpy', 'matplotlib'], entry_points={\n 'console_scripts': [\n 'gromacsplotter = gromacsplotter.plot_xvg_data:main']}, zip_safe=False)\n", "step-4": "\"\"\"\nUSAGE: \n o install in develop mode: navigate to the folder containing this file,\n and type 'python setup.py develop --user'.\n (ommit '--user' if you want to install for \n all users) \n\"\"\"\n\n\nfrom setuptools import setup\n\nsetup(name='gromacsplotter',\n version='0.1',\n description='Read xvg files created with gromacs for plotting with matplotlib',\n url='',\n author='Ilyas Kuhlemann',\n author_email='ilyasp.ku@gmail.com',\n license='MIT',\n packages=[\"gromacsplotter\"],\n scripts=[],\n install_requires=['numpy',\n \"matplotlib\"],\n entry_points = {\n 'console_scripts': [\"gromacsplotter = gromacsplotter.plot_xvg_data:main\"]\n },\n zip_safe=False)\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import ctypes from game import GameWindow import start_window as m_window def start_button_callback(obj, w, h, amount): _max = int(w.get()) * int(h.get()) if not (obj.validation_check(w) and obj.validation_check(h) and obj.validation_check(amount, _max)): ctypes.windll.user32.MessageBoxW(0, "Wprowadź poprawne dane", "Błąd", 1) return False else: obj.exit() game = GameWindow(int(w.get()), int(h.get()), int(amount.get())) game.start_game() return True def main(): main_window = m_window.MainWindow() main_window.init(start_button_callback).mainloop() if __name__ == '__main__': main()
normal
{ "blob_id": "65eb7d01ccea137605d54d816b707c2cd3709931", "index": 2067, "step-1": "<mask token>\n\n\ndef start_button_callback(obj, w, h, amount):\n _max = int(w.get()) * int(h.get())\n if not (obj.validation_check(w) and obj.validation_check(h) and obj.\n validation_check(amount, _max)):\n ctypes.windll.user32.MessageBoxW(0, 'Wprowadź poprawne dane', 'Błąd', 1\n )\n return False\n else:\n obj.exit()\n game = GameWindow(int(w.get()), int(h.get()), int(amount.get()))\n game.start_game()\n return True\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef start_button_callback(obj, w, h, amount):\n _max = int(w.get()) * int(h.get())\n if not (obj.validation_check(w) and obj.validation_check(h) and obj.\n validation_check(amount, _max)):\n ctypes.windll.user32.MessageBoxW(0, 'Wprowadź poprawne dane', 'Błąd', 1\n )\n return False\n else:\n obj.exit()\n game = GameWindow(int(w.get()), int(h.get()), int(amount.get()))\n game.start_game()\n return True\n\n\ndef main():\n main_window = m_window.MainWindow()\n main_window.init(start_button_callback).mainloop()\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef start_button_callback(obj, w, h, amount):\n _max = int(w.get()) * int(h.get())\n if not (obj.validation_check(w) and obj.validation_check(h) and obj.\n validation_check(amount, _max)):\n ctypes.windll.user32.MessageBoxW(0, 'Wprowadź poprawne dane', 'Błąd', 1\n )\n return False\n else:\n obj.exit()\n game = GameWindow(int(w.get()), int(h.get()), int(amount.get()))\n game.start_game()\n return True\n\n\ndef main():\n main_window = m_window.MainWindow()\n main_window.init(start_button_callback).mainloop()\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import ctypes\nfrom game import GameWindow\nimport start_window as m_window\n\n\ndef start_button_callback(obj, w, h, amount):\n _max = int(w.get()) * int(h.get())\n if not (obj.validation_check(w) and obj.validation_check(h) and obj.\n validation_check(amount, _max)):\n ctypes.windll.user32.MessageBoxW(0, 
'Wprowadź poprawne dane', 'Błąd', 1\n )\n return False\n else:\n obj.exit()\n game = GameWindow(int(w.get()), int(h.get()), int(amount.get()))\n game.start_game()\n return True\n\n\ndef main():\n main_window = m_window.MainWindow()\n main_window.init(start_button_callback).mainloop()\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import ctypes\n\nfrom game import GameWindow\nimport start_window as m_window\n\n\ndef start_button_callback(obj, w, h, amount):\n _max = int(w.get()) * int(h.get())\n if not (obj.validation_check(w) and obj.validation_check(h) and obj.validation_check(amount, _max)):\n ctypes.windll.user32.MessageBoxW(0, \"Wprowadź poprawne dane\", \"Błąd\", 1)\n return False\n else:\n obj.exit()\n game = GameWindow(int(w.get()), int(h.get()), int(amount.get()))\n game.start_game()\n return True\n\n\ndef main():\n main_window = m_window.MainWindow()\n main_window.init(start_button_callback).mainloop()\n\n\nif __name__ == '__main__':\n main()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for i in range(img.height): for j in range(img.width): pixel_val = pixels[i][j] color_idx = None if saturation_color == 'R': color_idx = 0 elif saturation_color == 'G': color_idx = 1 elif saturation_color == 'B': color_idx = 2 color_val = pixel_val[color_idx] + saturation_modifier if color_val > 255: color_val = 255 pixel_list = list(pixel_val) pixel_list[color_idx] = color_val pixels[i][j] = tuple(pixel_list) savePixelsToImage(editedFilePath(file_name, 'saturated'), pixels) <|reserved_special_token_1|> <|reserved_special_token_0|> file_name = sys.argv[1] saturation_color = sys.argv[2] saturation_modifier = int(sys.argv[3]) img = getImage(file_name) pixels = pixelValues(img) for i in range(img.height): for j in range(img.width): pixel_val = pixels[i][j] color_idx = None if saturation_color == 'R': color_idx = 0 elif saturation_color == 'G': color_idx = 1 elif saturation_color == 'B': color_idx = 2 color_val = pixel_val[color_idx] + saturation_modifier if color_val > 255: color_val = 255 pixel_list = list(pixel_val) pixel_list[color_idx] = color_val pixels[i][j] = tuple(pixel_list) savePixelsToImage(editedFilePath(file_name, 'saturated'), pixels) <|reserved_special_token_1|> import sys from PIL import Image from pr_common import * file_name = sys.argv[1] saturation_color = sys.argv[2] saturation_modifier = int(sys.argv[3]) img = getImage(file_name) pixels = pixelValues(img) for i in range(img.height): for j in range(img.width): pixel_val = pixels[i][j] color_idx = None if saturation_color == 'R': color_idx = 0 elif saturation_color == 'G': color_idx = 1 elif saturation_color == 'B': color_idx = 2 color_val = pixel_val[color_idx] + saturation_modifier if color_val > 255: color_val = 255 pixel_list = list(pixel_val) pixel_list[color_idx] = color_val pixels[i][j] = tuple(pixel_list) savePixelsToImage(editedFilePath(file_name, 'saturated'), pixels) <|reserved_special_token_1|> import 
sys from PIL import Image from pr_common import * file_name = sys.argv[1] saturation_color = sys.argv[2] saturation_modifier = int(sys.argv[3]) img = getImage(file_name) pixels = pixelValues(img) for i in range(img.height): for j in range(img.width): pixel_val = pixels[i][j] color_idx = None if (saturation_color == "R"): color_idx = 0 elif (saturation_color == "G"): color_idx = 1 elif (saturation_color == "B"): color_idx = 2 color_val = pixel_val[color_idx] + saturation_modifier if (color_val > 255): color_val = 255 pixel_list = list(pixel_val) pixel_list[color_idx] = color_val pixels[i][j] = tuple(pixel_list) savePixelsToImage(editedFilePath(file_name, "saturated"), pixels)
flexible
{ "blob_id": "96ef95d8997eeab3d85a1bb6e4f8c86c9bfbb0a2", "index": 4732, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor i in range(img.height):\n for j in range(img.width):\n pixel_val = pixels[i][j]\n color_idx = None\n if saturation_color == 'R':\n color_idx = 0\n elif saturation_color == 'G':\n color_idx = 1\n elif saturation_color == 'B':\n color_idx = 2\n color_val = pixel_val[color_idx] + saturation_modifier\n if color_val > 255:\n color_val = 255\n pixel_list = list(pixel_val)\n pixel_list[color_idx] = color_val\n pixels[i][j] = tuple(pixel_list)\nsavePixelsToImage(editedFilePath(file_name, 'saturated'), pixels)\n", "step-3": "<mask token>\nfile_name = sys.argv[1]\nsaturation_color = sys.argv[2]\nsaturation_modifier = int(sys.argv[3])\nimg = getImage(file_name)\npixels = pixelValues(img)\nfor i in range(img.height):\n for j in range(img.width):\n pixel_val = pixels[i][j]\n color_idx = None\n if saturation_color == 'R':\n color_idx = 0\n elif saturation_color == 'G':\n color_idx = 1\n elif saturation_color == 'B':\n color_idx = 2\n color_val = pixel_val[color_idx] + saturation_modifier\n if color_val > 255:\n color_val = 255\n pixel_list = list(pixel_val)\n pixel_list[color_idx] = color_val\n pixels[i][j] = tuple(pixel_list)\nsavePixelsToImage(editedFilePath(file_name, 'saturated'), pixels)\n", "step-4": "import sys\nfrom PIL import Image\nfrom pr_common import *\nfile_name = sys.argv[1]\nsaturation_color = sys.argv[2]\nsaturation_modifier = int(sys.argv[3])\nimg = getImage(file_name)\npixels = pixelValues(img)\nfor i in range(img.height):\n for j in range(img.width):\n pixel_val = pixels[i][j]\n color_idx = None\n if saturation_color == 'R':\n color_idx = 0\n elif saturation_color == 'G':\n color_idx = 1\n elif saturation_color == 'B':\n color_idx = 2\n color_val = pixel_val[color_idx] + saturation_modifier\n if color_val > 255:\n color_val = 255\n pixel_list = list(pixel_val)\n pixel_list[color_idx] = color_val\n pixels[i][j] = 
tuple(pixel_list)\nsavePixelsToImage(editedFilePath(file_name, 'saturated'), pixels)\n", "step-5": "import sys\nfrom PIL import Image\nfrom pr_common import *\n\nfile_name = sys.argv[1]\nsaturation_color = sys.argv[2]\nsaturation_modifier = int(sys.argv[3])\n\nimg = getImage(file_name)\npixels = pixelValues(img)\n\nfor i in range(img.height):\n for j in range(img.width):\n pixel_val = pixels[i][j]\n color_idx = None\n\n if (saturation_color == \"R\"):\n color_idx = 0\n elif (saturation_color == \"G\"):\n color_idx = 1\n elif (saturation_color == \"B\"):\n color_idx = 2\n\n color_val = pixel_val[color_idx] + saturation_modifier\n \n if (color_val > 255):\n color_val = 255\n \n pixel_list = list(pixel_val)\n pixel_list[color_idx] = color_val\n pixels[i][j] = tuple(pixel_list)\n\nsavePixelsToImage(editedFilePath(file_name, \"saturated\"), pixels)\n ", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
from pydis.datastruct.sds import SdsImp class RPCStub(object): def __init__(self): pass def SET(self, key, value): self print("{}: {}".format(key, value))
normal
{ "blob_id": "74f85732b4e1f4ef2b82a48818cbaedb18a56083", "index": 8122, "step-1": "<mask token>\n\n\nclass RPCStub(object):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass RPCStub(object):\n\n def __init__(self):\n pass\n <mask token>\n", "step-3": "<mask token>\n\n\nclass RPCStub(object):\n\n def __init__(self):\n pass\n\n def SET(self, key, value):\n self\n print('{}: {}'.format(key, value))\n", "step-4": "from pydis.datastruct.sds import SdsImp\n\n\nclass RPCStub(object):\n\n def __init__(self):\n pass\n\n def SET(self, key, value):\n self\n print('{}: {}'.format(key, value))\n", "step-5": "from pydis.datastruct.sds import SdsImp\n\n\nclass RPCStub(object):\n def __init__(self):\n pass\n\n def SET(self, key, value):\n self\n print(\"{}: {}\".format(key, value))\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
class UnknownResponseFormat(Exception): pass
normal
{ "blob_id": "e5e460eb704e2ab5f747d1beee05e012ea95fbd2", "index": 3871, "step-1": "<mask token>\n", "step-2": "class UnknownResponseFormat(Exception):\n pass\n", "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0, 1 ] }
[ 0, 1 ]
import numpy as np class LayerBase(object): def __init__(self, units_count, activation_func): self.current_layer_dim = units_count self.activation_func = activation_func self.weights = None self.bias = None self.pre_activation = None self.activation_layer = None self.activation = None self.d_weights = None self.d_bias = None self.d_activation = None def __if_params_not_initialized(self): return (self.weights is None) or (self.bias is None) def __init_parameters(self, size_of_previous_layer): self.weights = np.random.randn(self.current_layer_dim, size_of_previous_layer) \ * np.sqrt(2. / size_of_previous_layer) self.bias = np.zeros((self.current_layer_dim, 1)) def __forward_linear(self): if self.__if_params_not_initialized(): self.__init_parameters(self.activation.shape[0]) self.pre_activation = self.weights.dot(self.activation) + self.bias def forward(self, activation): self.activation = activation self.__forward_linear() self.activation_layer = self.activation_func.forward(self.pre_activation) return self.activation_layer def __backward_linear(self, d_pre_activation): m = self.activation.shape[1] self.d_weights = 1. / m * np.dot(d_pre_activation, self.activation.T) self.d_bias = 1. / m * np.sum(d_pre_activation, axis=1, keepdims=True) self.d_activation = np.dot(self.weights.T, d_pre_activation) def backward(self, dA): dZ = self.activation_func.backward(self.pre_activation, dA) self.__backward_linear(dZ) return self.d_activation def get_parameters_slope(self): return self.d_weights, self.d_bias def update_parameters(self, delta_weights, delta_bias): self.weights -= delta_weights self.bias -= delta_bias
normal
{ "blob_id": "389ccddcbe2214ae5c012bc82a404a81942792d8", "index": 1770, "step-1": "<mask token>\n\n\nclass LayerBase(object):\n\n def __init__(self, units_count, activation_func):\n self.current_layer_dim = units_count\n self.activation_func = activation_func\n self.weights = None\n self.bias = None\n self.pre_activation = None\n self.activation_layer = None\n self.activation = None\n self.d_weights = None\n self.d_bias = None\n self.d_activation = None\n\n def __if_params_not_initialized(self):\n return self.weights is None or self.bias is None\n <mask token>\n\n def __forward_linear(self):\n if self.__if_params_not_initialized():\n self.__init_parameters(self.activation.shape[0])\n self.pre_activation = self.weights.dot(self.activation) + self.bias\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass LayerBase(object):\n\n def __init__(self, units_count, activation_func):\n self.current_layer_dim = units_count\n self.activation_func = activation_func\n self.weights = None\n self.bias = None\n self.pre_activation = None\n self.activation_layer = None\n self.activation = None\n self.d_weights = None\n self.d_bias = None\n self.d_activation = None\n\n def __if_params_not_initialized(self):\n return self.weights is None or self.bias is None\n <mask token>\n\n def __forward_linear(self):\n if self.__if_params_not_initialized():\n self.__init_parameters(self.activation.shape[0])\n self.pre_activation = self.weights.dot(self.activation) + self.bias\n\n def forward(self, activation):\n self.activation = activation\n self.__forward_linear()\n self.activation_layer = self.activation_func.forward(self.\n pre_activation)\n return self.activation_layer\n <mask token>\n\n def backward(self, dA):\n dZ = self.activation_func.backward(self.pre_activation, dA)\n self.__backward_linear(dZ)\n return self.d_activation\n\n def get_parameters_slope(self):\n return self.d_weights, self.d_bias\n <mask token>\n", "step-3": 
"<mask token>\n\n\nclass LayerBase(object):\n\n def __init__(self, units_count, activation_func):\n self.current_layer_dim = units_count\n self.activation_func = activation_func\n self.weights = None\n self.bias = None\n self.pre_activation = None\n self.activation_layer = None\n self.activation = None\n self.d_weights = None\n self.d_bias = None\n self.d_activation = None\n\n def __if_params_not_initialized(self):\n return self.weights is None or self.bias is None\n\n def __init_parameters(self, size_of_previous_layer):\n self.weights = np.random.randn(self.current_layer_dim,\n size_of_previous_layer) * np.sqrt(2.0 / size_of_previous_layer)\n self.bias = np.zeros((self.current_layer_dim, 1))\n\n def __forward_linear(self):\n if self.__if_params_not_initialized():\n self.__init_parameters(self.activation.shape[0])\n self.pre_activation = self.weights.dot(self.activation) + self.bias\n\n def forward(self, activation):\n self.activation = activation\n self.__forward_linear()\n self.activation_layer = self.activation_func.forward(self.\n pre_activation)\n return self.activation_layer\n\n def __backward_linear(self, d_pre_activation):\n m = self.activation.shape[1]\n self.d_weights = 1.0 / m * np.dot(d_pre_activation, self.activation.T)\n self.d_bias = 1.0 / m * np.sum(d_pre_activation, axis=1, keepdims=True)\n self.d_activation = np.dot(self.weights.T, d_pre_activation)\n\n def backward(self, dA):\n dZ = self.activation_func.backward(self.pre_activation, dA)\n self.__backward_linear(dZ)\n return self.d_activation\n\n def get_parameters_slope(self):\n return self.d_weights, self.d_bias\n <mask token>\n", "step-4": "<mask token>\n\n\nclass LayerBase(object):\n\n def __init__(self, units_count, activation_func):\n self.current_layer_dim = units_count\n self.activation_func = activation_func\n self.weights = None\n self.bias = None\n self.pre_activation = None\n self.activation_layer = None\n self.activation = None\n self.d_weights = None\n self.d_bias = None\n 
self.d_activation = None\n\n def __if_params_not_initialized(self):\n return self.weights is None or self.bias is None\n\n def __init_parameters(self, size_of_previous_layer):\n self.weights = np.random.randn(self.current_layer_dim,\n size_of_previous_layer) * np.sqrt(2.0 / size_of_previous_layer)\n self.bias = np.zeros((self.current_layer_dim, 1))\n\n def __forward_linear(self):\n if self.__if_params_not_initialized():\n self.__init_parameters(self.activation.shape[0])\n self.pre_activation = self.weights.dot(self.activation) + self.bias\n\n def forward(self, activation):\n self.activation = activation\n self.__forward_linear()\n self.activation_layer = self.activation_func.forward(self.\n pre_activation)\n return self.activation_layer\n\n def __backward_linear(self, d_pre_activation):\n m = self.activation.shape[1]\n self.d_weights = 1.0 / m * np.dot(d_pre_activation, self.activation.T)\n self.d_bias = 1.0 / m * np.sum(d_pre_activation, axis=1, keepdims=True)\n self.d_activation = np.dot(self.weights.T, d_pre_activation)\n\n def backward(self, dA):\n dZ = self.activation_func.backward(self.pre_activation, dA)\n self.__backward_linear(dZ)\n return self.d_activation\n\n def get_parameters_slope(self):\n return self.d_weights, self.d_bias\n\n def update_parameters(self, delta_weights, delta_bias):\n self.weights -= delta_weights\n self.bias -= delta_bias\n", "step-5": "import numpy as np\n\n\nclass LayerBase(object):\n def __init__(self, units_count, activation_func):\n self.current_layer_dim = units_count\n self.activation_func = activation_func\n\n self.weights = None\n self.bias = None\n self.pre_activation = None\n self.activation_layer = None\n self.activation = None\n self.d_weights = None\n self.d_bias = None\n self.d_activation = None\n\n def __if_params_not_initialized(self):\n return (self.weights is None) or (self.bias is None)\n\n def __init_parameters(self, size_of_previous_layer):\n self.weights = np.random.randn(self.current_layer_dim, 
size_of_previous_layer) \\\n * np.sqrt(2. / size_of_previous_layer)\n self.bias = np.zeros((self.current_layer_dim, 1))\n\n def __forward_linear(self):\n if self.__if_params_not_initialized():\n self.__init_parameters(self.activation.shape[0])\n\n self.pre_activation = self.weights.dot(self.activation) + self.bias\n\n def forward(self, activation):\n self.activation = activation\n self.__forward_linear()\n self.activation_layer = self.activation_func.forward(self.pre_activation)\n return self.activation_layer\n\n def __backward_linear(self, d_pre_activation):\n m = self.activation.shape[1]\n\n self.d_weights = 1. / m * np.dot(d_pre_activation, self.activation.T)\n self.d_bias = 1. / m * np.sum(d_pre_activation, axis=1, keepdims=True)\n self.d_activation = np.dot(self.weights.T, d_pre_activation)\n\n def backward(self, dA):\n dZ = self.activation_func.backward(self.pre_activation, dA)\n self.__backward_linear(dZ)\n return self.d_activation\n\n def get_parameters_slope(self):\n return self.d_weights, self.d_bias\n\n def update_parameters(self, delta_weights, delta_bias):\n self.weights -= delta_weights\n self.bias -= delta_bias\n", "step-ids": [ 4, 7, 9, 10, 12 ] }
[ 4, 7, 9, 10, 12 ]
def Return(): s = raw_input('Enter a s: ') i = 0 s1 = '' leng = len(s) while i < leng: if s[i] == s[i].lower(): s1 += s[i].upper() else: s1 += s[i].lower() i += 1 return s1 if __name__ == '__main__': print Return()
normal
{ "blob_id": "6dafb60b79a389499ae2a0f17f9618426faf45a9", "index": 8880, "step-1": "def Return():\n s = raw_input('Enter a s: ')\n i = 0\n s1 = ''\n leng = len(s)\n while i < leng:\n if s[i] == s[i].lower():\n s1 += s[i].upper()\n else:\n s1 += s[i].lower()\n i += 1\n \n return s1\n\nif __name__ == '__main__':\n \n print Return()\n\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> def register(locator: str, entry_point, **kwargs): """Register an AgentSpec with the zoo. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'locator-name' entry_point: A callable that returns an AgentSpec or an AgentSpec object For example: .. code-block:: python register( locator="motion-planner-agent-v0", entry_point=lambda **kwargs: AgentSpec( interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose), agent_builder=MotionPlannerAgent, ), ) """ agent_registry.register(name=locator, entry_point=entry_point, **kwargs) def make(locator: str, **kwargs): """Create an AgentSpec from the given locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: AgentSpec: The agent specifications needed to instantiate and configure an agent. """ from smarts.zoo.agent_spec import AgentSpec agent_spec = agent_registry.make(locator, **kwargs) assert isinstance(agent_spec, AgentSpec ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}' return agent_spec <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def register(locator: str, entry_point, **kwargs): """Register an AgentSpec with the zoo. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'locator-name' entry_point: A callable that returns an AgentSpec or an AgentSpec object For example: .. 
code-block:: python register( locator="motion-planner-agent-v0", entry_point=lambda **kwargs: AgentSpec( interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose), agent_builder=MotionPlannerAgent, ), ) """ agent_registry.register(name=locator, entry_point=entry_point, **kwargs) def make(locator: str, **kwargs): """Create an AgentSpec from the given locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: AgentSpec: The agent specifications needed to instantiate and configure an agent. """ from smarts.zoo.agent_spec import AgentSpec agent_spec = agent_registry.make(locator, **kwargs) assert isinstance(agent_spec, AgentSpec ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}' return agent_spec def make_agent(locator: str, **kwargs): """Create an Agent from the given agent spec locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: Tuple[Agent, AgentInterface]: The agent and its interface. """ agent_spec = make(locator, **kwargs) return agent_spec.build_agent(), agent_spec.interface <|reserved_special_token_1|> <|reserved_special_token_0|> agent_registry = ClassRegister() def register(locator: str, entry_point, **kwargs): """Register an AgentSpec with the zoo. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. 
Args: locator: A string in the format of 'locator-name' entry_point: A callable that returns an AgentSpec or an AgentSpec object For example: .. code-block:: python register( locator="motion-planner-agent-v0", entry_point=lambda **kwargs: AgentSpec( interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose), agent_builder=MotionPlannerAgent, ), ) """ agent_registry.register(name=locator, entry_point=entry_point, **kwargs) def make(locator: str, **kwargs): """Create an AgentSpec from the given locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: AgentSpec: The agent specifications needed to instantiate and configure an agent. """ from smarts.zoo.agent_spec import AgentSpec agent_spec = agent_registry.make(locator, **kwargs) assert isinstance(agent_spec, AgentSpec ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}' return agent_spec def make_agent(locator: str, **kwargs): """Create an Agent from the given agent spec locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: Tuple[Agent, AgentInterface]: The agent and its interface. """ agent_spec = make(locator, **kwargs) return agent_spec.build_agent(), agent_spec.interface <|reserved_special_token_1|> from smarts.core.utils.class_factory import ClassRegister agent_registry = ClassRegister() def register(locator: str, entry_point, **kwargs): """Register an AgentSpec with the zoo. 
In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'locator-name' entry_point: A callable that returns an AgentSpec or an AgentSpec object For example: .. code-block:: python register( locator="motion-planner-agent-v0", entry_point=lambda **kwargs: AgentSpec( interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose), agent_builder=MotionPlannerAgent, ), ) """ agent_registry.register(name=locator, entry_point=entry_point, **kwargs) def make(locator: str, **kwargs): """Create an AgentSpec from the given locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: AgentSpec: The agent specifications needed to instantiate and configure an agent. """ from smarts.zoo.agent_spec import AgentSpec agent_spec = agent_registry.make(locator, **kwargs) assert isinstance(agent_spec, AgentSpec ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}' return agent_spec def make_agent(locator: str, **kwargs): """Create an Agent from the given agent spec locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: Tuple[Agent, AgentInterface]: The agent and its interface. """ agent_spec = make(locator, **kwargs) return agent_spec.build_agent(), agent_spec.interface <|reserved_special_token_1|> # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
# # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from smarts.core.utils.class_factory import ClassRegister agent_registry = ClassRegister() def register(locator: str, entry_point, **kwargs): """Register an AgentSpec with the zoo. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'locator-name' entry_point: A callable that returns an AgentSpec or an AgentSpec object For example: .. code-block:: python register( locator="motion-planner-agent-v0", entry_point=lambda **kwargs: AgentSpec( interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose), agent_builder=MotionPlannerAgent, ), ) """ agent_registry.register(name=locator, entry_point=entry_point, **kwargs) def make(locator: str, **kwargs): """Create an AgentSpec from the given locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. 
Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: AgentSpec: The agent specifications needed to instantiate and configure an agent. """ from smarts.zoo.agent_spec import AgentSpec agent_spec = agent_registry.make(locator, **kwargs) assert isinstance( agent_spec, AgentSpec ), f"Expected make to produce an instance of AgentSpec, got: {agent_spec}" return agent_spec def make_agent(locator: str, **kwargs): """Create an Agent from the given agent spec locator. In order to load a registered AgentSpec it needs to be reachable from a directory contained in the PYTHONPATH. Args: locator: A string in the format of 'path.to.file:locator-name' where the path is in the form `{PYTHONPATH}[n]/path/to/file.py` kwargs: Additional arguments to be passed to the constructed class. Returns: Tuple[Agent, AgentInterface]: The agent and its interface. """ agent_spec = make(locator, **kwargs) return agent_spec.build_agent(), agent_spec.interface
flexible
{ "blob_id": "b77c40c89c88b49c851e9a14c67cf0799d6de847", "index": 9235, "step-1": "<mask token>\n\n\ndef register(locator: str, entry_point, **kwargs):\n \"\"\"Register an AgentSpec with the zoo.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'locator-name'\n entry_point:\n A callable that returns an AgentSpec or an AgentSpec object\n\n For example:\n\n .. code-block:: python\n\n register(\n locator=\"motion-planner-agent-v0\",\n entry_point=lambda **kwargs: AgentSpec(\n interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose),\n agent_builder=MotionPlannerAgent,\n ),\n )\n \"\"\"\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)\n\n\ndef make(locator: str, **kwargs):\n \"\"\"Create an AgentSpec from the given locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n AgentSpec: The agent specifications needed to instantiate and configure an agent.\n \"\"\"\n from smarts.zoo.agent_spec import AgentSpec\n agent_spec = agent_registry.make(locator, **kwargs)\n assert isinstance(agent_spec, AgentSpec\n ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}'\n return agent_spec\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef register(locator: str, entry_point, **kwargs):\n \"\"\"Register an AgentSpec with the zoo.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'locator-name'\n entry_point:\n A callable that returns an AgentSpec or an AgentSpec object\n\n For example:\n\n .. 
code-block:: python\n\n register(\n locator=\"motion-planner-agent-v0\",\n entry_point=lambda **kwargs: AgentSpec(\n interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose),\n agent_builder=MotionPlannerAgent,\n ),\n )\n \"\"\"\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)\n\n\ndef make(locator: str, **kwargs):\n \"\"\"Create an AgentSpec from the given locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n AgentSpec: The agent specifications needed to instantiate and configure an agent.\n \"\"\"\n from smarts.zoo.agent_spec import AgentSpec\n agent_spec = agent_registry.make(locator, **kwargs)\n assert isinstance(agent_spec, AgentSpec\n ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}'\n return agent_spec\n\n\ndef make_agent(locator: str, **kwargs):\n \"\"\"Create an Agent from the given agent spec locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n Tuple[Agent, AgentInterface]: The agent and its interface.\n \"\"\"\n agent_spec = make(locator, **kwargs)\n return agent_spec.build_agent(), agent_spec.interface\n", "step-3": "<mask token>\nagent_registry = ClassRegister()\n\n\ndef register(locator: str, entry_point, **kwargs):\n \"\"\"Register an AgentSpec with the zoo.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n 
locator:\n A string in the format of 'locator-name'\n entry_point:\n A callable that returns an AgentSpec or an AgentSpec object\n\n For example:\n\n .. code-block:: python\n\n register(\n locator=\"motion-planner-agent-v0\",\n entry_point=lambda **kwargs: AgentSpec(\n interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose),\n agent_builder=MotionPlannerAgent,\n ),\n )\n \"\"\"\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)\n\n\ndef make(locator: str, **kwargs):\n \"\"\"Create an AgentSpec from the given locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n AgentSpec: The agent specifications needed to instantiate and configure an agent.\n \"\"\"\n from smarts.zoo.agent_spec import AgentSpec\n agent_spec = agent_registry.make(locator, **kwargs)\n assert isinstance(agent_spec, AgentSpec\n ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}'\n return agent_spec\n\n\ndef make_agent(locator: str, **kwargs):\n \"\"\"Create an Agent from the given agent spec locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n Tuple[Agent, AgentInterface]: The agent and its interface.\n \"\"\"\n agent_spec = make(locator, **kwargs)\n return agent_spec.build_agent(), agent_spec.interface\n", "step-4": "from smarts.core.utils.class_factory import ClassRegister\nagent_registry = ClassRegister()\n\n\ndef register(locator: str, 
entry_point, **kwargs):\n \"\"\"Register an AgentSpec with the zoo.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'locator-name'\n entry_point:\n A callable that returns an AgentSpec or an AgentSpec object\n\n For example:\n\n .. code-block:: python\n\n register(\n locator=\"motion-planner-agent-v0\",\n entry_point=lambda **kwargs: AgentSpec(\n interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose),\n agent_builder=MotionPlannerAgent,\n ),\n )\n \"\"\"\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)\n\n\ndef make(locator: str, **kwargs):\n \"\"\"Create an AgentSpec from the given locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n AgentSpec: The agent specifications needed to instantiate and configure an agent.\n \"\"\"\n from smarts.zoo.agent_spec import AgentSpec\n agent_spec = agent_registry.make(locator, **kwargs)\n assert isinstance(agent_spec, AgentSpec\n ), f'Expected make to produce an instance of AgentSpec, got: {agent_spec}'\n return agent_spec\n\n\ndef make_agent(locator: str, **kwargs):\n \"\"\"Create an Agent from the given agent spec locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n Tuple[Agent, AgentInterface]: The agent and its interface.\n \"\"\"\n agent_spec = make(locator, 
**kwargs)\n return agent_spec.build_agent(), agent_spec.interface\n", "step-5": "# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom smarts.core.utils.class_factory import ClassRegister\n\nagent_registry = ClassRegister()\n\n\ndef register(locator: str, entry_point, **kwargs):\n \"\"\"Register an AgentSpec with the zoo.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'locator-name'\n entry_point:\n A callable that returns an AgentSpec or an AgentSpec object\n\n For example:\n\n .. 
code-block:: python\n\n register(\n locator=\"motion-planner-agent-v0\",\n entry_point=lambda **kwargs: AgentSpec(\n interface=AgentInterface(waypoint_paths=True, action=ActionSpaceType.TargetPose),\n agent_builder=MotionPlannerAgent,\n ),\n )\n \"\"\"\n\n agent_registry.register(name=locator, entry_point=entry_point, **kwargs)\n\n\ndef make(locator: str, **kwargs):\n \"\"\"Create an AgentSpec from the given locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n AgentSpec: The agent specifications needed to instantiate and configure an agent.\n \"\"\"\n\n from smarts.zoo.agent_spec import AgentSpec\n\n agent_spec = agent_registry.make(locator, **kwargs)\n assert isinstance(\n agent_spec, AgentSpec\n ), f\"Expected make to produce an instance of AgentSpec, got: {agent_spec}\"\n\n return agent_spec\n\n\ndef make_agent(locator: str, **kwargs):\n \"\"\"Create an Agent from the given agent spec locator.\n\n In order to load a registered AgentSpec it needs to be reachable from a\n directory contained in the PYTHONPATH.\n\n Args:\n locator:\n A string in the format of 'path.to.file:locator-name' where the path\n is in the form `{PYTHONPATH}[n]/path/to/file.py`\n kwargs:\n Additional arguments to be passed to the constructed class.\n Returns:\n Tuple[Agent, AgentInterface]: The agent and its interface.\n \"\"\"\n\n agent_spec = make(locator, **kwargs)\n\n return agent_spec.build_agent(), agent_spec.interface\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from math import log2 from egosplit.benchmarks.data_structures.cover_benchmark import * from egosplit.benchmarks.evaluation.utility import create_line from networkit.stopwatch import clockit # Analyse the result cover of a benchmark run @clockit def analyze_cover(benchmarks, result_dir, calc_f1, append): if not append: print_headers(result_dir) for benchmark in benchmarks: count_benchmark_cover(result_dir, calc_f1, benchmark) # Print output file headers def print_headers(result_dir): with open(result_dir + 'cover_num_comms.result', 'w') as f: f.write(create_line(*CoverBenchmark.output_header(), 'Number of Communities')) with open(result_dir + 'cover_comm_sizes.result', 'w') as f: f.write(create_line(*CoverBenchmark.output_header(), 'Community Size', 'F1 Score')) with open(result_dir + 'cover_node_comms.result', 'w') as f: f.write(create_line(*CoverBenchmark.output_header(), 'Number of Communities per Node')) # Count the number of communities and their sizes def count_benchmark_cover(result_dir, calc_f1, benchmark): cover = benchmark.get_cover() ground_truth = benchmark.get_ground_truth() comm_map = get_communities(benchmark.get_graph(), cover) gt_map = get_communities(benchmark.get_graph(), ground_truth) comm_sizes = cover.subsetSizeMap() # Number of communities with open(result_dir + 'cover_num_comms.result', 'a') as f: f.write(create_line(*benchmark.output_line(), cover.numberOfSubsets())) # Community sizes and F1 scores with open(result_dir + 'cover_comm_sizes.result', 'a') as f: for u in cover.getSubsetIds(): comm = comm_map[u] size = comm_sizes[u] f1 = f1_score(comm, gt_map) if calc_f1 else 0 f.write(create_line(*benchmark.output_line(), log2(size), f1)) # Number of Communities per Node with open(result_dir + 'cover_node_comms.result', 'a') as f: for u in benchmark.get_graph().nodes(): num_comms = len(cover.subsetsOf(u)) if num_comms > 0: f.write(create_line(*benchmark.output_line(), log2(num_comms))) def get_communities(graph, cover): comm_map = 
defaultdict(lambda: set()) for u in graph.nodes(): comms = cover.subsetsOf(u) for c in comms: comm_map[c].add(u) return comm_map def f1_score(community, ground_truth): max_f1 = 0.0 for gt_comm in ground_truth.values(): overlap = len(gt_comm.intersection(community)) if overlap == 0: continue precision = overlap / len(community) recall = overlap / len(gt_comm) f1 = 2 * precision * recall / (precision + recall) max_f1 = max(max_f1, f1) return max_f1
normal
{ "blob_id": "dc5b9600828857cc5ea434a7b010cd8aa2589d22", "index": 6568, "step-1": "<mask token>\n\n\n@clockit\ndef analyze_cover(benchmarks, result_dir, calc_f1, append):\n if not append:\n print_headers(result_dir)\n for benchmark in benchmarks:\n count_benchmark_cover(result_dir, calc_f1, benchmark)\n\n\n<mask token>\n\n\ndef f1_score(community, ground_truth):\n max_f1 = 0.0\n for gt_comm in ground_truth.values():\n overlap = len(gt_comm.intersection(community))\n if overlap == 0:\n continue\n precision = overlap / len(community)\n recall = overlap / len(gt_comm)\n f1 = 2 * precision * recall / (precision + recall)\n max_f1 = max(max_f1, f1)\n return max_f1\n", "step-2": "<mask token>\n\n\n@clockit\ndef analyze_cover(benchmarks, result_dir, calc_f1, append):\n if not append:\n print_headers(result_dir)\n for benchmark in benchmarks:\n count_benchmark_cover(result_dir, calc_f1, benchmark)\n\n\n<mask token>\n\n\ndef get_communities(graph, cover):\n comm_map = defaultdict(lambda : set())\n for u in graph.nodes():\n comms = cover.subsetsOf(u)\n for c in comms:\n comm_map[c].add(u)\n return comm_map\n\n\ndef f1_score(community, ground_truth):\n max_f1 = 0.0\n for gt_comm in ground_truth.values():\n overlap = len(gt_comm.intersection(community))\n if overlap == 0:\n continue\n precision = overlap / len(community)\n recall = overlap / len(gt_comm)\n f1 = 2 * precision * recall / (precision + recall)\n max_f1 = max(max_f1, f1)\n return max_f1\n", "step-3": "<mask token>\n\n\n@clockit\ndef analyze_cover(benchmarks, result_dir, calc_f1, append):\n if not append:\n print_headers(result_dir)\n for benchmark in benchmarks:\n count_benchmark_cover(result_dir, calc_f1, benchmark)\n\n\ndef print_headers(result_dir):\n with open(result_dir + 'cover_num_comms.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Number of Communities'))\n with open(result_dir + 'cover_comm_sizes.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 
'Community Size', 'F1 Score'))\n with open(result_dir + 'cover_node_comms.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Number of Communities per Node'))\n\n\ndef count_benchmark_cover(result_dir, calc_f1, benchmark):\n cover = benchmark.get_cover()\n ground_truth = benchmark.get_ground_truth()\n comm_map = get_communities(benchmark.get_graph(), cover)\n gt_map = get_communities(benchmark.get_graph(), ground_truth)\n comm_sizes = cover.subsetSizeMap()\n with open(result_dir + 'cover_num_comms.result', 'a') as f:\n f.write(create_line(*benchmark.output_line(), cover.numberOfSubsets()))\n with open(result_dir + 'cover_comm_sizes.result', 'a') as f:\n for u in cover.getSubsetIds():\n comm = comm_map[u]\n size = comm_sizes[u]\n f1 = f1_score(comm, gt_map) if calc_f1 else 0\n f.write(create_line(*benchmark.output_line(), log2(size), f1))\n with open(result_dir + 'cover_node_comms.result', 'a') as f:\n for u in benchmark.get_graph().nodes():\n num_comms = len(cover.subsetsOf(u))\n if num_comms > 0:\n f.write(create_line(*benchmark.output_line(), log2(num_comms)))\n\n\ndef get_communities(graph, cover):\n comm_map = defaultdict(lambda : set())\n for u in graph.nodes():\n comms = cover.subsetsOf(u)\n for c in comms:\n comm_map[c].add(u)\n return comm_map\n\n\ndef f1_score(community, ground_truth):\n max_f1 = 0.0\n for gt_comm in ground_truth.values():\n overlap = len(gt_comm.intersection(community))\n if overlap == 0:\n continue\n precision = overlap / len(community)\n recall = overlap / len(gt_comm)\n f1 = 2 * precision * recall / (precision + recall)\n max_f1 = max(max_f1, f1)\n return max_f1\n", "step-4": "from math import log2\nfrom egosplit.benchmarks.data_structures.cover_benchmark import *\nfrom egosplit.benchmarks.evaluation.utility import create_line\nfrom networkit.stopwatch import clockit\n\n\n@clockit\ndef analyze_cover(benchmarks, result_dir, calc_f1, append):\n if not append:\n print_headers(result_dir)\n for benchmark in 
benchmarks:\n count_benchmark_cover(result_dir, calc_f1, benchmark)\n\n\ndef print_headers(result_dir):\n with open(result_dir + 'cover_num_comms.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Number of Communities'))\n with open(result_dir + 'cover_comm_sizes.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Community Size', 'F1 Score'))\n with open(result_dir + 'cover_node_comms.result', 'w') as f:\n f.write(create_line(*CoverBenchmark.output_header(),\n 'Number of Communities per Node'))\n\n\ndef count_benchmark_cover(result_dir, calc_f1, benchmark):\n cover = benchmark.get_cover()\n ground_truth = benchmark.get_ground_truth()\n comm_map = get_communities(benchmark.get_graph(), cover)\n gt_map = get_communities(benchmark.get_graph(), ground_truth)\n comm_sizes = cover.subsetSizeMap()\n with open(result_dir + 'cover_num_comms.result', 'a') as f:\n f.write(create_line(*benchmark.output_line(), cover.numberOfSubsets()))\n with open(result_dir + 'cover_comm_sizes.result', 'a') as f:\n for u in cover.getSubsetIds():\n comm = comm_map[u]\n size = comm_sizes[u]\n f1 = f1_score(comm, gt_map) if calc_f1 else 0\n f.write(create_line(*benchmark.output_line(), log2(size), f1))\n with open(result_dir + 'cover_node_comms.result', 'a') as f:\n for u in benchmark.get_graph().nodes():\n num_comms = len(cover.subsetsOf(u))\n if num_comms > 0:\n f.write(create_line(*benchmark.output_line(), log2(num_comms)))\n\n\ndef get_communities(graph, cover):\n comm_map = defaultdict(lambda : set())\n for u in graph.nodes():\n comms = cover.subsetsOf(u)\n for c in comms:\n comm_map[c].add(u)\n return comm_map\n\n\ndef f1_score(community, ground_truth):\n max_f1 = 0.0\n for gt_comm in ground_truth.values():\n overlap = len(gt_comm.intersection(community))\n if overlap == 0:\n continue\n precision = overlap / len(community)\n recall = overlap / len(gt_comm)\n f1 = 2 * precision * recall / (precision + recall)\n max_f1 = max(max_f1, 
f1)\n return max_f1\n", "step-5": "from math import log2\n\nfrom egosplit.benchmarks.data_structures.cover_benchmark import *\nfrom egosplit.benchmarks.evaluation.utility import create_line\nfrom networkit.stopwatch import clockit\n\n\n# Analyse the result cover of a benchmark run\n@clockit\ndef analyze_cover(benchmarks, result_dir, calc_f1, append):\n\tif not append:\n\t\tprint_headers(result_dir)\n\n\tfor benchmark in benchmarks:\n\t\tcount_benchmark_cover(result_dir, calc_f1, benchmark)\n\n\n# Print output file headers\ndef print_headers(result_dir):\n\twith open(result_dir + 'cover_num_comms.result', 'w') as f:\n\t\tf.write(create_line(*CoverBenchmark.output_header(), 'Number of Communities'))\n\twith open(result_dir + 'cover_comm_sizes.result', 'w') as f:\n\t\tf.write(create_line(*CoverBenchmark.output_header(), 'Community Size', 'F1 Score'))\n\twith open(result_dir + 'cover_node_comms.result', 'w') as f:\n\t\tf.write(create_line(*CoverBenchmark.output_header(), 'Number of Communities per Node'))\n\n\n# Count the number of communities and their sizes\ndef count_benchmark_cover(result_dir, calc_f1, benchmark):\n\tcover = benchmark.get_cover()\n\tground_truth = benchmark.get_ground_truth()\n\tcomm_map = get_communities(benchmark.get_graph(), cover)\n\tgt_map = get_communities(benchmark.get_graph(), ground_truth)\n\tcomm_sizes = cover.subsetSizeMap()\n\n\t# Number of communities\n\twith open(result_dir + 'cover_num_comms.result', 'a') as f:\n\t\tf.write(create_line(*benchmark.output_line(), cover.numberOfSubsets()))\n\n\t# Community sizes and F1 scores\n\twith open(result_dir + 'cover_comm_sizes.result', 'a') as f:\n\t\tfor u in cover.getSubsetIds():\n\t\t\tcomm = comm_map[u]\n\t\t\tsize = comm_sizes[u]\n\t\t\tf1 = f1_score(comm, gt_map) if calc_f1 else 0\n\t\t\tf.write(create_line(*benchmark.output_line(), log2(size), f1))\n\n\t# Number of Communities per Node\n\twith open(result_dir + 'cover_node_comms.result', 'a') as f:\n\t\tfor u in 
benchmark.get_graph().nodes():\n\t\t\tnum_comms = len(cover.subsetsOf(u))\n\t\t\tif num_comms > 0:\n\t\t\t\tf.write(create_line(*benchmark.output_line(), log2(num_comms)))\n\n\ndef get_communities(graph, cover):\n\tcomm_map = defaultdict(lambda: set())\n\tfor u in graph.nodes():\n\t\tcomms = cover.subsetsOf(u)\n\t\tfor c in comms:\n\t\t\tcomm_map[c].add(u)\n\n\treturn comm_map\n\n\ndef f1_score(community, ground_truth):\n\tmax_f1 = 0.0\n\tfor gt_comm in ground_truth.values():\n\t\toverlap = len(gt_comm.intersection(community))\n\t\tif overlap == 0:\n\t\t\tcontinue\n\t\tprecision = overlap / len(community)\n\t\trecall = overlap / len(gt_comm)\n\t\tf1 = 2 * precision * recall / (precision + recall)\n\t\tmax_f1 = max(max_f1, f1)\n\n\treturn max_f1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "step-ids": [ 2, 3, 5, 6, 7 ] }
[ 2, 3, 5, 6, 7 ]
def sort(L): n = len(L) if n < 2: return L L1, L2 = L[:n // 2], L[n // 2:] return merge(sort(L1), sort(L2)) def merge(L1, L2): if L1 == []: return L2 if L2 == []: return L1 x1, R1 = L1[0], L1[1:] x2, R2 = L2[0], L2[1:] if x1 <= x2: return [x1] + merge(R1, L2) else: return [x2] + merge(L1, R2) print(sort([9, 7, 8, 0, 5, 6, 4, 1, 2, 3]))
normal
{ "blob_id": "056636e2220e529d3f66872a4a48c0984cda1ce4", "index": 6617, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef merge(L1, L2):\n if L1 == []:\n return L2\n if L2 == []:\n return L1\n x1, R1 = L1[0], L1[1:]\n x2, R2 = L2[0], L2[1:]\n if x1 <= x2:\n return [x1] + merge(R1, L2)\n else:\n return [x2] + merge(L1, R2)\n\n\n<mask token>\n", "step-3": "def sort(L):\n n = len(L)\n if n < 2:\n return L\n L1, L2 = L[:n // 2], L[n // 2:]\n return merge(sort(L1), sort(L2))\n\n\ndef merge(L1, L2):\n if L1 == []:\n return L2\n if L2 == []:\n return L1\n x1, R1 = L1[0], L1[1:]\n x2, R2 = L2[0], L2[1:]\n if x1 <= x2:\n return [x1] + merge(R1, L2)\n else:\n return [x2] + merge(L1, R2)\n\n\n<mask token>\n", "step-4": "def sort(L):\n n = len(L)\n if n < 2:\n return L\n L1, L2 = L[:n // 2], L[n // 2:]\n return merge(sort(L1), sort(L2))\n\n\ndef merge(L1, L2):\n if L1 == []:\n return L2\n if L2 == []:\n return L1\n x1, R1 = L1[0], L1[1:]\n x2, R2 = L2[0], L2[1:]\n if x1 <= x2:\n return [x1] + merge(R1, L2)\n else:\n return [x2] + merge(L1, R2)\n\n\nprint(sort([9, 7, 8, 0, 5, 6, 4, 1, 2, 3]))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class ChainedTest(BitcoinTestFramework): def set_test_params(self): """ our test network requires a peer node so that getblocktemplate succeeds """ self.num_nodes = 2 chained_args = ['-limitancestorcount=2000', '-limitdescendantcount=2000', '-limitancestorsize=1000', '-limitdescendantsize=1000', '-magneticanomalyactivationtime=%d' % MAGNETIC_ANOMALY_START_TIME] config_node2 = chained_args.copy() if DEBUG_MODE: chained_args.append(DEBUG_MODE) self.extra_args = [chained_args, config_node2] def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): send_value = satoshi_round((value - fee) / num_outputs) inputs = [{'txid': parent_txid, 'vout': vout}] outputs = {} for i in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransaction(rawtx) sendtx_start = time.perf_counter() new_txid = node.sendrawtransaction(signedtx['hex']) sendtx_stop = time.perf_counter() fulltx = node.getrawtransaction(new_txid, 1) assert len(fulltx['vout']) == num_outputs return new_txid, send_value, sendtx_stop - sendtx_start, fulltx['size'] def mine_blocks(self): """ Mine some blocks and have them mature. """ self.nodes[0].generate(101) self.utxo = self.nodes[0].listunspent(10) self.txid = self.utxo[0]['txid'] self.coinbasetx = self.txid self.vout = self.utxo[0]['vout'] self.value = self.utxo[0]['amount'] self.fee = Decimal('0.0001') self.tip = int('0x' + self.nodes[0].getbestblockhash(), 0) self.block_time = int(time.time()) + 1 def send_chain_to_node(self): """ Generates tx chain and send it to node """ for i in range(CHAINED_TX): sent_txid, sent_value, this_sendtx, tx_size = (self. 
chain_transaction(self.nodes[0], self.txid, 0, self.value, self.fee, 1)) if not self.chain_top: self.chain_top = sent_txid self.txid = sent_txid self.value = sent_value self.chain.append(sent_txid) self.mempool_send += this_sendtx self.mempool_size += tx_size def create_new_block(self): """ Create a new block with an anyone-can-spend coinbase """ block = create_block(self.tip, create_coinbase(self.height), self. block_time) self.block_time += 1 block.solve() return block <|reserved_special_token_0|> <|reserved_special_token_0|> def run_test(self): self.log.info('Starting Test with {0} Chained Transactions'.format( CHAINED_TX)) self.chain_top = None self.mine_blocks() self.mempool_send = 0 self.mempool_size = 0 self.chain = [] self.send_chain_to_node() assert self.mempool_count() == CHAINED_TX mempool = self.nodes[0].getrawmempool(True) self.log.info('tx at top has {} descendants'.format(mempool[self. chain_top]['descendantcount'])) assert mempool[self.chain_top]['descendantcount'] == CHAINED_TX self.height = 1 self.block1 = self.create_new_block() self.tip = self.block1.sha256 self.height += 1 for i in range(100): block = self.create_new_block() self.tip = block.sha256 self.height += 1 self.runs = [] for test_iteration in range(TEST_ITERATIONS): gbt_start = time.perf_counter() templat = self.nodes[0].getblocktemplate() gbt_stop = time.perf_counter() assert len(templat['transactions']) == CHAINED_TX self.runs.append(gbt_stop - gbt_start) self.log.info('Mempool size {0}'.format(self.mempool_size)) self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send)) if len(self.runs) > 1: self.log.info('run times {}'.format(self.runs)) self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs) / len( self.runs))) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class ChainedTest(BitcoinTestFramework): def set_test_params(self): """ our test network requires a peer node so that getblocktemplate succeeds """ self.num_nodes = 2 
chained_args = ['-limitancestorcount=2000', '-limitdescendantcount=2000', '-limitancestorsize=1000', '-limitdescendantsize=1000', '-magneticanomalyactivationtime=%d' % MAGNETIC_ANOMALY_START_TIME] config_node2 = chained_args.copy() if DEBUG_MODE: chained_args.append(DEBUG_MODE) self.extra_args = [chained_args, config_node2] def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): send_value = satoshi_round((value - fee) / num_outputs) inputs = [{'txid': parent_txid, 'vout': vout}] outputs = {} for i in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransaction(rawtx) sendtx_start = time.perf_counter() new_txid = node.sendrawtransaction(signedtx['hex']) sendtx_stop = time.perf_counter() fulltx = node.getrawtransaction(new_txid, 1) assert len(fulltx['vout']) == num_outputs return new_txid, send_value, sendtx_stop - sendtx_start, fulltx['size'] def mine_blocks(self): """ Mine some blocks and have them mature. """ self.nodes[0].generate(101) self.utxo = self.nodes[0].listunspent(10) self.txid = self.utxo[0]['txid'] self.coinbasetx = self.txid self.vout = self.utxo[0]['vout'] self.value = self.utxo[0]['amount'] self.fee = Decimal('0.0001') self.tip = int('0x' + self.nodes[0].getbestblockhash(), 0) self.block_time = int(time.time()) + 1 def send_chain_to_node(self): """ Generates tx chain and send it to node """ for i in range(CHAINED_TX): sent_txid, sent_value, this_sendtx, tx_size = (self. chain_transaction(self.nodes[0], self.txid, 0, self.value, self.fee, 1)) if not self.chain_top: self.chain_top = sent_txid self.txid = sent_txid self.value = sent_value self.chain.append(sent_txid) self.mempool_send += this_sendtx self.mempool_size += tx_size def create_new_block(self): """ Create a new block with an anyone-can-spend coinbase """ block = create_block(self.tip, create_coinbase(self.height), self. 
block_time) self.block_time += 1 block.solve() return block <|reserved_special_token_0|> def dumppool(self, mempool): """ Show list of chained tx in mempool with parent(depends) """ def sortdepends(e): return e['descendantcount'] sortedlist = [[k, v] for k, v in mempool.items()] sortedlist = sorted(sortedlist, key=lambda l: l[1][ 'descendantcount'], reverse=True) for memkv in sortedlist: memtx = memkv[1] self.log.info('{} {} {}'.format(memkv[0], memtx[ 'descendantcount'], memtx['depends'])) def run_test(self): self.log.info('Starting Test with {0} Chained Transactions'.format( CHAINED_TX)) self.chain_top = None self.mine_blocks() self.mempool_send = 0 self.mempool_size = 0 self.chain = [] self.send_chain_to_node() assert self.mempool_count() == CHAINED_TX mempool = self.nodes[0].getrawmempool(True) self.log.info('tx at top has {} descendants'.format(mempool[self. chain_top]['descendantcount'])) assert mempool[self.chain_top]['descendantcount'] == CHAINED_TX self.height = 1 self.block1 = self.create_new_block() self.tip = self.block1.sha256 self.height += 1 for i in range(100): block = self.create_new_block() self.tip = block.sha256 self.height += 1 self.runs = [] for test_iteration in range(TEST_ITERATIONS): gbt_start = time.perf_counter() templat = self.nodes[0].getblocktemplate() gbt_stop = time.perf_counter() assert len(templat['transactions']) == CHAINED_TX self.runs.append(gbt_stop - gbt_start) self.log.info('Mempool size {0}'.format(self.mempool_size)) self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send)) if len(self.runs) > 1: self.log.info('run times {}'.format(self.runs)) self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs) / len( self.runs))) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class ChainedTest(BitcoinTestFramework): def set_test_params(self): """ our test network requires a peer node so that getblocktemplate succeeds """ self.num_nodes = 2 chained_args = ['-limitancestorcount=2000', 
'-limitdescendantcount=2000', '-limitancestorsize=1000', '-limitdescendantsize=1000', '-magneticanomalyactivationtime=%d' % MAGNETIC_ANOMALY_START_TIME] config_node2 = chained_args.copy() if DEBUG_MODE: chained_args.append(DEBUG_MODE) self.extra_args = [chained_args, config_node2] def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): send_value = satoshi_round((value - fee) / num_outputs) inputs = [{'txid': parent_txid, 'vout': vout}] outputs = {} for i in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransaction(rawtx) sendtx_start = time.perf_counter() new_txid = node.sendrawtransaction(signedtx['hex']) sendtx_stop = time.perf_counter() fulltx = node.getrawtransaction(new_txid, 1) assert len(fulltx['vout']) == num_outputs return new_txid, send_value, sendtx_stop - sendtx_start, fulltx['size'] def mine_blocks(self): """ Mine some blocks and have them mature. """ self.nodes[0].generate(101) self.utxo = self.nodes[0].listunspent(10) self.txid = self.utxo[0]['txid'] self.coinbasetx = self.txid self.vout = self.utxo[0]['vout'] self.value = self.utxo[0]['amount'] self.fee = Decimal('0.0001') self.tip = int('0x' + self.nodes[0].getbestblockhash(), 0) self.block_time = int(time.time()) + 1 def send_chain_to_node(self): """ Generates tx chain and send it to node """ for i in range(CHAINED_TX): sent_txid, sent_value, this_sendtx, tx_size = (self. chain_transaction(self.nodes[0], self.txid, 0, self.value, self.fee, 1)) if not self.chain_top: self.chain_top = sent_txid self.txid = sent_txid self.value = sent_value self.chain.append(sent_txid) self.mempool_send += this_sendtx self.mempool_size += tx_size def create_new_block(self): """ Create a new block with an anyone-can-spend coinbase """ block = create_block(self.tip, create_coinbase(self.height), self. 
block_time) self.block_time += 1 block.solve() return block def mempool_count(self): """ get count of tx in mempool """ mininginfo = self.nodes[0].getmininginfo() return mininginfo['pooledtx'] def dumppool(self, mempool): """ Show list of chained tx in mempool with parent(depends) """ def sortdepends(e): return e['descendantcount'] sortedlist = [[k, v] for k, v in mempool.items()] sortedlist = sorted(sortedlist, key=lambda l: l[1][ 'descendantcount'], reverse=True) for memkv in sortedlist: memtx = memkv[1] self.log.info('{} {} {}'.format(memkv[0], memtx[ 'descendantcount'], memtx['depends'])) def run_test(self): self.log.info('Starting Test with {0} Chained Transactions'.format( CHAINED_TX)) self.chain_top = None self.mine_blocks() self.mempool_send = 0 self.mempool_size = 0 self.chain = [] self.send_chain_to_node() assert self.mempool_count() == CHAINED_TX mempool = self.nodes[0].getrawmempool(True) self.log.info('tx at top has {} descendants'.format(mempool[self. chain_top]['descendantcount'])) assert mempool[self.chain_top]['descendantcount'] == CHAINED_TX self.height = 1 self.block1 = self.create_new_block() self.tip = self.block1.sha256 self.height += 1 for i in range(100): block = self.create_new_block() self.tip = block.sha256 self.height += 1 self.runs = [] for test_iteration in range(TEST_ITERATIONS): gbt_start = time.perf_counter() templat = self.nodes[0].getblocktemplate() gbt_stop = time.perf_counter() assert len(templat['transactions']) == CHAINED_TX self.runs.append(gbt_stop - gbt_start) self.log.info('Mempool size {0}'.format(self.mempool_size)) self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send)) if len(self.runs) > 1: self.log.info('run times {}'.format(self.runs)) self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs) / len( self.runs))) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if len(sys.argv) > 1: CHAINED_TX = int(sys.argv[1]) <|reserved_special_token_0|> if len(sys.argv) > 2: 
TEST_ITERATIONS = int(sys.argv[2]) <|reserved_special_token_0|> class ChainedTest(BitcoinTestFramework): def set_test_params(self): """ our test network requires a peer node so that getblocktemplate succeeds """ self.num_nodes = 2 chained_args = ['-limitancestorcount=2000', '-limitdescendantcount=2000', '-limitancestorsize=1000', '-limitdescendantsize=1000', '-magneticanomalyactivationtime=%d' % MAGNETIC_ANOMALY_START_TIME] config_node2 = chained_args.copy() if DEBUG_MODE: chained_args.append(DEBUG_MODE) self.extra_args = [chained_args, config_node2] def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): send_value = satoshi_round((value - fee) / num_outputs) inputs = [{'txid': parent_txid, 'vout': vout}] outputs = {} for i in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransaction(rawtx) sendtx_start = time.perf_counter() new_txid = node.sendrawtransaction(signedtx['hex']) sendtx_stop = time.perf_counter() fulltx = node.getrawtransaction(new_txid, 1) assert len(fulltx['vout']) == num_outputs return new_txid, send_value, sendtx_stop - sendtx_start, fulltx['size'] def mine_blocks(self): """ Mine some blocks and have them mature. """ self.nodes[0].generate(101) self.utxo = self.nodes[0].listunspent(10) self.txid = self.utxo[0]['txid'] self.coinbasetx = self.txid self.vout = self.utxo[0]['vout'] self.value = self.utxo[0]['amount'] self.fee = Decimal('0.0001') self.tip = int('0x' + self.nodes[0].getbestblockhash(), 0) self.block_time = int(time.time()) + 1 def send_chain_to_node(self): """ Generates tx chain and send it to node """ for i in range(CHAINED_TX): sent_txid, sent_value, this_sendtx, tx_size = (self. 
chain_transaction(self.nodes[0], self.txid, 0, self.value, self.fee, 1)) if not self.chain_top: self.chain_top = sent_txid self.txid = sent_txid self.value = sent_value self.chain.append(sent_txid) self.mempool_send += this_sendtx self.mempool_size += tx_size def create_new_block(self): """ Create a new block with an anyone-can-spend coinbase """ block = create_block(self.tip, create_coinbase(self.height), self. block_time) self.block_time += 1 block.solve() return block def mempool_count(self): """ get count of tx in mempool """ mininginfo = self.nodes[0].getmininginfo() return mininginfo['pooledtx'] def dumppool(self, mempool): """ Show list of chained tx in mempool with parent(depends) """ def sortdepends(e): return e['descendantcount'] sortedlist = [[k, v] for k, v in mempool.items()] sortedlist = sorted(sortedlist, key=lambda l: l[1][ 'descendantcount'], reverse=True) for memkv in sortedlist: memtx = memkv[1] self.log.info('{} {} {}'.format(memkv[0], memtx[ 'descendantcount'], memtx['depends'])) def run_test(self): self.log.info('Starting Test with {0} Chained Transactions'.format( CHAINED_TX)) self.chain_top = None self.mine_blocks() self.mempool_send = 0 self.mempool_size = 0 self.chain = [] self.send_chain_to_node() assert self.mempool_count() == CHAINED_TX mempool = self.nodes[0].getrawmempool(True) self.log.info('tx at top has {} descendants'.format(mempool[self. 
chain_top]['descendantcount'])) assert mempool[self.chain_top]['descendantcount'] == CHAINED_TX self.height = 1 self.block1 = self.create_new_block() self.tip = self.block1.sha256 self.height += 1 for i in range(100): block = self.create_new_block() self.tip = block.sha256 self.height += 1 self.runs = [] for test_iteration in range(TEST_ITERATIONS): gbt_start = time.perf_counter() templat = self.nodes[0].getblocktemplate() gbt_stop = time.perf_counter() assert len(templat['transactions']) == CHAINED_TX self.runs.append(gbt_stop - gbt_start) self.log.info('Mempool size {0}'.format(self.mempool_size)) self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send)) if len(self.runs) > 1: self.log.info('run times {}'.format(self.runs)) self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs) / len( self.runs))) if __name__ == '__main__': ChainedTest().main() <|reserved_special_token_1|> #!/usr/bin/env python3 # Copyright (c) 2018 Nobody # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
"""Test perforance of descendant package (chained transactions)""" import time import copy from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.mininode import COIN from test_framework.blocktools import * """Read optional arguments from command line""" CHAINED_TX = 25 if len(sys.argv)>1: CHAINED_TX = int(sys.argv[1]) TEST_ITERATIONS = 1 if len(sys.argv)>2: TEST_ITERATIONS = int(sys.argv[2]) DEBUG_MODE = '-printtoconsole' MAX_ANCESTORS = CHAINED_TX MAX_DESCENDANTS = CHAINED_TX MAGNETIC_ANOMALY_START_TIME = 2000000000 class ChainedTest(BitcoinTestFramework): def set_test_params(self): ''' our test network requires a peer node so that getblocktemplate succeeds ''' self.num_nodes = 2 chained_args = ["-limitancestorcount=2000", "-limitdescendantcount=2000", "-limitancestorsize=1000", "-limitdescendantsize=1000", "-magneticanomalyactivationtime=%d" % MAGNETIC_ANOMALY_START_TIME ] config_node2 = chained_args.copy() if DEBUG_MODE: chained_args.append(DEBUG_MODE) self.extra_args = [chained_args, config_node2] # Build a transaction that spends parent_txid:vout # Return amount sent def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): send_value = satoshi_round((value - fee) / num_outputs) inputs = [{'txid': parent_txid, 'vout': vout}] outputs = {} for i in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransaction(rawtx) #measure the performance of sending the raw transaction to the node sendtx_start = time.perf_counter() new_txid = node.sendrawtransaction(signedtx['hex']) sendtx_stop = time.perf_counter() fulltx = node.getrawtransaction(new_txid, 1) #self.log.info('{0} => {1}'.format(parent_txid, fulltx['vout'][0])) # make sure we didn't generate a change output assert(len(fulltx['vout']) == num_outputs) return (new_txid, send_value, sendtx_stop - sendtx_start, fulltx['size']) def 
mine_blocks(self): ''' Mine some blocks and have them mature. ''' self.nodes[0].generate(101) self.utxo = self.nodes[0].listunspent(10) self.txid = self.utxo[0]['txid'] self.coinbasetx = self.txid self.vout = self.utxo[0]['vout'] self.value = self.utxo[0]['amount'] self.fee = Decimal("0.0001") self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0) self.block_time = int(time.time()) + 1 def send_chain_to_node(self): ''' Generates tx chain and send it to node ''' for i in range(CHAINED_TX): (sent_txid, sent_value, this_sendtx, tx_size) = self.chain_transaction( self.nodes[0], self.txid, 0, self.value, self.fee, 1) if not self.chain_top: self.chain_top = sent_txid self.txid = sent_txid self.value = sent_value self.chain.append(sent_txid) self.mempool_send += this_sendtx self.mempool_size += tx_size def create_new_block(self): ''' Create a new block with an anyone-can-spend coinbase ''' block = create_block( self.tip, create_coinbase(self.height), self.block_time) self.block_time += 1 block.solve() return block def mempool_count(self): ''' get count of tx in mempool ''' mininginfo = self.nodes[0].getmininginfo() return mininginfo['pooledtx'] def dumppool(self, mempool): ''' Show list of chained tx in mempool with parent(depends) ''' def sortdepends(e): return e['descendantcount'] sortedlist = [[k,v] for k,v in mempool.items()] sortedlist = sorted(sortedlist, key=lambda l: l[1]['descendantcount'], reverse=True) for memkv in sortedlist: memtx = memkv[1] self.log.info('{} {} {}'.format(memkv[0], memtx['descendantcount'], memtx['depends'])) def run_test(self): self.log.info('Starting Test with {0} Chained Transactions'.format(CHAINED_TX)) self.chain_top = None self.mine_blocks() self.mempool_send = 0 self.mempool_size = 0 self.chain = [] self.send_chain_to_node() # mempool should have all our tx assert(self.mempool_count() == CHAINED_TX) mempool = self.nodes[0].getrawmempool(True) self.log.info('tx at top has {} 
descendants'.format(mempool[self.chain_top]["descendantcount"])) assert(mempool[self.chain_top]["descendantcount"] == CHAINED_TX) #self.dumppool(mempool) self.height = 1 # create new block and save coinbase self.block1 = self.create_new_block() self.tip = self.block1.sha256 self.height += 1 #mature the block so we can spend the coinbase for i in range(100): block = self.create_new_block() self.tip = block.sha256 self.height += 1 #sync pool not needed as long as we are using node 0 which has all the tx we sent to it #sync_mempools(self.nodes, wait=1, timeout=100) self.runs=[] for test_iteration in range(TEST_ITERATIONS): # do not use perf_counter. use timer from -printtoconsole instead gbt_start = time.perf_counter() # assemble a block and validate all tx in it templat = self.nodes[0].getblocktemplate() gbt_stop = time.perf_counter() # make sure all tx got mined assert(len(templat['transactions']) == CHAINED_TX) self.runs.append(gbt_stop - gbt_start) #assert(self.mempool_count() == 0) self.log.info('Mempool size {0}'.format(self.mempool_size)) self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send)) if len(self.runs) > 1: self.log.info('run times {}'.format(self.runs)) self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs)/len(self.runs))) if __name__ == '__main__': ChainedTest().main()
flexible
{ "blob_id": "661eef8500309191514fd760b7518014dee2bb5f", "index": 9225, "step-1": "<mask token>\n\n\nclass ChainedTest(BitcoinTestFramework):\n\n def set_test_params(self):\n \"\"\" our test network requires a peer node so that getblocktemplate succeeds \"\"\"\n self.num_nodes = 2\n chained_args = ['-limitancestorcount=2000',\n '-limitdescendantcount=2000', '-limitancestorsize=1000',\n '-limitdescendantsize=1000', \n '-magneticanomalyactivationtime=%d' % MAGNETIC_ANOMALY_START_TIME]\n config_node2 = chained_args.copy()\n if DEBUG_MODE:\n chained_args.append(DEBUG_MODE)\n self.extra_args = [chained_args, config_node2]\n\n def chain_transaction(self, node, parent_txid, vout, value, fee,\n num_outputs):\n send_value = satoshi_round((value - fee) / num_outputs)\n inputs = [{'txid': parent_txid, 'vout': vout}]\n outputs = {}\n for i in range(num_outputs):\n outputs[node.getnewaddress()] = send_value\n rawtx = node.createrawtransaction(inputs, outputs)\n signedtx = node.signrawtransaction(rawtx)\n sendtx_start = time.perf_counter()\n new_txid = node.sendrawtransaction(signedtx['hex'])\n sendtx_stop = time.perf_counter()\n fulltx = node.getrawtransaction(new_txid, 1)\n assert len(fulltx['vout']) == num_outputs\n return new_txid, send_value, sendtx_stop - sendtx_start, fulltx['size']\n\n def mine_blocks(self):\n \"\"\" Mine some blocks and have them mature. 
\"\"\"\n self.nodes[0].generate(101)\n self.utxo = self.nodes[0].listunspent(10)\n self.txid = self.utxo[0]['txid']\n self.coinbasetx = self.txid\n self.vout = self.utxo[0]['vout']\n self.value = self.utxo[0]['amount']\n self.fee = Decimal('0.0001')\n self.tip = int('0x' + self.nodes[0].getbestblockhash(), 0)\n self.block_time = int(time.time()) + 1\n\n def send_chain_to_node(self):\n \"\"\" Generates tx chain and send it to node \"\"\"\n for i in range(CHAINED_TX):\n sent_txid, sent_value, this_sendtx, tx_size = (self.\n chain_transaction(self.nodes[0], self.txid, 0, self.value,\n self.fee, 1))\n if not self.chain_top:\n self.chain_top = sent_txid\n self.txid = sent_txid\n self.value = sent_value\n self.chain.append(sent_txid)\n self.mempool_send += this_sendtx\n self.mempool_size += tx_size\n\n def create_new_block(self):\n \"\"\" Create a new block with an anyone-can-spend coinbase \"\"\"\n block = create_block(self.tip, create_coinbase(self.height), self.\n block_time)\n self.block_time += 1\n block.solve()\n return block\n <mask token>\n <mask token>\n\n def run_test(self):\n self.log.info('Starting Test with {0} Chained Transactions'.format(\n CHAINED_TX))\n self.chain_top = None\n self.mine_blocks()\n self.mempool_send = 0\n self.mempool_size = 0\n self.chain = []\n self.send_chain_to_node()\n assert self.mempool_count() == CHAINED_TX\n mempool = self.nodes[0].getrawmempool(True)\n self.log.info('tx at top has {} descendants'.format(mempool[self.\n chain_top]['descendantcount']))\n assert mempool[self.chain_top]['descendantcount'] == CHAINED_TX\n self.height = 1\n self.block1 = self.create_new_block()\n self.tip = self.block1.sha256\n self.height += 1\n for i in range(100):\n block = self.create_new_block()\n self.tip = block.sha256\n self.height += 1\n self.runs = []\n for test_iteration in range(TEST_ITERATIONS):\n gbt_start = time.perf_counter()\n templat = self.nodes[0].getblocktemplate()\n gbt_stop = time.perf_counter()\n assert 
len(templat['transactions']) == CHAINED_TX\n self.runs.append(gbt_stop - gbt_start)\n self.log.info('Mempool size {0}'.format(self.mempool_size))\n self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send))\n if len(self.runs) > 1:\n self.log.info('run times {}'.format(self.runs))\n self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs) / len(\n self.runs)))\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass ChainedTest(BitcoinTestFramework):\n\n def set_test_params(self):\n \"\"\" our test network requires a peer node so that getblocktemplate succeeds \"\"\"\n self.num_nodes = 2\n chained_args = ['-limitancestorcount=2000',\n '-limitdescendantcount=2000', '-limitancestorsize=1000',\n '-limitdescendantsize=1000', \n '-magneticanomalyactivationtime=%d' % MAGNETIC_ANOMALY_START_TIME]\n config_node2 = chained_args.copy()\n if DEBUG_MODE:\n chained_args.append(DEBUG_MODE)\n self.extra_args = [chained_args, config_node2]\n\n def chain_transaction(self, node, parent_txid, vout, value, fee,\n num_outputs):\n send_value = satoshi_round((value - fee) / num_outputs)\n inputs = [{'txid': parent_txid, 'vout': vout}]\n outputs = {}\n for i in range(num_outputs):\n outputs[node.getnewaddress()] = send_value\n rawtx = node.createrawtransaction(inputs, outputs)\n signedtx = node.signrawtransaction(rawtx)\n sendtx_start = time.perf_counter()\n new_txid = node.sendrawtransaction(signedtx['hex'])\n sendtx_stop = time.perf_counter()\n fulltx = node.getrawtransaction(new_txid, 1)\n assert len(fulltx['vout']) == num_outputs\n return new_txid, send_value, sendtx_stop - sendtx_start, fulltx['size']\n\n def mine_blocks(self):\n \"\"\" Mine some blocks and have them mature. 
\"\"\"\n self.nodes[0].generate(101)\n self.utxo = self.nodes[0].listunspent(10)\n self.txid = self.utxo[0]['txid']\n self.coinbasetx = self.txid\n self.vout = self.utxo[0]['vout']\n self.value = self.utxo[0]['amount']\n self.fee = Decimal('0.0001')\n self.tip = int('0x' + self.nodes[0].getbestblockhash(), 0)\n self.block_time = int(time.time()) + 1\n\n def send_chain_to_node(self):\n \"\"\" Generates tx chain and send it to node \"\"\"\n for i in range(CHAINED_TX):\n sent_txid, sent_value, this_sendtx, tx_size = (self.\n chain_transaction(self.nodes[0], self.txid, 0, self.value,\n self.fee, 1))\n if not self.chain_top:\n self.chain_top = sent_txid\n self.txid = sent_txid\n self.value = sent_value\n self.chain.append(sent_txid)\n self.mempool_send += this_sendtx\n self.mempool_size += tx_size\n\n def create_new_block(self):\n \"\"\" Create a new block with an anyone-can-spend coinbase \"\"\"\n block = create_block(self.tip, create_coinbase(self.height), self.\n block_time)\n self.block_time += 1\n block.solve()\n return block\n <mask token>\n\n def dumppool(self, mempool):\n \"\"\" Show list of chained tx in mempool with parent(depends) \"\"\"\n\n def sortdepends(e):\n return e['descendantcount']\n sortedlist = [[k, v] for k, v in mempool.items()]\n sortedlist = sorted(sortedlist, key=lambda l: l[1][\n 'descendantcount'], reverse=True)\n for memkv in sortedlist:\n memtx = memkv[1]\n self.log.info('{} {} {}'.format(memkv[0], memtx[\n 'descendantcount'], memtx['depends']))\n\n def run_test(self):\n self.log.info('Starting Test with {0} Chained Transactions'.format(\n CHAINED_TX))\n self.chain_top = None\n self.mine_blocks()\n self.mempool_send = 0\n self.mempool_size = 0\n self.chain = []\n self.send_chain_to_node()\n assert self.mempool_count() == CHAINED_TX\n mempool = self.nodes[0].getrawmempool(True)\n self.log.info('tx at top has {} descendants'.format(mempool[self.\n chain_top]['descendantcount']))\n assert mempool[self.chain_top]['descendantcount'] == 
CHAINED_TX\n self.height = 1\n self.block1 = self.create_new_block()\n self.tip = self.block1.sha256\n self.height += 1\n for i in range(100):\n block = self.create_new_block()\n self.tip = block.sha256\n self.height += 1\n self.runs = []\n for test_iteration in range(TEST_ITERATIONS):\n gbt_start = time.perf_counter()\n templat = self.nodes[0].getblocktemplate()\n gbt_stop = time.perf_counter()\n assert len(templat['transactions']) == CHAINED_TX\n self.runs.append(gbt_stop - gbt_start)\n self.log.info('Mempool size {0}'.format(self.mempool_size))\n self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send))\n if len(self.runs) > 1:\n self.log.info('run times {}'.format(self.runs))\n self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs) / len(\n self.runs)))\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass ChainedTest(BitcoinTestFramework):\n\n def set_test_params(self):\n \"\"\" our test network requires a peer node so that getblocktemplate succeeds \"\"\"\n self.num_nodes = 2\n chained_args = ['-limitancestorcount=2000',\n '-limitdescendantcount=2000', '-limitancestorsize=1000',\n '-limitdescendantsize=1000', \n '-magneticanomalyactivationtime=%d' % MAGNETIC_ANOMALY_START_TIME]\n config_node2 = chained_args.copy()\n if DEBUG_MODE:\n chained_args.append(DEBUG_MODE)\n self.extra_args = [chained_args, config_node2]\n\n def chain_transaction(self, node, parent_txid, vout, value, fee,\n num_outputs):\n send_value = satoshi_round((value - fee) / num_outputs)\n inputs = [{'txid': parent_txid, 'vout': vout}]\n outputs = {}\n for i in range(num_outputs):\n outputs[node.getnewaddress()] = send_value\n rawtx = node.createrawtransaction(inputs, outputs)\n signedtx = node.signrawtransaction(rawtx)\n sendtx_start = time.perf_counter()\n new_txid = node.sendrawtransaction(signedtx['hex'])\n sendtx_stop = time.perf_counter()\n fulltx = node.getrawtransaction(new_txid, 1)\n assert len(fulltx['vout']) == num_outputs\n return new_txid, send_value, sendtx_stop - 
sendtx_start, fulltx['size']\n\n def mine_blocks(self):\n \"\"\" Mine some blocks and have them mature. \"\"\"\n self.nodes[0].generate(101)\n self.utxo = self.nodes[0].listunspent(10)\n self.txid = self.utxo[0]['txid']\n self.coinbasetx = self.txid\n self.vout = self.utxo[0]['vout']\n self.value = self.utxo[0]['amount']\n self.fee = Decimal('0.0001')\n self.tip = int('0x' + self.nodes[0].getbestblockhash(), 0)\n self.block_time = int(time.time()) + 1\n\n def send_chain_to_node(self):\n \"\"\" Generates tx chain and send it to node \"\"\"\n for i in range(CHAINED_TX):\n sent_txid, sent_value, this_sendtx, tx_size = (self.\n chain_transaction(self.nodes[0], self.txid, 0, self.value,\n self.fee, 1))\n if not self.chain_top:\n self.chain_top = sent_txid\n self.txid = sent_txid\n self.value = sent_value\n self.chain.append(sent_txid)\n self.mempool_send += this_sendtx\n self.mempool_size += tx_size\n\n def create_new_block(self):\n \"\"\" Create a new block with an anyone-can-spend coinbase \"\"\"\n block = create_block(self.tip, create_coinbase(self.height), self.\n block_time)\n self.block_time += 1\n block.solve()\n return block\n\n def mempool_count(self):\n \"\"\" get count of tx in mempool \"\"\"\n mininginfo = self.nodes[0].getmininginfo()\n return mininginfo['pooledtx']\n\n def dumppool(self, mempool):\n \"\"\" Show list of chained tx in mempool with parent(depends) \"\"\"\n\n def sortdepends(e):\n return e['descendantcount']\n sortedlist = [[k, v] for k, v in mempool.items()]\n sortedlist = sorted(sortedlist, key=lambda l: l[1][\n 'descendantcount'], reverse=True)\n for memkv in sortedlist:\n memtx = memkv[1]\n self.log.info('{} {} {}'.format(memkv[0], memtx[\n 'descendantcount'], memtx['depends']))\n\n def run_test(self):\n self.log.info('Starting Test with {0} Chained Transactions'.format(\n CHAINED_TX))\n self.chain_top = None\n self.mine_blocks()\n self.mempool_send = 0\n self.mempool_size = 0\n self.chain = []\n self.send_chain_to_node()\n assert 
self.mempool_count() == CHAINED_TX\n mempool = self.nodes[0].getrawmempool(True)\n self.log.info('tx at top has {} descendants'.format(mempool[self.\n chain_top]['descendantcount']))\n assert mempool[self.chain_top]['descendantcount'] == CHAINED_TX\n self.height = 1\n self.block1 = self.create_new_block()\n self.tip = self.block1.sha256\n self.height += 1\n for i in range(100):\n block = self.create_new_block()\n self.tip = block.sha256\n self.height += 1\n self.runs = []\n for test_iteration in range(TEST_ITERATIONS):\n gbt_start = time.perf_counter()\n templat = self.nodes[0].getblocktemplate()\n gbt_stop = time.perf_counter()\n assert len(templat['transactions']) == CHAINED_TX\n self.runs.append(gbt_stop - gbt_start)\n self.log.info('Mempool size {0}'.format(self.mempool_size))\n self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send))\n if len(self.runs) > 1:\n self.log.info('run times {}'.format(self.runs))\n self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs) / len(\n self.runs)))\n\n\n<mask token>\n", "step-4": "<mask token>\nif len(sys.argv) > 1:\n CHAINED_TX = int(sys.argv[1])\n<mask token>\nif len(sys.argv) > 2:\n TEST_ITERATIONS = int(sys.argv[2])\n<mask token>\n\n\nclass ChainedTest(BitcoinTestFramework):\n\n def set_test_params(self):\n \"\"\" our test network requires a peer node so that getblocktemplate succeeds \"\"\"\n self.num_nodes = 2\n chained_args = ['-limitancestorcount=2000',\n '-limitdescendantcount=2000', '-limitancestorsize=1000',\n '-limitdescendantsize=1000', \n '-magneticanomalyactivationtime=%d' % MAGNETIC_ANOMALY_START_TIME]\n config_node2 = chained_args.copy()\n if DEBUG_MODE:\n chained_args.append(DEBUG_MODE)\n self.extra_args = [chained_args, config_node2]\n\n def chain_transaction(self, node, parent_txid, vout, value, fee,\n num_outputs):\n send_value = satoshi_round((value - fee) / num_outputs)\n inputs = [{'txid': parent_txid, 'vout': vout}]\n outputs = {}\n for i in range(num_outputs):\n 
outputs[node.getnewaddress()] = send_value\n rawtx = node.createrawtransaction(inputs, outputs)\n signedtx = node.signrawtransaction(rawtx)\n sendtx_start = time.perf_counter()\n new_txid = node.sendrawtransaction(signedtx['hex'])\n sendtx_stop = time.perf_counter()\n fulltx = node.getrawtransaction(new_txid, 1)\n assert len(fulltx['vout']) == num_outputs\n return new_txid, send_value, sendtx_stop - sendtx_start, fulltx['size']\n\n def mine_blocks(self):\n \"\"\" Mine some blocks and have them mature. \"\"\"\n self.nodes[0].generate(101)\n self.utxo = self.nodes[0].listunspent(10)\n self.txid = self.utxo[0]['txid']\n self.coinbasetx = self.txid\n self.vout = self.utxo[0]['vout']\n self.value = self.utxo[0]['amount']\n self.fee = Decimal('0.0001')\n self.tip = int('0x' + self.nodes[0].getbestblockhash(), 0)\n self.block_time = int(time.time()) + 1\n\n def send_chain_to_node(self):\n \"\"\" Generates tx chain and send it to node \"\"\"\n for i in range(CHAINED_TX):\n sent_txid, sent_value, this_sendtx, tx_size = (self.\n chain_transaction(self.nodes[0], self.txid, 0, self.value,\n self.fee, 1))\n if not self.chain_top:\n self.chain_top = sent_txid\n self.txid = sent_txid\n self.value = sent_value\n self.chain.append(sent_txid)\n self.mempool_send += this_sendtx\n self.mempool_size += tx_size\n\n def create_new_block(self):\n \"\"\" Create a new block with an anyone-can-spend coinbase \"\"\"\n block = create_block(self.tip, create_coinbase(self.height), self.\n block_time)\n self.block_time += 1\n block.solve()\n return block\n\n def mempool_count(self):\n \"\"\" get count of tx in mempool \"\"\"\n mininginfo = self.nodes[0].getmininginfo()\n return mininginfo['pooledtx']\n\n def dumppool(self, mempool):\n \"\"\" Show list of chained tx in mempool with parent(depends) \"\"\"\n\n def sortdepends(e):\n return e['descendantcount']\n sortedlist = [[k, v] for k, v in mempool.items()]\n sortedlist = sorted(sortedlist, key=lambda l: l[1][\n 'descendantcount'], 
reverse=True)\n for memkv in sortedlist:\n memtx = memkv[1]\n self.log.info('{} {} {}'.format(memkv[0], memtx[\n 'descendantcount'], memtx['depends']))\n\n def run_test(self):\n self.log.info('Starting Test with {0} Chained Transactions'.format(\n CHAINED_TX))\n self.chain_top = None\n self.mine_blocks()\n self.mempool_send = 0\n self.mempool_size = 0\n self.chain = []\n self.send_chain_to_node()\n assert self.mempool_count() == CHAINED_TX\n mempool = self.nodes[0].getrawmempool(True)\n self.log.info('tx at top has {} descendants'.format(mempool[self.\n chain_top]['descendantcount']))\n assert mempool[self.chain_top]['descendantcount'] == CHAINED_TX\n self.height = 1\n self.block1 = self.create_new_block()\n self.tip = self.block1.sha256\n self.height += 1\n for i in range(100):\n block = self.create_new_block()\n self.tip = block.sha256\n self.height += 1\n self.runs = []\n for test_iteration in range(TEST_ITERATIONS):\n gbt_start = time.perf_counter()\n templat = self.nodes[0].getblocktemplate()\n gbt_stop = time.perf_counter()\n assert len(templat['transactions']) == CHAINED_TX\n self.runs.append(gbt_stop - gbt_start)\n self.log.info('Mempool size {0}'.format(self.mempool_size))\n self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send))\n if len(self.runs) > 1:\n self.log.info('run times {}'.format(self.runs))\n self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs) / len(\n self.runs)))\n\n\nif __name__ == '__main__':\n ChainedTest().main()\n", "step-5": "#!/usr/bin/env python3\n# Copyright (c) 2018 Nobody\n# Distributed under the MIT software license, see the accompanying\n# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n\"\"\"Test perforance of descendant package (chained transactions)\"\"\"\nimport time\nimport copy\nfrom test_framework.test_framework import BitcoinTestFramework\nfrom test_framework.util import *\nfrom test_framework.mininode import COIN\nfrom test_framework.blocktools import *\n\n\"\"\"Read optional 
arguments from command line\"\"\"\nCHAINED_TX = 25\nif len(sys.argv)>1:\n CHAINED_TX = int(sys.argv[1])\nTEST_ITERATIONS = 1\nif len(sys.argv)>2:\n TEST_ITERATIONS = int(sys.argv[2])\nDEBUG_MODE = '-printtoconsole'\n\nMAX_ANCESTORS = CHAINED_TX\nMAX_DESCENDANTS = CHAINED_TX\n\nMAGNETIC_ANOMALY_START_TIME = 2000000000\n\nclass ChainedTest(BitcoinTestFramework):\n\n def set_test_params(self):\n ''' our test network requires a peer node so that getblocktemplate succeeds '''\n self.num_nodes = 2\n chained_args = [\"-limitancestorcount=2000\", \"-limitdescendantcount=2000\",\n \"-limitancestorsize=1000\", \"-limitdescendantsize=1000\",\n \"-magneticanomalyactivationtime=%d\" % MAGNETIC_ANOMALY_START_TIME\n ]\n config_node2 = chained_args.copy()\n if DEBUG_MODE:\n chained_args.append(DEBUG_MODE)\n self.extra_args = [chained_args, config_node2]\n\n # Build a transaction that spends parent_txid:vout\n # Return amount sent\n def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):\n send_value = satoshi_round((value - fee) / num_outputs)\n inputs = [{'txid': parent_txid, 'vout': vout}]\n outputs = {}\n for i in range(num_outputs):\n outputs[node.getnewaddress()] = send_value\n rawtx = node.createrawtransaction(inputs, outputs)\n signedtx = node.signrawtransaction(rawtx)\n\n #measure the performance of sending the raw transaction to the node\n sendtx_start = time.perf_counter()\n new_txid = node.sendrawtransaction(signedtx['hex'])\n sendtx_stop = time.perf_counter()\n fulltx = node.getrawtransaction(new_txid, 1)\n\n #self.log.info('{0} => {1}'.format(parent_txid, fulltx['vout'][0]))\n\n # make sure we didn't generate a change output\n assert(len(fulltx['vout']) == num_outputs)\n return (new_txid, send_value, sendtx_stop - sendtx_start, fulltx['size'])\n\n def mine_blocks(self):\n ''' Mine some blocks and have them mature. 
'''\n self.nodes[0].generate(101)\n self.utxo = self.nodes[0].listunspent(10)\n self.txid = self.utxo[0]['txid']\n self.coinbasetx = self.txid\n self.vout = self.utxo[0]['vout']\n self.value = self.utxo[0]['amount']\n self.fee = Decimal(\"0.0001\")\n self.tip = int(\"0x\" + self.nodes[0].getbestblockhash(), 0)\n self.block_time = int(time.time()) + 1\n\n def send_chain_to_node(self):\n ''' Generates tx chain and send it to node '''\n for i in range(CHAINED_TX):\n (sent_txid, sent_value, this_sendtx, tx_size) = self.chain_transaction(\n self.nodes[0], self.txid, 0, self.value, self.fee, 1)\n if not self.chain_top:\n self.chain_top = sent_txid\n self.txid = sent_txid\n self.value = sent_value\n self.chain.append(sent_txid)\n self.mempool_send += this_sendtx\n self.mempool_size += tx_size\n\n def create_new_block(self):\n ''' Create a new block with an anyone-can-spend coinbase '''\n block = create_block(\n self.tip, create_coinbase(self.height), self.block_time)\n self.block_time += 1\n block.solve()\n return block\n\n def mempool_count(self):\n ''' get count of tx in mempool '''\n mininginfo = self.nodes[0].getmininginfo()\n return mininginfo['pooledtx']\n\n def dumppool(self, mempool):\n ''' Show list of chained tx in mempool with parent(depends) '''\n def sortdepends(e):\n return e['descendantcount']\n sortedlist = [[k,v] for k,v in mempool.items()]\n sortedlist = sorted(sortedlist, key=lambda l: l[1]['descendantcount'], reverse=True)\n for memkv in sortedlist:\n memtx = memkv[1]\n self.log.info('{} {} {}'.format(memkv[0], memtx['descendantcount'], memtx['depends']))\n\n def run_test(self):\n self.log.info('Starting Test with {0} Chained Transactions'.format(CHAINED_TX))\n self.chain_top = None\n\n self.mine_blocks()\n\n self.mempool_send = 0\n self.mempool_size = 0\n self.chain = []\n\n self.send_chain_to_node()\n\n # mempool should have all our tx\n assert(self.mempool_count() == CHAINED_TX)\n mempool = self.nodes[0].getrawmempool(True)\n self.log.info('tx at 
top has {} descendants'.format(mempool[self.chain_top][\"descendantcount\"]))\n assert(mempool[self.chain_top][\"descendantcount\"] == CHAINED_TX)\n\n #self.dumppool(mempool)\n\n self.height = 1\n\n # create new block and save coinbase\n self.block1 = self.create_new_block()\n self.tip = self.block1.sha256\n self.height += 1\n\n #mature the block so we can spend the coinbase\n for i in range(100):\n block = self.create_new_block()\n self.tip = block.sha256\n self.height += 1\n\n #sync pool not needed as long as we are using node 0 which has all the tx we sent to it\n #sync_mempools(self.nodes, wait=1, timeout=100)\n\n self.runs=[]\n for test_iteration in range(TEST_ITERATIONS):\n # do not use perf_counter. use timer from -printtoconsole instead\n gbt_start = time.perf_counter()\n # assemble a block and validate all tx in it\n templat = self.nodes[0].getblocktemplate()\n gbt_stop = time.perf_counter()\n # make sure all tx got mined\n assert(len(templat['transactions']) == CHAINED_TX)\n self.runs.append(gbt_stop - gbt_start)\n\n #assert(self.mempool_count() == 0)\n\n self.log.info('Mempool size {0}'.format(self.mempool_size))\n self.log.info('Send Tx took {0:.5f}s'.format(self.mempool_send))\n if len(self.runs) > 1:\n self.log.info('run times {}'.format(self.runs))\n self.log.info('GetBlkT took {0:.5f}s'.format(sum(self.runs)/len(self.runs)))\n\nif __name__ == '__main__':\n ChainedTest().main()\n", "step-ids": [ 7, 8, 9, 10, 13 ] }
[ 7, 8, 9, 10, 13 ]
# -*- coding: utf-8 -*- ########### SVN repository information ################### # $Date: $ # $Author: $ # $Revision: $ # $URL: $ # $Id: $ ########### SVN repository information ################### ''' *GSASIIfpaGUI: Fundamental Parameters Routines* =============================================== This module contains routines for getting Fundamental Parameters Approach (FPA) input, setting up for running the NIST XRD Fundamental Parameters Code, plotting the convolutors and computing a set of peaks generated by that code. ''' from __future__ import division, print_function import wx import os.path import numpy as np import NIST_profile as FP import GSASIIpath import GSASIIctrlGUI as G2G import GSASIIdataGUI as G2gd import GSASIIplot as G2plt import GSASIImath as G2mth import GSASIIpwd as G2pwd simParms = {} '''Parameters to set range for pattern simulation ''' parmDict = {'numWave':2} '''Parameter dict used for reading Topas-style values. These are converted to SI units and placed into :data:`NISTparms` ''' NISTparms = {} '''Parameters in a nested dict, with an entry for each concolutor. Entries in those dicts have values in SI units (of course). NISTparms can be can be input directly or can be from created from :data:`parmDict` by :func:`XferFPAsettings` ''' BraggBrentanoParms = [ ('divergence', 0.5, 'Bragg-Brentano divergence angle (degrees)'), ('soller_angle', 2.0, 'Soller slit axial divergence (degrees)'), ('Rs', 220, 'Diffractometer radius (mm)'), ('filament_length', 12., 'X-ray tube line focus length (mm)'), ('sample_length', 12., 'Illuminated sample length in axial direction (mm)'), ('receiving_slit_length', 12., 'Length of receiving slit in axial direction (mm)'), ('LAC_cm', 0.,'Linear absorption coef. 
adjusted for packing density (cm-1)'), ('sample_thickness', 1., 'Depth of sample (mm)'), ('convolution_steps', 8, 'Number of Fourier-space bins per two-theta step'), ('tube-tails_width', 0.04,'Tube filament width, in projection at takeoff angle (mm)'), ('tube-tails_L-tail', -1.,'Left-side tube tails width, in projection (mm)'), ('tube-tails_R-tail', 1.,'Right-side tube tails width, in projection (mm)'), ('tube-tails_rel-I', 0.001,'Tube tails fractional intensity (no units)'), ] '''FPA dict entries used in :func:`MakeTopasFPASizer`. Tuple contains a dict key, a default value and a description. These are the parameters needed for all Bragg Brentano instruments ''' BBPointDetector = [ ('receiving_slit_width', 0.2, 'Width of receiving slit (mm)'),] '''Additional FPA dict entries used in :func:`MakeTopasFPASizer` needed for Bragg Brentano instruments with point detectors. ''' BBPSDDetector = [ ('lpsd_th2_angular_range', 3.0, 'Angular range observed by PSD (degrees 2Theta)'), ('lpsd_equitorial_divergence', 0.1, 'Equatorial divergence of the primary beam (degrees)'),] '''Additional FPA dict entries used in :func:`MakeTopasFPASizer` needed for Bragg Brentano instruments with linear (1-D) PSD detectors. ''' Citation = '''MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. doi:10.6028/jres.120.014. ''' def SetCu2Wave(): '''Set the parameters to the two-line Cu K alpha 1+2 spectrum ''' parmDict['wave'] = {i:v for i,v in enumerate((1.540596,1.544493))} parmDict['int'] = {i:v for i,v in enumerate((0.653817, 0.346183))} parmDict['lwidth'] = {i:v for i,v in enumerate((0.501844,0.626579))} SetCu2Wave() # use these as default def MakeTopasFPASizer(G2frame,FPdlg,mode,SetButtonStatus): '''Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. Parameter input is modeled after Topas input parameters. 
:param wx.Window FPdlg: Frame or Dialog where GUI will appear :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or (linear) position sensitive detector :param dict parmDict: dict to place parameters. If empty, default values from globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in the array. :returns: a sizer with the GUI controls ''' def _onOK(event): XferFPAsettings(parmDict) SetButtonStatus(done=True) # done=True triggers the simulation FPdlg.Destroy() def _onClose(event): SetButtonStatus() FPdlg.Destroy() def _onAddWave(event): parmDict['numWave'] += 1 wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus) def _onRemWave(event): parmDict['numWave'] -= 1 wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus) def _onSetCu5Wave(event): parmDict['wave'] = {i:v for i,v in enumerate((1.534753,1.540596,1.541058,1.54441,1.544721))} parmDict['int'] = {i:v for i,v in enumerate((0.0159, 0.5791, 0.0762, 0.2417, 0.0871))} parmDict['lwidth'] = {i:v for i,v in enumerate((3.6854, 0.437, 0.6, 0.52, 0.62))} parmDict['numWave'] = 5 wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus) def _onSetCu2Wave(event): SetCu2Wave() parmDict['numWave'] = 2 wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus) def _onSetPoint(event): wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,'BBpoint',SetButtonStatus) def _onSetPSD(event): wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,'BBPSD',SetButtonStatus) def PlotTopasFPA(event): XferFPAsettings(parmDict) ttArr = np.arange(max(0.5, simParms['plotpos']-simParms['calcwid']), simParms['plotpos']+simParms['calcwid'], simParms['step']) intArr = np.zeros_like(ttArr) NISTpk = setupFPAcalc() try: center_bin_idx,peakObj = doFPAcalc( NISTpk,ttArr,simParms['plotpos'],simParms['calcwid'], simParms['step']) except Exception as err: msg = "Error computing convolution, revise input" print(msg) print(err) return G2plt.PlotFPAconvolutors(G2frame,NISTpk) pkPts = 
len(peakObj.peak) pkMax = peakObj.peak.max() startInd = center_bin_idx-(pkPts//2) #this should be the aligned start of the new data # scale peak so max I=10,000 and add into intensity array if startInd < 0: intArr[:startInd+pkPts] += 10000 * peakObj.peak[-startInd:]/pkMax elif startInd > len(intArr): return elif startInd+pkPts >= len(intArr): offset = pkPts - len( intArr[startInd:] ) intArr[startInd:startInd+pkPts-offset] += 10000 * peakObj.peak[:-offset]/pkMax else: intArr[startInd:startInd+pkPts] += 10000 * peakObj.peak/pkMax G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX=r'$2\theta, deg$', labelY=r'Intensity (arbitrary)', Title='FPA peak', newPlot=True, lines=True) if FPdlg.GetSizer(): FPdlg.GetSizer().Clear(True) numWave = parmDict['numWave'] if mode == 'BBpoint': itemList = BraggBrentanoParms+BBPointDetector elif mode == 'BBPSD': itemList = BraggBrentanoParms+BBPSDDetector else: raise Exception('Unknown mode in MakeTopasFPASizer: '+mode) MainSizer = wx.BoxSizer(wx.VERTICAL) MainSizer.Add((-1,5)) waveSizer = wx.FlexGridSizer(cols=numWave+1,hgap=3,vgap=5) for lbl,prm,defVal in zip( (u'Wavelength (\u212b)','Rel. 
Intensity',u'Lorentz Width\n(\u212b/1000)'), ('wave','int','lwidth'), (0.0, 1.0, 0.1), ): text = wx.StaticText(FPdlg,wx.ID_ANY,lbl,style=wx.ALIGN_CENTER) text.SetBackgroundColour(wx.WHITE) waveSizer.Add(text,0,wx.EXPAND) if prm not in parmDict: parmDict[prm] = {} for i in range(numWave): if i not in parmDict[prm]: parmDict[prm][i] = defVal ctrl = G2G.ValidatedTxtCtrl(FPdlg,parmDict[prm],i,size=(90,-1)) waveSizer.Add(ctrl,1,wx.ALIGN_CENTER_VERTICAL,1) MainSizer.Add(waveSizer) MainSizer.Add((-1,5)) btnsizer = wx.BoxSizer(wx.HORIZONTAL) btn = wx.Button(FPdlg, wx.ID_ANY,'Add col') btnsizer.Add(btn) btn.Bind(wx.EVT_BUTTON,_onAddWave) btn = wx.Button(FPdlg, wx.ID_ANY,'Remove col') btnsizer.Add(btn) btn.Bind(wx.EVT_BUTTON,_onRemWave) btn = wx.Button(FPdlg, wx.ID_ANY,'CuKa1+2') btnsizer.Add(btn) btn.Bind(wx.EVT_BUTTON,_onSetCu2Wave) btn = wx.Button(FPdlg, wx.ID_ANY,'CuKa-5wave') btnsizer.Add(btn) btn.Bind(wx.EVT_BUTTON,_onSetCu5Wave) MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0) MainSizer.Add((-1,5)) btnsizer = wx.BoxSizer(wx.HORIZONTAL) btn = wx.Button(FPdlg, wx.ID_ANY,'Point Dect.') btn.Enable(not mode == 'BBpoint') btnsizer.Add(btn) btn.Bind(wx.EVT_BUTTON,_onSetPoint) btn = wx.Button(FPdlg, wx.ID_ANY,'PSD') btn.Enable(not mode == 'BBPSD') btnsizer.Add(btn) btn.Bind(wx.EVT_BUTTON,_onSetPSD) MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0) MainSizer.Add((-1,5)) prmSizer = wx.FlexGridSizer(cols=3,hgap=3,vgap=5) text = wx.StaticText(FPdlg,wx.ID_ANY,'label',style=wx.ALIGN_CENTER) text.SetBackgroundColour(wx.WHITE) prmSizer.Add(text,0,wx.EXPAND) text = wx.StaticText(FPdlg,wx.ID_ANY,'value',style=wx.ALIGN_CENTER) text.SetBackgroundColour(wx.WHITE) prmSizer.Add(text,0,wx.EXPAND) text = wx.StaticText(FPdlg,wx.ID_ANY,'explanation',style=wx.ALIGN_CENTER) text.SetBackgroundColour(wx.WHITE) prmSizer.Add(text,0,wx.EXPAND) for lbl,defVal,text in itemList: prmSizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,lbl),1,wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL,1) if lbl not in parmDict: parmDict[lbl] = 
defVal ctrl = G2G.ValidatedTxtCtrl(FPdlg,parmDict,lbl,size=(70,-1)) prmSizer.Add(ctrl,1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,1) txt = wx.StaticText(FPdlg,wx.ID_ANY,text,size=(400,-1)) txt.Wrap(380) prmSizer.Add(txt) MainSizer.Add(prmSizer) MainSizer.Add((-1,4),1,wx.EXPAND,1) btnsizer = wx.BoxSizer(wx.HORIZONTAL) btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak') btnsizer.Add(btn) btn.Bind(wx.EVT_BUTTON,PlotTopasFPA) btnsizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,' at ')) if 'plotpos' not in simParms: simParms['plotpos'] = simParms['minTT'] ctrl = G2G.ValidatedTxtCtrl(FPdlg,simParms,'plotpos',size=(70,-1)) btnsizer.Add(ctrl) btnsizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,' deg.')) MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0) MainSizer.Add((-1,4),1,wx.EXPAND,1) btnsizer = wx.BoxSizer(wx.HORIZONTAL) OKbtn = wx.Button(FPdlg, wx.ID_OK) OKbtn.SetDefault() btnsizer.Add(OKbtn) Cbtn = wx.Button(FPdlg, wx.ID_CLOSE,"Cancel") btnsizer.Add(Cbtn) MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0) MainSizer.Add((-1,4),1,wx.EXPAND,1) # bindings for close of window OKbtn.Bind(wx.EVT_BUTTON,_onOK) Cbtn.Bind(wx.EVT_BUTTON,_onClose) FPdlg.SetSizer(MainSizer) MainSizer.Layout() MainSizer.Fit(FPdlg) FPdlg.SetMinSize(FPdlg.GetSize()) FPdlg.SendSizeEvent() def XferFPAsettings(InpParms): '''convert Topas-type parameters to SI units for NIST and place in a dict sorted according to use in each convoluter :param dict InpParms: a dict with Topas-like parameters, as set in :func:`MakeTopasFPASizer` :returns: a nested dict with global parameters and those for each convolution ''' wavenums = range(InpParms['numWave']) source_wavelengths_m = 1.e-10 * np.array([InpParms['wave'][i] for i in wavenums]) la = [InpParms['int'][i] for i in wavenums] source_intensities = np.array(la)/max(la) source_lor_widths_m = 1.e-10 * 1.e-3 * np.array([InpParms['lwidth'][i] for i in wavenums]) source_gauss_widths_m = 1.e-10 * 1.e-3 * np.array([0.001 for i in wavenums]) NISTparms["emission"] = {'emiss_wavelengths' : source_wavelengths_m, 
'emiss_intensities' : source_intensities, 'emiss_gauss_widths' : source_gauss_widths_m, 'emiss_lor_widths' : source_lor_widths_m, 'crystallite_size_gauss' : 1.e-9 * InpParms.get('Size_G',1e6), 'crystallite_size_lor' : 1.e-9 * InpParms.get('Size_L',1e6)} if InpParms['filament_length'] == InpParms['receiving_slit_length']: # workaround: InpParms['receiving_slit_length'] *= 1.00001 # avoid bug when slit lengths are identical NISTparms["axial"] = { 'axDiv':"full", 'slit_length_source' : 1e-3*InpParms['filament_length'], 'slit_length_target' : 1e-3*InpParms['receiving_slit_length'], 'length_sample' : 1e-3 * InpParms['sample_length'], 'n_integral_points' : 10, 'angI_deg' : InpParms['soller_angle'], 'angD_deg': InpParms['soller_angle'] } if InpParms.get('LAC_cm',0) > 0: NISTparms["absorption"] = { 'absorption_coefficient': InpParms['LAC_cm']*100, #like LaB6, in m^(-1) 'sample_thickness': 1e-3 * InpParms['sample_thickness'], } elif "absorption" in NISTparms: del NISTparms["absorption"] if InpParms.get('lpsd_equitorial_divergence',0) > 0 and InpParms.get( 'lpsd_th2_angular_range',0) > 0: PSDdetector_length_mm=np.arcsin(np.pi*InpParms['lpsd_th2_angular_range']/180. )*InpParms['Rs'] # mm NISTparms["si_psd"] = { 'equatorial_divergence_deg': InpParms['lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.,PSDdetector_length_mm/1000.) 
} elif "si_psd" in NISTparms: del NISTparms["si_psd"] if InpParms.get('Specimen_Displacement'): NISTparms["displacement"] = {'specimen_displacement': 1e-3 * InpParms['Specimen_Displacement']} elif "displacement" in NISTparms: del NISTparms["displacement"] if InpParms.get('receiving_slit_width'): NISTparms["receiver_slit"] = {'slit_width':1e-3*InpParms['receiving_slit_width']} elif "receiver_slit" in NISTparms: del NISTparms["receiver_slit"] if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get( 'tube-tails_rel-I',0) > 0: NISTparms["tube_tails"] = { 'main_width' : 1e-3 * InpParms.get('tube-tails_width', 0.), 'tail_left' : -1e-3 * InpParms.get('tube-tails_L-tail',0.), 'tail_right' : 1e-3 * InpParms.get('tube-tails_R-tail',0.), 'tail_intens' : InpParms.get('tube-tails_rel-I',0.),} elif "tube_tails" in NISTparms: del NISTparms["tube_tails"] # set Global parameters max_wavelength = source_wavelengths_m[np.argmax(source_intensities)] NISTparms[""] = { 'equatorial_divergence_deg' : InpParms['divergence'], 'dominant_wavelength' : max_wavelength, 'diffractometer_radius' : 1e-3* InpParms['Rs'], 'oversampling' : InpParms['convolution_steps'], } def setupFPAcalc(): '''Create a peak profile object using the NIST XRD Fundamental Parameters Code. :returns: a profile object that can provide information on each convolution or compute the composite peak shape. 
''' p=FP.FP_profile(anglemode="twotheta", output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get('oversampling',10)) p.debug_cache=False #set parameters for each convolver for key in NISTparms: if key: p.set_parameters(convolver=key,**NISTparms[key]) else: p.set_parameters(**NISTparms[key]) return p def doFPAcalc(NISTpk,ttArr,twotheta,calcwid,step): '''Compute a single peak using a NIST profile object :param object NISTpk: a peak profile computational object from the NIST XRD Fundamental Parameters Code, typically established from a call to :func:`SetupFPAcalc` :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees) :param float twotheta: nominal center of peak (degrees) :param float calcwid: width to perform convolution (degrees) :param float step: step size ''' # find closest point to twotheta (may be outside limits of the array) center_bin_idx=min(ttArr.searchsorted(twotheta),len(ttArr)-1) NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step, twotheta_window_center_deg=ttArr[center_bin_idx], twotheta_approx_window_fullwidth_deg=calcwid, ) NISTpk.set_parameters(twotheta0_deg=twotheta) return center_bin_idx,NISTpk.compute_line_profile() def MakeSimSizer(G2frame, dlg): '''Create a GUI to get simulation with parameters for Fundamental Parameters fitting. 
:param wx.Window dlg: Frame or Dialog where GUI will appear :returns: a sizer with the GUI controls ''' def _onOK(event): msg = '' if simParms['minTT']-simParms['calcwid']/1.5 < 0.1: msg += 'First peak minus half the calc width is too low' if simParms['maxTT']+simParms['calcwid']/1.5 > 175: if msg: msg += '\n' msg += 'Last peak plus half the calc width is too high' if simParms['npeaks'] < 8: if msg: msg += '\n' msg += 'At least 8 peaks are needed' if msg: G2G.G2MessageBox(dlg,msg,'Bad input, try again') return # compute "obs" pattern ttArr = np.arange(max(0.5, simParms['minTT']-simParms['calcwid']/1.5), simParms['maxTT']+simParms['calcwid']/1.5, simParms['step']) intArr = np.zeros_like(ttArr) peaklist = np.linspace(simParms['minTT'],simParms['maxTT'], simParms['npeaks'],endpoint=True) peakSpacing = (peaklist[-1]-peaklist[0])/(len(peaklist)-1) NISTpk = setupFPAcalc() minPtsHM = len(intArr) # initialize points above half-max maxPtsHM = 0 for num,twoth_peak in enumerate(peaklist): try: center_bin_idx,peakObj = doFPAcalc( NISTpk,ttArr,twoth_peak,simParms['calcwid'], simParms['step']) except: if msg: msg += '\n' msg = "Error computing convolution, revise input" continue if num == 0: G2plt.PlotFPAconvolutors(G2frame,NISTpk) pkMax = peakObj.peak.max() pkPts = len(peakObj.peak) minPtsHM = min(minPtsHM,sum(peakObj.peak >= 0.5*pkMax)) # points above half-max maxPtsHM = max(maxPtsHM,sum(peakObj.peak >= 0.5*pkMax)) # points above half-max startInd = center_bin_idx-(pkPts//2) #this should be the aligned start of the new data # scale peak so max I=10,000 and add into intensity array if startInd < 0: intArr[:startInd+pkPts] += 10000 * peakObj.peak[-startInd:]/pkMax elif startInd > len(intArr): break elif startInd+pkPts >= len(intArr): offset = pkPts - len( intArr[startInd:] ) intArr[startInd:startInd+pkPts-offset] += 10000 * peakObj.peak[:-offset]/pkMax else: intArr[startInd:startInd+pkPts] += 10000 * peakObj.peak/pkMax # check if peaks are too closely spaced if 
maxPtsHM*simParms['step'] > peakSpacing/4: if msg: msg += '\n' msg += 'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'.format( maxPtsHM*simParms['step'], peakSpacing) # check if too few points across Hmax if minPtsHM < 10: if msg: msg += '\n' msg += 'There are only {} points above the half-max. 10 are needed. Dropping step size.'.format(minPtsHM) simParms['step'] *= 0.5 if msg: G2G.G2MessageBox(dlg,msg,'Bad input, try again') wx.CallAfter(MakeSimSizer,G2frame, dlg) return # pattern has been computed successfully dlg.Destroy() wx.CallAfter(FitFPApeaks,ttArr, intArr, peaklist, maxPtsHM) # do peakfit outside event callback def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM): '''Perform a peak fit to the FP simulated pattern ''' plswait = wx.Dialog(G2frame,style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER) vbox = wx.BoxSizer(wx.VERTICAL) vbox.Add((1,1),1,wx.ALL|wx.EXPAND,1) txt = wx.StaticText(plswait,wx.ID_ANY, 'Fitting peaks...\nPlease wait...', style=wx.ALIGN_CENTER) vbox.Add(txt,0,wx.ALL|wx.EXPAND) vbox.Add((1,1),1,wx.ALL|wx.EXPAND,1) plswait.SetSizer(vbox) plswait.Layout() plswait.CenterOnParent() plswait.Show() # post "please wait" wx.BeginBusyCursor() # pick out one or two most intense wavelengths ints = list(NISTparms['emission']['emiss_intensities']) Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)]*1e10 if len(ints) > 1: ints[np.argmax(ints)] = -1 Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)]*1e10 else: Lam2 = None histId = G2frame.AddSimulatedPowder(ttArr,intArr, 'NIST Fundamental Parameters simulation', Lam1,Lam2) controls = G2frame.GPXtree.GetItemPyData( G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Controls')) controldat = controls.get('data', {'deriv type':'analytic','min dM/M':0.001,}) #fil Parms,Parms2 = G2frame.GPXtree.GetItemPyData( G2gd.GetGPXtreeItemId(G2frame,histId,'Instrument Parameters')) peakData = G2frame.GPXtree.GetItemPyData( 
G2gd.GetGPXtreeItemId(G2frame,histId,'Peak List')) # set background to 0 with one term = 0; disable refinement bkg1,bkg2 = bkg = G2frame.GPXtree.GetItemPyData( G2gd.GetGPXtreeItemId(G2frame,histId,'Background')) bkg1[1]=False bkg1[2]=0 bkg1[3]=0.0 limits = G2frame.GPXtree.GetItemPyData( G2gd.GetGPXtreeItemId(G2frame,histId,'Limits')) # approximate asym correction try: Parms['SH/L'][1] = 0.25 * ( NISTparms['axial']['length_sample']+ NISTparms['axial']['slit_length_source'] ) / NISTparms['']['diffractometer_radius'] except: pass for pos in peaklist: i = ttArr.searchsorted(pos) area = sum(intArr[max(0,i-maxPtsHM):min(len(intArr),i+maxPtsHM)]) peakData['peaks'].append(G2mth.setPeakparms(Parms,Parms2,pos,area)) histData = G2frame.GPXtree.GetItemPyData(histId) # refine peak positions only bxye = np.zeros(len(histData[1][1])) peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'], bkg,limits[1], Parms,Parms2,histData[1],bxye,[], False,controldat,None)[0] # refine peak areas as well for pk in peakData['peaks']: pk[1] = True peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'], bkg,limits[1], Parms,Parms2,histData[1],bxye,[], False,controldat)[0] # refine profile function for p in ('U', 'V', 'W', 'X', 'Y'): Parms[p][2] = True peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'], bkg,limits[1], Parms,Parms2,histData[1],bxye,[], False,controldat)[0] # add in asymmetry Parms['SH/L'][2] = True peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'], bkg,limits[1], Parms,Parms2,histData[1],bxye,[], False,controldat)[0] # reset "initial" profile for p in Parms: if len(Parms[p]) == 3: Parms[p][0] = Parms[p][1] Parms[p][2] = False wx.EndBusyCursor() plswait.Destroy() # remove "please wait" # save Iparms pth = G2G.GetExportPath(G2frame) fldlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II instrument parameters file', pth, '', 'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT) try: if fldlg.ShowModal() == wx.ID_OK: 
filename = fldlg.GetPath() # make sure extension is .instprm filename = os.path.splitext(filename)[0]+'.instprm' File = open(filename,'w') File.write("#GSAS-II instrument parameter file; do not add/delete items!\n") for item in Parms: File.write(item+':'+str(Parms[item][1])+'\n') File.close() print ('Instrument parameters saved to: '+filename) finally: fldlg.Destroy() #GSASIIpath.IPyBreak() def _onClose(event): dlg.Destroy() def SetButtonStatus(done=False): OKbtn.Enable(bool(NISTparms)) saveBtn.Enable(bool(NISTparms)) if done: _onOK(None) def _onSetFPA(event): # Create a non-modal dialog for Topas-style FP input. FPdlg = wx.Dialog(dlg,wx.ID_ANY,'FPA parameters', style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER) MakeTopasFPASizer(G2frame,FPdlg,'BBpoint',SetButtonStatus) FPdlg.CenterOnParent() FPdlg.Raise() FPdlg.Show() def _onSaveFPA(event): filename = G2G.askSaveFile(G2frame,'','.NISTfpa', 'dict of NIST FPA values',dlg) if not filename: return fp = open(filename,'w') fp.write('# parameters to be used in the NIST XRD Fundamental Parameters program\n') fp.write('{\n') for key in sorted(NISTparms): fp.write(" '"+key+"' : "+str(NISTparms[key])+",") if not key: fp.write(' # global parameters') fp.write('\n') fp.write('}\n') fp.close() def _onReadFPA(event): filename = G2G.GetImportFile(G2frame, message='Read file with dict of values for NIST Fundamental Parameters', parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa') if not filename: return if not filename[0]: return try: txt = open(filename[0],'r').read() NISTparms.clear() array = np.array d = eval(txt) NISTparms.update(d) except Exception as err: G2G.G2MessageBox(dlg, u'Error reading file {}:{}\n'.format(filename,err), 'Bad dict input') #GSASIIpath.IPyBreak() SetButtonStatus() if dlg.GetSizer(): dlg.GetSizer().Clear(True) MainSizer = wx.BoxSizer(wx.VERTICAL) MainSizer.Add(wx.StaticText(dlg,wx.ID_ANY, 'Fit Profile Parameters to Peaks from Fundamental Parameters', style=wx.ALIGN_CENTER),0,wx.EXPAND) 
MainSizer.Add((-1,5)) prmSizer = wx.FlexGridSizer(cols=2,hgap=3,vgap=5) text = wx.StaticText(dlg,wx.ID_ANY,'value',style=wx.ALIGN_CENTER) text.SetBackgroundColour(wx.WHITE) prmSizer.Add(text,0,wx.EXPAND) text = wx.StaticText(dlg,wx.ID_ANY,'explanation',style=wx.ALIGN_CENTER) text.SetBackgroundColour(wx.WHITE) prmSizer.Add(text,0,wx.EXPAND) for key,defVal,text in ( ('minTT',3.,'Location of first peak in 2theta (deg)'), ('maxTT',123.,'Location of last peak in 2theta (deg)'), ('step',0.01,'Pattern step size (deg 2theta)'), ('npeaks',13.,'Number of peaks'), ('calcwid',2.,'Range to compute each peak (deg 2theta)'), ): if key not in simParms: simParms[key] = defVal ctrl = G2G.ValidatedTxtCtrl(dlg,simParms,key,size=(70,-1)) prmSizer.Add(ctrl,1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,1) txt = wx.StaticText(dlg,wx.ID_ANY,text,size=(300,-1)) txt.Wrap(280) prmSizer.Add(txt) MainSizer.Add(prmSizer) btnsizer = wx.BoxSizer(wx.HORIZONTAL) btn = wx.Button(dlg, wx.ID_ANY,'Input FP vals') btnsizer.Add(btn) btn.Bind(wx.EVT_BUTTON,_onSetFPA) saveBtn = wx.Button(dlg, wx.ID_ANY,'Save FPA dict') btnsizer.Add(saveBtn) saveBtn.Bind(wx.EVT_BUTTON,_onSaveFPA) readBtn = wx.Button(dlg, wx.ID_ANY,'Read FPA dict') btnsizer.Add(readBtn) readBtn.Bind(wx.EVT_BUTTON,_onReadFPA) MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0) MainSizer.Add((-1,4),1,wx.EXPAND,1) txt = wx.StaticText(dlg,wx.ID_ANY, 'If you use this, please cite: '+Citation, size=(350,-1)) txt.Wrap(340) MainSizer.Add(txt,0,wx.ALIGN_CENTER) btnsizer = wx.BoxSizer(wx.HORIZONTAL) OKbtn = wx.Button(dlg, wx.ID_OK) OKbtn.SetDefault() btnsizer.Add(OKbtn) Cbtn = wx.Button(dlg, wx.ID_CLOSE,"Cancel") btnsizer.Add(Cbtn) MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0) MainSizer.Add((-1,4),1,wx.EXPAND,1) # bindings for close of window OKbtn.Bind(wx.EVT_BUTTON,_onOK) Cbtn.Bind(wx.EVT_BUTTON,_onClose) SetButtonStatus() dlg.SetSizer(MainSizer) MainSizer.Layout() MainSizer.Fit(dlg) dlg.SetMinSize(dlg.GetSize()) dlg.SendSizeEvent() dlg.Raise() def 
GetFPAInput(G2frame): dlg = wx.Dialog(G2frame,wx.ID_ANY,'FPA input', style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER) MakeSimSizer(G2frame,dlg) dlg.CenterOnParent() dlg.Show() return
normal
{ "blob_id": "3b1426e0f29093e1e462765bcf1d351a064b9639", "index": 142, "step-1": "<mask token>\n\n\ndef SetCu2Wave():\n \"\"\"Set the parameters to the two-line Cu K alpha 1+2 spectrum\n \"\"\"\n parmDict['wave'] = {i: v for i, v in enumerate((1.540596, 1.544493))}\n parmDict['int'] = {i: v for i, v in enumerate((0.653817, 0.346183))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((0.501844, 0.626579))}\n\n\n<mask token>\n\n\ndef MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):\n \"\"\"Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. \n Parameter input is modeled after Topas input parameters.\n\n :param wx.Window FPdlg: Frame or Dialog where GUI will appear\n :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or \n (linear) position sensitive detector\n :param dict parmDict: dict to place parameters. If empty, default values from \n globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in \n the array. 
\n :returns: a sizer with the GUI controls\n \n \"\"\"\n\n def _onOK(event):\n XferFPAsettings(parmDict)\n SetButtonStatus(done=True)\n FPdlg.Destroy()\n\n def _onClose(event):\n SetButtonStatus()\n FPdlg.Destroy()\n\n def _onAddWave(event):\n parmDict['numWave'] += 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onRemWave(event):\n parmDict['numWave'] -= 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu5Wave(event):\n parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,\n 1.541058, 1.54441, 1.544721))}\n parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791, \n 0.0762, 0.2417, 0.0871))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437, \n 0.6, 0.52, 0.62))}\n parmDict['numWave'] = 5\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu2Wave(event):\n SetCu2Wave()\n parmDict['numWave'] = 2\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetPoint(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',\n SetButtonStatus)\n\n def _onSetPSD(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',\n SetButtonStatus)\n\n def PlotTopasFPA(event):\n XferFPAsettings(parmDict)\n ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'\n ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])\n intArr = np.zeros_like(ttArr)\n NISTpk = setupFPAcalc()\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[\n 'plotpos'], simParms['calcwid'], simParms['step'])\n except Exception as err:\n msg = 'Error computing convolution, revise input'\n print(msg)\n print(err)\n return\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkPts = len(peakObj.peak)\n pkMax = peakObj.peak.max()\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n 
return\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[\n :-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax\n G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\\\theta, deg$',\n labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,\n lines=True)\n if FPdlg.GetSizer():\n FPdlg.GetSizer().Clear(True)\n numWave = parmDict['numWave']\n if mode == 'BBpoint':\n itemList = BraggBrentanoParms + BBPointDetector\n elif mode == 'BBPSD':\n itemList = BraggBrentanoParms + BBPSDDetector\n else:\n raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add((-1, 5))\n waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)\n for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. Intensity',\n u'Lorentz Width\\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)\n ):\n text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n waveSizer.Add(text, 0, wx.EXPAND)\n if prm not in parmDict:\n parmDict[prm] = {}\n for i in range(numWave):\n if i not in parmDict[prm]:\n parmDict[prm][i] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))\n waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)\n MainSizer.Add(waveSizer)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onAddWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onRemWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n btnsizer = 
wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')\n btn.Enable(not mode == 'BBpoint')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPoint)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')\n btn.Enable(not mode == 'BBPSD')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPSD)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER\n )\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for lbl, defVal, text in itemList:\n prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.\n ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)\n if lbl not in parmDict:\n parmDict[lbl] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))\n txt.Wrap(380)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))\n if 'plotpos' not in simParms:\n simParms['plotpos'] = simParms['minTT']\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))\n btnsizer.Add(ctrl)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(FPdlg, wx.ID_OK)\n OKbtn.SetDefault()\n 
btnsizer.Add(OKbtn)\n Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n FPdlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(FPdlg)\n FPdlg.SetMinSize(FPdlg.GetSize())\n FPdlg.SendSizeEvent()\n\n\ndef XferFPAsettings(InpParms):\n \"\"\"convert Topas-type parameters to SI units for NIST and place in a dict sorted\n according to use in each convoluter\n\n :param dict InpParms: a dict with Topas-like parameters, as set in \n :func:`MakeTopasFPASizer`\n :returns: a nested dict with global parameters and those for each convolution\n \"\"\"\n wavenums = range(InpParms['numWave'])\n source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in\n wavenums])\n la = [InpParms['int'][i] for i in wavenums]\n source_intensities = np.array(la) / max(la)\n source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for\n i in wavenums])\n source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in\n wavenums])\n NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,\n 'emiss_intensities': source_intensities, 'emiss_gauss_widths':\n source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,\n 'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),\n 'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}\n if InpParms['filament_length'] == InpParms['receiving_slit_length']:\n InpParms['receiving_slit_length'] *= 1.00001\n NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *\n InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms\n ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[\n 'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[\n 'soller_angle'], 'angD_deg': InpParms['soller_angle']}\n if InpParms.get('LAC_cm', 0) > 0:\n NISTparms['absorption'] = 
{'absorption_coefficient': InpParms[\n 'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[\n 'sample_thickness']}\n elif 'absorption' in NISTparms:\n del NISTparms['absorption']\n if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(\n 'lpsd_th2_angular_range', 0) > 0:\n PSDdetector_length_mm = np.arcsin(np.pi * InpParms[\n 'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']\n NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[\n 'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0, \n PSDdetector_length_mm / 1000.0)}\n elif 'si_psd' in NISTparms:\n del NISTparms['si_psd']\n if InpParms.get('Specimen_Displacement'):\n NISTparms['displacement'] = {'specimen_displacement': 0.001 *\n InpParms['Specimen_Displacement']}\n elif 'displacement' in NISTparms:\n del NISTparms['displacement']\n if InpParms.get('receiving_slit_width'):\n NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[\n 'receiving_slit_width']}\n elif 'receiver_slit' in NISTparms:\n del NISTparms['receiver_slit']\n if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(\n 'tube-tails_rel-I', 0) > 0:\n NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(\n 'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(\n 'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(\n 'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(\n 'tube-tails_rel-I', 0.0)}\n elif 'tube_tails' in NISTparms:\n del NISTparms['tube_tails']\n max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]\n NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],\n 'dominant_wavelength': max_wavelength, 'diffractometer_radius': \n 0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}\n\n\ndef setupFPAcalc():\n \"\"\"Create a peak profile object using the NIST XRD Fundamental \n Parameters Code. \n \n :returns: a profile object that can provide information on \n each convolution or compute the composite peak shape. 
\n \"\"\"\n p = FP.FP_profile(anglemode='twotheta',\n output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get\n ('oversampling', 10))\n p.debug_cache = False\n for key in NISTparms:\n if key:\n p.set_parameters(convolver=key, **NISTparms[key])\n else:\n p.set_parameters(**NISTparms[key])\n return p\n\n\ndef doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):\n \"\"\"Compute a single peak using a NIST profile object\n\n :param object NISTpk: a peak profile computational object from the \n NIST XRD Fundamental Parameters Code, typically established from\n a call to :func:`SetupFPAcalc`\n :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)\n :param float twotheta: nominal center of peak (degrees)\n :param float calcwid: width to perform convolution (degrees)\n :param float step: step size\n \"\"\"\n center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)\n NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,\n twotheta_window_center_deg=ttArr[center_bin_idx],\n twotheta_approx_window_fullwidth_deg=calcwid)\n NISTpk.set_parameters(twotheta0_deg=twotheta)\n return center_bin_idx, NISTpk.compute_line_profile()\n\n\ndef MakeSimSizer(G2frame, dlg):\n \"\"\"Create a GUI to get simulation with parameters for Fundamental \n Parameters fitting. 
\n\n :param wx.Window dlg: Frame or Dialog where GUI will appear\n\n :returns: a sizer with the GUI controls \n \n \"\"\"\n\n def _onOK(event):\n msg = ''\n if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:\n msg += 'First peak minus half the calc width is too low'\n if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:\n if msg:\n msg += '\\n'\n msg += 'Last peak plus half the calc width is too high'\n if simParms['npeaks'] < 8:\n if msg:\n msg += '\\n'\n msg += 'At least 8 peaks are needed'\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n return\n ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /\n 1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[\n 'step'])\n intArr = np.zeros_like(ttArr)\n peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],\n simParms['npeaks'], endpoint=True)\n peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)\n NISTpk = setupFPAcalc()\n minPtsHM = len(intArr)\n maxPtsHM = 0\n for num, twoth_peak in enumerate(peaklist):\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,\n twoth_peak, simParms['calcwid'], simParms['step'])\n except:\n if msg:\n msg += '\\n'\n msg = 'Error computing convolution, revise input'\n continue\n if num == 0:\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkMax = peakObj.peak.max()\n pkPts = len(peakObj.peak)\n minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n break\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset\n ] += 10000 * peakObj.peak[:-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts\n ] += 10000 * peakObj.peak / pkMax\n if maxPtsHM * simParms['step'] > peakSpacing / 4:\n if msg:\n msg += '\\n'\n msg += (\n 
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'\n .format(maxPtsHM * simParms['step'], peakSpacing))\n if minPtsHM < 10:\n if msg:\n msg += '\\n'\n msg += (\n 'There are only {} points above the half-max. 10 are needed. Dropping step size.'\n .format(minPtsHM))\n simParms['step'] *= 0.5\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n wx.CallAfter(MakeSimSizer, G2frame, dlg)\n return\n dlg.Destroy()\n wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)\n\n def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):\n \"\"\"Perform a peak fit to the FP simulated pattern\n \"\"\"\n plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.\n RESIZE_BORDER)\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n txt = wx.StaticText(plswait, wx.ID_ANY,\n 'Fitting peaks...\\nPlease wait...', style=wx.ALIGN_CENTER)\n vbox.Add(txt, 0, wx.ALL | wx.EXPAND)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n plswait.SetSizer(vbox)\n plswait.Layout()\n plswait.CenterOnParent()\n plswait.Show()\n wx.BeginBusyCursor()\n ints = list(NISTparms['emission']['emiss_intensities'])\n Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n if len(ints) > 1:\n ints[np.argmax(ints)] = -1\n Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n else:\n Lam2 = None\n histId = G2frame.AddSimulatedPowder(ttArr, intArr,\n 'NIST Fundamental Parameters simulation', Lam1, Lam2)\n controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, G2frame.root, 'Controls'))\n controldat = controls.get('data', {'deriv type': 'analytic',\n 'min dM/M': 0.001})\n Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId\n (G2frame, histId, 'Instrument Parameters'))\n peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Peak List'))\n bkg1, bkg2 = bkg = 
G2frame.GPXtree.GetItemPyData(G2gd.\n GetGPXtreeItemId(G2frame, histId, 'Background'))\n bkg1[1] = False\n bkg1[2] = 0\n bkg1[3] = 0.0\n limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Limits'))\n try:\n Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +\n NISTparms['axial']['slit_length_source']) / NISTparms[''][\n 'diffractometer_radius']\n except:\n pass\n for pos in peaklist:\n i = ttArr.searchsorted(pos)\n area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +\n maxPtsHM)])\n peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,\n area))\n histData = G2frame.GPXtree.GetItemPyData(histId)\n bxye = np.zeros(len(histData[1][1]))\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False,\n controldat, None)[0]\n for pk in peakData['peaks']:\n pk[1] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in ('U', 'V', 'W', 'X', 'Y'):\n Parms[p][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n Parms['SH/L'][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in Parms:\n if len(Parms[p]) == 3:\n Parms[p][0] = Parms[p][1]\n Parms[p][2] = False\n wx.EndBusyCursor()\n plswait.Destroy()\n pth = G2G.GetExportPath(G2frame)\n fldlg = wx.FileDialog(G2frame,\n 'Set name to save GSAS-II instrument parameters file', pth, '',\n 'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |\n wx.FD_OVERWRITE_PROMPT)\n try:\n if fldlg.ShowModal() == wx.ID_OK:\n filename = fldlg.GetPath()\n filename = os.path.splitext(filename)[0] + '.instprm'\n File = open(filename, 'w')\n File.write(\n '#GSAS-II instrument parameter file; do 
not add/delete items!\\n'\n )\n for item in Parms:\n File.write(item + ':' + str(Parms[item][1]) + '\\n')\n File.close()\n print('Instrument parameters saved to: ' + filename)\n finally:\n fldlg.Destroy()\n\n def _onClose(event):\n dlg.Destroy()\n\n def SetButtonStatus(done=False):\n OKbtn.Enable(bool(NISTparms))\n saveBtn.Enable(bool(NISTparms))\n if done:\n _onOK(None)\n\n def _onSetFPA(event):\n FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)\n FPdlg.CenterOnParent()\n FPdlg.Raise()\n FPdlg.Show()\n\n def _onSaveFPA(event):\n filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',\n 'dict of NIST FPA values', dlg)\n if not filename:\n return\n fp = open(filename, 'w')\n fp.write(\n '# parameters to be used in the NIST XRD Fundamental Parameters program\\n'\n )\n fp.write('{\\n')\n for key in sorted(NISTparms):\n fp.write(\" '\" + key + \"' : \" + str(NISTparms[key]) + ',')\n if not key:\n fp.write(' # global parameters')\n fp.write('\\n')\n fp.write('}\\n')\n fp.close()\n\n def _onReadFPA(event):\n filename = G2G.GetImportFile(G2frame, message=\n 'Read file with dict of values for NIST Fundamental Parameters',\n parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')\n if not filename:\n return\n if not filename[0]:\n return\n try:\n txt = open(filename[0], 'r').read()\n NISTparms.clear()\n array = np.array\n d = eval(txt)\n NISTparms.update(d)\n except Exception as err:\n G2G.G2MessageBox(dlg, u'Error reading file {}:{}\\n'.format(\n filename, err), 'Bad dict input')\n SetButtonStatus()\n if dlg.GetSizer():\n dlg.GetSizer().Clear(True)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,\n 'Fit Profile Parameters to Peaks from Fundamental Parameters',\n style=wx.ALIGN_CENTER), 0, wx.EXPAND)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)\n text = wx.StaticText(dlg, wx.ID_ANY, 
'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for key, defVal, text in (('minTT', 3.0,\n 'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,\n 'Location of last peak in 2theta (deg)'), ('step', 0.01,\n 'Pattern step size (deg 2theta)'), ('npeaks', 13.0,\n 'Number of peaks'), ('calcwid', 2.0,\n 'Range to compute each peak (deg 2theta)')):\n if key not in simParms:\n simParms[key] = defVal\n ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))\n txt.Wrap(280)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetFPA)\n saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')\n btnsizer.Add(saveBtn)\n saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)\n readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')\n btnsizer.Add(readBtn)\n readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +\n Citation, size=(350, -1))\n txt.Wrap(340)\n MainSizer.Add(txt, 0, wx.ALIGN_CENTER)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(dlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n SetButtonStatus()\n dlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(dlg)\n dlg.SetMinSize(dlg.GetSize())\n dlg.SendSizeEvent()\n 
dlg.Raise()\n\n\ndef GetFPAInput(G2frame):\n dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeSimSizer(G2frame, dlg)\n dlg.CenterOnParent()\n dlg.Show()\n return\n", "step-2": "<mask token>\n\n\ndef SetCu2Wave():\n \"\"\"Set the parameters to the two-line Cu K alpha 1+2 spectrum\n \"\"\"\n parmDict['wave'] = {i: v for i, v in enumerate((1.540596, 1.544493))}\n parmDict['int'] = {i: v for i, v in enumerate((0.653817, 0.346183))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((0.501844, 0.626579))}\n\n\nSetCu2Wave()\n\n\ndef MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):\n \"\"\"Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. \n Parameter input is modeled after Topas input parameters.\n\n :param wx.Window FPdlg: Frame or Dialog where GUI will appear\n :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or \n (linear) position sensitive detector\n :param dict parmDict: dict to place parameters. If empty, default values from \n globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in \n the array. 
\n :returns: a sizer with the GUI controls\n \n \"\"\"\n\n def _onOK(event):\n XferFPAsettings(parmDict)\n SetButtonStatus(done=True)\n FPdlg.Destroy()\n\n def _onClose(event):\n SetButtonStatus()\n FPdlg.Destroy()\n\n def _onAddWave(event):\n parmDict['numWave'] += 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onRemWave(event):\n parmDict['numWave'] -= 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu5Wave(event):\n parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,\n 1.541058, 1.54441, 1.544721))}\n parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791, \n 0.0762, 0.2417, 0.0871))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437, \n 0.6, 0.52, 0.62))}\n parmDict['numWave'] = 5\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu2Wave(event):\n SetCu2Wave()\n parmDict['numWave'] = 2\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetPoint(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',\n SetButtonStatus)\n\n def _onSetPSD(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',\n SetButtonStatus)\n\n def PlotTopasFPA(event):\n XferFPAsettings(parmDict)\n ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'\n ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])\n intArr = np.zeros_like(ttArr)\n NISTpk = setupFPAcalc()\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[\n 'plotpos'], simParms['calcwid'], simParms['step'])\n except Exception as err:\n msg = 'Error computing convolution, revise input'\n print(msg)\n print(err)\n return\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkPts = len(peakObj.peak)\n pkMax = peakObj.peak.max()\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n 
return\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[\n :-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax\n G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\\\theta, deg$',\n labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,\n lines=True)\n if FPdlg.GetSizer():\n FPdlg.GetSizer().Clear(True)\n numWave = parmDict['numWave']\n if mode == 'BBpoint':\n itemList = BraggBrentanoParms + BBPointDetector\n elif mode == 'BBPSD':\n itemList = BraggBrentanoParms + BBPSDDetector\n else:\n raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add((-1, 5))\n waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)\n for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. Intensity',\n u'Lorentz Width\\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)\n ):\n text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n waveSizer.Add(text, 0, wx.EXPAND)\n if prm not in parmDict:\n parmDict[prm] = {}\n for i in range(numWave):\n if i not in parmDict[prm]:\n parmDict[prm][i] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))\n waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)\n MainSizer.Add(waveSizer)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onAddWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onRemWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n btnsizer = 
wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')\n btn.Enable(not mode == 'BBpoint')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPoint)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')\n btn.Enable(not mode == 'BBPSD')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPSD)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER\n )\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for lbl, defVal, text in itemList:\n prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.\n ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)\n if lbl not in parmDict:\n parmDict[lbl] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))\n txt.Wrap(380)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))\n if 'plotpos' not in simParms:\n simParms['plotpos'] = simParms['minTT']\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))\n btnsizer.Add(ctrl)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(FPdlg, wx.ID_OK)\n OKbtn.SetDefault()\n 
btnsizer.Add(OKbtn)\n Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n FPdlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(FPdlg)\n FPdlg.SetMinSize(FPdlg.GetSize())\n FPdlg.SendSizeEvent()\n\n\ndef XferFPAsettings(InpParms):\n \"\"\"convert Topas-type parameters to SI units for NIST and place in a dict sorted\n according to use in each convoluter\n\n :param dict InpParms: a dict with Topas-like parameters, as set in \n :func:`MakeTopasFPASizer`\n :returns: a nested dict with global parameters and those for each convolution\n \"\"\"\n wavenums = range(InpParms['numWave'])\n source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in\n wavenums])\n la = [InpParms['int'][i] for i in wavenums]\n source_intensities = np.array(la) / max(la)\n source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for\n i in wavenums])\n source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in\n wavenums])\n NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,\n 'emiss_intensities': source_intensities, 'emiss_gauss_widths':\n source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,\n 'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),\n 'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}\n if InpParms['filament_length'] == InpParms['receiving_slit_length']:\n InpParms['receiving_slit_length'] *= 1.00001\n NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *\n InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms\n ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[\n 'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[\n 'soller_angle'], 'angD_deg': InpParms['soller_angle']}\n if InpParms.get('LAC_cm', 0) > 0:\n NISTparms['absorption'] = 
{'absorption_coefficient': InpParms[\n 'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[\n 'sample_thickness']}\n elif 'absorption' in NISTparms:\n del NISTparms['absorption']\n if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(\n 'lpsd_th2_angular_range', 0) > 0:\n PSDdetector_length_mm = np.arcsin(np.pi * InpParms[\n 'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']\n NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[\n 'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0, \n PSDdetector_length_mm / 1000.0)}\n elif 'si_psd' in NISTparms:\n del NISTparms['si_psd']\n if InpParms.get('Specimen_Displacement'):\n NISTparms['displacement'] = {'specimen_displacement': 0.001 *\n InpParms['Specimen_Displacement']}\n elif 'displacement' in NISTparms:\n del NISTparms['displacement']\n if InpParms.get('receiving_slit_width'):\n NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[\n 'receiving_slit_width']}\n elif 'receiver_slit' in NISTparms:\n del NISTparms['receiver_slit']\n if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(\n 'tube-tails_rel-I', 0) > 0:\n NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(\n 'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(\n 'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(\n 'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(\n 'tube-tails_rel-I', 0.0)}\n elif 'tube_tails' in NISTparms:\n del NISTparms['tube_tails']\n max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]\n NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],\n 'dominant_wavelength': max_wavelength, 'diffractometer_radius': \n 0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}\n\n\ndef setupFPAcalc():\n \"\"\"Create a peak profile object using the NIST XRD Fundamental \n Parameters Code. \n \n :returns: a profile object that can provide information on \n each convolution or compute the composite peak shape. 
\n \"\"\"\n p = FP.FP_profile(anglemode='twotheta',\n output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get\n ('oversampling', 10))\n p.debug_cache = False\n for key in NISTparms:\n if key:\n p.set_parameters(convolver=key, **NISTparms[key])\n else:\n p.set_parameters(**NISTparms[key])\n return p\n\n\ndef doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):\n \"\"\"Compute a single peak using a NIST profile object\n\n :param object NISTpk: a peak profile computational object from the \n NIST XRD Fundamental Parameters Code, typically established from\n a call to :func:`SetupFPAcalc`\n :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)\n :param float twotheta: nominal center of peak (degrees)\n :param float calcwid: width to perform convolution (degrees)\n :param float step: step size\n \"\"\"\n center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)\n NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,\n twotheta_window_center_deg=ttArr[center_bin_idx],\n twotheta_approx_window_fullwidth_deg=calcwid)\n NISTpk.set_parameters(twotheta0_deg=twotheta)\n return center_bin_idx, NISTpk.compute_line_profile()\n\n\ndef MakeSimSizer(G2frame, dlg):\n \"\"\"Create a GUI to get simulation with parameters for Fundamental \n Parameters fitting. 
\n\n :param wx.Window dlg: Frame or Dialog where GUI will appear\n\n :returns: a sizer with the GUI controls \n \n \"\"\"\n\n def _onOK(event):\n msg = ''\n if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:\n msg += 'First peak minus half the calc width is too low'\n if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:\n if msg:\n msg += '\\n'\n msg += 'Last peak plus half the calc width is too high'\n if simParms['npeaks'] < 8:\n if msg:\n msg += '\\n'\n msg += 'At least 8 peaks are needed'\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n return\n ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /\n 1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[\n 'step'])\n intArr = np.zeros_like(ttArr)\n peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],\n simParms['npeaks'], endpoint=True)\n peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)\n NISTpk = setupFPAcalc()\n minPtsHM = len(intArr)\n maxPtsHM = 0\n for num, twoth_peak in enumerate(peaklist):\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,\n twoth_peak, simParms['calcwid'], simParms['step'])\n except:\n if msg:\n msg += '\\n'\n msg = 'Error computing convolution, revise input'\n continue\n if num == 0:\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkMax = peakObj.peak.max()\n pkPts = len(peakObj.peak)\n minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n break\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset\n ] += 10000 * peakObj.peak[:-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts\n ] += 10000 * peakObj.peak / pkMax\n if maxPtsHM * simParms['step'] > peakSpacing / 4:\n if msg:\n msg += '\\n'\n msg += (\n 
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'\n .format(maxPtsHM * simParms['step'], peakSpacing))\n if minPtsHM < 10:\n if msg:\n msg += '\\n'\n msg += (\n 'There are only {} points above the half-max. 10 are needed. Dropping step size.'\n .format(minPtsHM))\n simParms['step'] *= 0.5\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n wx.CallAfter(MakeSimSizer, G2frame, dlg)\n return\n dlg.Destroy()\n wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)\n\n def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):\n \"\"\"Perform a peak fit to the FP simulated pattern\n \"\"\"\n plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.\n RESIZE_BORDER)\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n txt = wx.StaticText(plswait, wx.ID_ANY,\n 'Fitting peaks...\\nPlease wait...', style=wx.ALIGN_CENTER)\n vbox.Add(txt, 0, wx.ALL | wx.EXPAND)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n plswait.SetSizer(vbox)\n plswait.Layout()\n plswait.CenterOnParent()\n plswait.Show()\n wx.BeginBusyCursor()\n ints = list(NISTparms['emission']['emiss_intensities'])\n Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n if len(ints) > 1:\n ints[np.argmax(ints)] = -1\n Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n else:\n Lam2 = None\n histId = G2frame.AddSimulatedPowder(ttArr, intArr,\n 'NIST Fundamental Parameters simulation', Lam1, Lam2)\n controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, G2frame.root, 'Controls'))\n controldat = controls.get('data', {'deriv type': 'analytic',\n 'min dM/M': 0.001})\n Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId\n (G2frame, histId, 'Instrument Parameters'))\n peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Peak List'))\n bkg1, bkg2 = bkg = 
G2frame.GPXtree.GetItemPyData(G2gd.\n GetGPXtreeItemId(G2frame, histId, 'Background'))\n bkg1[1] = False\n bkg1[2] = 0\n bkg1[3] = 0.0\n limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Limits'))\n try:\n Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +\n NISTparms['axial']['slit_length_source']) / NISTparms[''][\n 'diffractometer_radius']\n except:\n pass\n for pos in peaklist:\n i = ttArr.searchsorted(pos)\n area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +\n maxPtsHM)])\n peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,\n area))\n histData = G2frame.GPXtree.GetItemPyData(histId)\n bxye = np.zeros(len(histData[1][1]))\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False,\n controldat, None)[0]\n for pk in peakData['peaks']:\n pk[1] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in ('U', 'V', 'W', 'X', 'Y'):\n Parms[p][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n Parms['SH/L'][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in Parms:\n if len(Parms[p]) == 3:\n Parms[p][0] = Parms[p][1]\n Parms[p][2] = False\n wx.EndBusyCursor()\n plswait.Destroy()\n pth = G2G.GetExportPath(G2frame)\n fldlg = wx.FileDialog(G2frame,\n 'Set name to save GSAS-II instrument parameters file', pth, '',\n 'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |\n wx.FD_OVERWRITE_PROMPT)\n try:\n if fldlg.ShowModal() == wx.ID_OK:\n filename = fldlg.GetPath()\n filename = os.path.splitext(filename)[0] + '.instprm'\n File = open(filename, 'w')\n File.write(\n '#GSAS-II instrument parameter file; do 
not add/delete items!\\n'\n )\n for item in Parms:\n File.write(item + ':' + str(Parms[item][1]) + '\\n')\n File.close()\n print('Instrument parameters saved to: ' + filename)\n finally:\n fldlg.Destroy()\n\n def _onClose(event):\n dlg.Destroy()\n\n def SetButtonStatus(done=False):\n OKbtn.Enable(bool(NISTparms))\n saveBtn.Enable(bool(NISTparms))\n if done:\n _onOK(None)\n\n def _onSetFPA(event):\n FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)\n FPdlg.CenterOnParent()\n FPdlg.Raise()\n FPdlg.Show()\n\n def _onSaveFPA(event):\n filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',\n 'dict of NIST FPA values', dlg)\n if not filename:\n return\n fp = open(filename, 'w')\n fp.write(\n '# parameters to be used in the NIST XRD Fundamental Parameters program\\n'\n )\n fp.write('{\\n')\n for key in sorted(NISTparms):\n fp.write(\" '\" + key + \"' : \" + str(NISTparms[key]) + ',')\n if not key:\n fp.write(' # global parameters')\n fp.write('\\n')\n fp.write('}\\n')\n fp.close()\n\n def _onReadFPA(event):\n filename = G2G.GetImportFile(G2frame, message=\n 'Read file with dict of values for NIST Fundamental Parameters',\n parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')\n if not filename:\n return\n if not filename[0]:\n return\n try:\n txt = open(filename[0], 'r').read()\n NISTparms.clear()\n array = np.array\n d = eval(txt)\n NISTparms.update(d)\n except Exception as err:\n G2G.G2MessageBox(dlg, u'Error reading file {}:{}\\n'.format(\n filename, err), 'Bad dict input')\n SetButtonStatus()\n if dlg.GetSizer():\n dlg.GetSizer().Clear(True)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,\n 'Fit Profile Parameters to Peaks from Fundamental Parameters',\n style=wx.ALIGN_CENTER), 0, wx.EXPAND)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)\n text = wx.StaticText(dlg, wx.ID_ANY, 
'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for key, defVal, text in (('minTT', 3.0,\n 'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,\n 'Location of last peak in 2theta (deg)'), ('step', 0.01,\n 'Pattern step size (deg 2theta)'), ('npeaks', 13.0,\n 'Number of peaks'), ('calcwid', 2.0,\n 'Range to compute each peak (deg 2theta)')):\n if key not in simParms:\n simParms[key] = defVal\n ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))\n txt.Wrap(280)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetFPA)\n saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')\n btnsizer.Add(saveBtn)\n saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)\n readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')\n btnsizer.Add(readBtn)\n readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +\n Citation, size=(350, -1))\n txt.Wrap(340)\n MainSizer.Add(txt, 0, wx.ALIGN_CENTER)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(dlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n SetButtonStatus()\n dlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(dlg)\n dlg.SetMinSize(dlg.GetSize())\n dlg.SendSizeEvent()\n 
dlg.Raise()\n\n\ndef GetFPAInput(G2frame):\n dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeSimSizer(G2frame, dlg)\n dlg.CenterOnParent()\n dlg.Show()\n return\n", "step-3": "<mask token>\nsimParms = {}\n<mask token>\nparmDict = {'numWave': 2}\n<mask token>\nNISTparms = {}\n<mask token>\nBraggBrentanoParms = [('divergence', 0.5,\n 'Bragg-Brentano divergence angle (degrees)'), ('soller_angle', 2.0,\n 'Soller slit axial divergence (degrees)'), ('Rs', 220,\n 'Diffractometer radius (mm)'), ('filament_length', 12.0,\n 'X-ray tube line focus length (mm)'), ('sample_length', 12.0,\n 'Illuminated sample length in axial direction (mm)'), (\n 'receiving_slit_length', 12.0,\n 'Length of receiving slit in axial direction (mm)'), ('LAC_cm', 0.0,\n 'Linear absorption coef. adjusted for packing density (cm-1)'), (\n 'sample_thickness', 1.0, 'Depth of sample (mm)'), ('convolution_steps',\n 8, 'Number of Fourier-space bins per two-theta step'), (\n 'tube-tails_width', 0.04,\n 'Tube filament width, in projection at takeoff angle (mm)'), (\n 'tube-tails_L-tail', -1.0,\n 'Left-side tube tails width, in projection (mm)'), ('tube-tails_R-tail',\n 1.0, 'Right-side tube tails width, in projection (mm)'), (\n 'tube-tails_rel-I', 0.001, 'Tube tails fractional intensity (no units)')]\n<mask token>\nBBPointDetector = [('receiving_slit_width', 0.2,\n 'Width of receiving slit (mm)')]\n<mask token>\nBBPSDDetector = [('lpsd_th2_angular_range', 3.0,\n 'Angular range observed by PSD (degrees 2Theta)'), (\n 'lpsd_equitorial_divergence', 0.1,\n 'Equatorial divergence of the primary beam (degrees)')]\n<mask token>\nCitation = \"\"\"MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. 
doi:10.6028/jres.120.014.\n\"\"\"\n\n\ndef SetCu2Wave():\n \"\"\"Set the parameters to the two-line Cu K alpha 1+2 spectrum\n \"\"\"\n parmDict['wave'] = {i: v for i, v in enumerate((1.540596, 1.544493))}\n parmDict['int'] = {i: v for i, v in enumerate((0.653817, 0.346183))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((0.501844, 0.626579))}\n\n\nSetCu2Wave()\n\n\ndef MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):\n \"\"\"Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. \n Parameter input is modeled after Topas input parameters.\n\n :param wx.Window FPdlg: Frame or Dialog where GUI will appear\n :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or \n (linear) position sensitive detector\n :param dict parmDict: dict to place parameters. If empty, default values from \n globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in \n the array. \n :returns: a sizer with the GUI controls\n \n \"\"\"\n\n def _onOK(event):\n XferFPAsettings(parmDict)\n SetButtonStatus(done=True)\n FPdlg.Destroy()\n\n def _onClose(event):\n SetButtonStatus()\n FPdlg.Destroy()\n\n def _onAddWave(event):\n parmDict['numWave'] += 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onRemWave(event):\n parmDict['numWave'] -= 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu5Wave(event):\n parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,\n 1.541058, 1.54441, 1.544721))}\n parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791, \n 0.0762, 0.2417, 0.0871))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437, \n 0.6, 0.52, 0.62))}\n parmDict['numWave'] = 5\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu2Wave(event):\n SetCu2Wave()\n parmDict['numWave'] = 2\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def 
_onSetPoint(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',\n SetButtonStatus)\n\n def _onSetPSD(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',\n SetButtonStatus)\n\n def PlotTopasFPA(event):\n XferFPAsettings(parmDict)\n ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'\n ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])\n intArr = np.zeros_like(ttArr)\n NISTpk = setupFPAcalc()\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[\n 'plotpos'], simParms['calcwid'], simParms['step'])\n except Exception as err:\n msg = 'Error computing convolution, revise input'\n print(msg)\n print(err)\n return\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkPts = len(peakObj.peak)\n pkMax = peakObj.peak.max()\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n return\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[\n :-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax\n G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\\\theta, deg$',\n labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,\n lines=True)\n if FPdlg.GetSizer():\n FPdlg.GetSizer().Clear(True)\n numWave = parmDict['numWave']\n if mode == 'BBpoint':\n itemList = BraggBrentanoParms + BBPointDetector\n elif mode == 'BBPSD':\n itemList = BraggBrentanoParms + BBPSDDetector\n else:\n raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add((-1, 5))\n waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)\n for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. 
Intensity',\n u'Lorentz Width\\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)\n ):\n text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n waveSizer.Add(text, 0, wx.EXPAND)\n if prm not in parmDict:\n parmDict[prm] = {}\n for i in range(numWave):\n if i not in parmDict[prm]:\n parmDict[prm][i] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))\n waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)\n MainSizer.Add(waveSizer)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onAddWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onRemWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')\n btn.Enable(not mode == 'BBpoint')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPoint)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')\n btn.Enable(not mode == 'BBPSD')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPSD)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER\n )\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for lbl, defVal, 
text in itemList:\n prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.\n ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)\n if lbl not in parmDict:\n parmDict[lbl] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))\n txt.Wrap(380)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))\n if 'plotpos' not in simParms:\n simParms['plotpos'] = simParms['minTT']\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))\n btnsizer.Add(ctrl)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(FPdlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n FPdlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(FPdlg)\n FPdlg.SetMinSize(FPdlg.GetSize())\n FPdlg.SendSizeEvent()\n\n\ndef XferFPAsettings(InpParms):\n \"\"\"convert Topas-type parameters to SI units for NIST and place in a dict sorted\n according to use in each convoluter\n\n :param dict InpParms: a dict with Topas-like parameters, as set in \n :func:`MakeTopasFPASizer`\n :returns: a nested dict with global parameters and those for each convolution\n \"\"\"\n wavenums = range(InpParms['numWave'])\n source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in\n wavenums])\n la = [InpParms['int'][i] for i in wavenums]\n 
source_intensities = np.array(la) / max(la)\n source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for\n i in wavenums])\n source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in\n wavenums])\n NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,\n 'emiss_intensities': source_intensities, 'emiss_gauss_widths':\n source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,\n 'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),\n 'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}\n if InpParms['filament_length'] == InpParms['receiving_slit_length']:\n InpParms['receiving_slit_length'] *= 1.00001\n NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *\n InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms\n ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[\n 'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[\n 'soller_angle'], 'angD_deg': InpParms['soller_angle']}\n if InpParms.get('LAC_cm', 0) > 0:\n NISTparms['absorption'] = {'absorption_coefficient': InpParms[\n 'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[\n 'sample_thickness']}\n elif 'absorption' in NISTparms:\n del NISTparms['absorption']\n if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(\n 'lpsd_th2_angular_range', 0) > 0:\n PSDdetector_length_mm = np.arcsin(np.pi * InpParms[\n 'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']\n NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[\n 'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0, \n PSDdetector_length_mm / 1000.0)}\n elif 'si_psd' in NISTparms:\n del NISTparms['si_psd']\n if InpParms.get('Specimen_Displacement'):\n NISTparms['displacement'] = {'specimen_displacement': 0.001 *\n InpParms['Specimen_Displacement']}\n elif 'displacement' in NISTparms:\n del NISTparms['displacement']\n if InpParms.get('receiving_slit_width'):\n NISTparms['receiver_slit'] = {'slit_width': 0.001 * 
InpParms[\n 'receiving_slit_width']}\n elif 'receiver_slit' in NISTparms:\n del NISTparms['receiver_slit']\n if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(\n 'tube-tails_rel-I', 0) > 0:\n NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(\n 'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(\n 'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(\n 'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(\n 'tube-tails_rel-I', 0.0)}\n elif 'tube_tails' in NISTparms:\n del NISTparms['tube_tails']\n max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]\n NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],\n 'dominant_wavelength': max_wavelength, 'diffractometer_radius': \n 0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}\n\n\ndef setupFPAcalc():\n \"\"\"Create a peak profile object using the NIST XRD Fundamental \n Parameters Code. \n \n :returns: a profile object that can provide information on \n each convolution or compute the composite peak shape. 
\n \"\"\"\n p = FP.FP_profile(anglemode='twotheta',\n output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get\n ('oversampling', 10))\n p.debug_cache = False\n for key in NISTparms:\n if key:\n p.set_parameters(convolver=key, **NISTparms[key])\n else:\n p.set_parameters(**NISTparms[key])\n return p\n\n\ndef doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):\n \"\"\"Compute a single peak using a NIST profile object\n\n :param object NISTpk: a peak profile computational object from the \n NIST XRD Fundamental Parameters Code, typically established from\n a call to :func:`SetupFPAcalc`\n :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)\n :param float twotheta: nominal center of peak (degrees)\n :param float calcwid: width to perform convolution (degrees)\n :param float step: step size\n \"\"\"\n center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)\n NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,\n twotheta_window_center_deg=ttArr[center_bin_idx],\n twotheta_approx_window_fullwidth_deg=calcwid)\n NISTpk.set_parameters(twotheta0_deg=twotheta)\n return center_bin_idx, NISTpk.compute_line_profile()\n\n\ndef MakeSimSizer(G2frame, dlg):\n \"\"\"Create a GUI to get simulation with parameters for Fundamental \n Parameters fitting. 
\n\n :param wx.Window dlg: Frame or Dialog where GUI will appear\n\n :returns: a sizer with the GUI controls \n \n \"\"\"\n\n def _onOK(event):\n msg = ''\n if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:\n msg += 'First peak minus half the calc width is too low'\n if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:\n if msg:\n msg += '\\n'\n msg += 'Last peak plus half the calc width is too high'\n if simParms['npeaks'] < 8:\n if msg:\n msg += '\\n'\n msg += 'At least 8 peaks are needed'\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n return\n ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /\n 1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[\n 'step'])\n intArr = np.zeros_like(ttArr)\n peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],\n simParms['npeaks'], endpoint=True)\n peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)\n NISTpk = setupFPAcalc()\n minPtsHM = len(intArr)\n maxPtsHM = 0\n for num, twoth_peak in enumerate(peaklist):\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,\n twoth_peak, simParms['calcwid'], simParms['step'])\n except:\n if msg:\n msg += '\\n'\n msg = 'Error computing convolution, revise input'\n continue\n if num == 0:\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkMax = peakObj.peak.max()\n pkPts = len(peakObj.peak)\n minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n break\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset\n ] += 10000 * peakObj.peak[:-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts\n ] += 10000 * peakObj.peak / pkMax\n if maxPtsHM * simParms['step'] > peakSpacing / 4:\n if msg:\n msg += '\\n'\n msg += (\n 
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'\n .format(maxPtsHM * simParms['step'], peakSpacing))\n if minPtsHM < 10:\n if msg:\n msg += '\\n'\n msg += (\n 'There are only {} points above the half-max. 10 are needed. Dropping step size.'\n .format(minPtsHM))\n simParms['step'] *= 0.5\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n wx.CallAfter(MakeSimSizer, G2frame, dlg)\n return\n dlg.Destroy()\n wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)\n\n def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):\n \"\"\"Perform a peak fit to the FP simulated pattern\n \"\"\"\n plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.\n RESIZE_BORDER)\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n txt = wx.StaticText(plswait, wx.ID_ANY,\n 'Fitting peaks...\\nPlease wait...', style=wx.ALIGN_CENTER)\n vbox.Add(txt, 0, wx.ALL | wx.EXPAND)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n plswait.SetSizer(vbox)\n plswait.Layout()\n plswait.CenterOnParent()\n plswait.Show()\n wx.BeginBusyCursor()\n ints = list(NISTparms['emission']['emiss_intensities'])\n Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n if len(ints) > 1:\n ints[np.argmax(ints)] = -1\n Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n else:\n Lam2 = None\n histId = G2frame.AddSimulatedPowder(ttArr, intArr,\n 'NIST Fundamental Parameters simulation', Lam1, Lam2)\n controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, G2frame.root, 'Controls'))\n controldat = controls.get('data', {'deriv type': 'analytic',\n 'min dM/M': 0.001})\n Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId\n (G2frame, histId, 'Instrument Parameters'))\n peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Peak List'))\n bkg1, bkg2 = bkg = 
G2frame.GPXtree.GetItemPyData(G2gd.\n GetGPXtreeItemId(G2frame, histId, 'Background'))\n bkg1[1] = False\n bkg1[2] = 0\n bkg1[3] = 0.0\n limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Limits'))\n try:\n Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +\n NISTparms['axial']['slit_length_source']) / NISTparms[''][\n 'diffractometer_radius']\n except:\n pass\n for pos in peaklist:\n i = ttArr.searchsorted(pos)\n area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +\n maxPtsHM)])\n peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,\n area))\n histData = G2frame.GPXtree.GetItemPyData(histId)\n bxye = np.zeros(len(histData[1][1]))\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False,\n controldat, None)[0]\n for pk in peakData['peaks']:\n pk[1] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in ('U', 'V', 'W', 'X', 'Y'):\n Parms[p][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n Parms['SH/L'][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in Parms:\n if len(Parms[p]) == 3:\n Parms[p][0] = Parms[p][1]\n Parms[p][2] = False\n wx.EndBusyCursor()\n plswait.Destroy()\n pth = G2G.GetExportPath(G2frame)\n fldlg = wx.FileDialog(G2frame,\n 'Set name to save GSAS-II instrument parameters file', pth, '',\n 'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |\n wx.FD_OVERWRITE_PROMPT)\n try:\n if fldlg.ShowModal() == wx.ID_OK:\n filename = fldlg.GetPath()\n filename = os.path.splitext(filename)[0] + '.instprm'\n File = open(filename, 'w')\n File.write(\n '#GSAS-II instrument parameter file; do 
not add/delete items!\\n'\n )\n for item in Parms:\n File.write(item + ':' + str(Parms[item][1]) + '\\n')\n File.close()\n print('Instrument parameters saved to: ' + filename)\n finally:\n fldlg.Destroy()\n\n def _onClose(event):\n dlg.Destroy()\n\n def SetButtonStatus(done=False):\n OKbtn.Enable(bool(NISTparms))\n saveBtn.Enable(bool(NISTparms))\n if done:\n _onOK(None)\n\n def _onSetFPA(event):\n FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)\n FPdlg.CenterOnParent()\n FPdlg.Raise()\n FPdlg.Show()\n\n def _onSaveFPA(event):\n filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',\n 'dict of NIST FPA values', dlg)\n if not filename:\n return\n fp = open(filename, 'w')\n fp.write(\n '# parameters to be used in the NIST XRD Fundamental Parameters program\\n'\n )\n fp.write('{\\n')\n for key in sorted(NISTparms):\n fp.write(\" '\" + key + \"' : \" + str(NISTparms[key]) + ',')\n if not key:\n fp.write(' # global parameters')\n fp.write('\\n')\n fp.write('}\\n')\n fp.close()\n\n def _onReadFPA(event):\n filename = G2G.GetImportFile(G2frame, message=\n 'Read file with dict of values for NIST Fundamental Parameters',\n parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')\n if not filename:\n return\n if not filename[0]:\n return\n try:\n txt = open(filename[0], 'r').read()\n NISTparms.clear()\n array = np.array\n d = eval(txt)\n NISTparms.update(d)\n except Exception as err:\n G2G.G2MessageBox(dlg, u'Error reading file {}:{}\\n'.format(\n filename, err), 'Bad dict input')\n SetButtonStatus()\n if dlg.GetSizer():\n dlg.GetSizer().Clear(True)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,\n 'Fit Profile Parameters to Peaks from Fundamental Parameters',\n style=wx.ALIGN_CENTER), 0, wx.EXPAND)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)\n text = wx.StaticText(dlg, wx.ID_ANY, 
'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for key, defVal, text in (('minTT', 3.0,\n 'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,\n 'Location of last peak in 2theta (deg)'), ('step', 0.01,\n 'Pattern step size (deg 2theta)'), ('npeaks', 13.0,\n 'Number of peaks'), ('calcwid', 2.0,\n 'Range to compute each peak (deg 2theta)')):\n if key not in simParms:\n simParms[key] = defVal\n ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))\n txt.Wrap(280)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetFPA)\n saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')\n btnsizer.Add(saveBtn)\n saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)\n readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')\n btnsizer.Add(readBtn)\n readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +\n Citation, size=(350, -1))\n txt.Wrap(340)\n MainSizer.Add(txt, 0, wx.ALIGN_CENTER)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(dlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n SetButtonStatus()\n dlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(dlg)\n dlg.SetMinSize(dlg.GetSize())\n dlg.SendSizeEvent()\n 
dlg.Raise()\n\n\ndef GetFPAInput(G2frame):\n dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeSimSizer(G2frame, dlg)\n dlg.CenterOnParent()\n dlg.Show()\n return\n", "step-4": "<mask token>\nfrom __future__ import division, print_function\nimport wx\nimport os.path\nimport numpy as np\nimport NIST_profile as FP\nimport GSASIIpath\nimport GSASIIctrlGUI as G2G\nimport GSASIIdataGUI as G2gd\nimport GSASIIplot as G2plt\nimport GSASIImath as G2mth\nimport GSASIIpwd as G2pwd\nsimParms = {}\n<mask token>\nparmDict = {'numWave': 2}\n<mask token>\nNISTparms = {}\n<mask token>\nBraggBrentanoParms = [('divergence', 0.5,\n 'Bragg-Brentano divergence angle (degrees)'), ('soller_angle', 2.0,\n 'Soller slit axial divergence (degrees)'), ('Rs', 220,\n 'Diffractometer radius (mm)'), ('filament_length', 12.0,\n 'X-ray tube line focus length (mm)'), ('sample_length', 12.0,\n 'Illuminated sample length in axial direction (mm)'), (\n 'receiving_slit_length', 12.0,\n 'Length of receiving slit in axial direction (mm)'), ('LAC_cm', 0.0,\n 'Linear absorption coef. 
adjusted for packing density (cm-1)'), (\n 'sample_thickness', 1.0, 'Depth of sample (mm)'), ('convolution_steps',\n 8, 'Number of Fourier-space bins per two-theta step'), (\n 'tube-tails_width', 0.04,\n 'Tube filament width, in projection at takeoff angle (mm)'), (\n 'tube-tails_L-tail', -1.0,\n 'Left-side tube tails width, in projection (mm)'), ('tube-tails_R-tail',\n 1.0, 'Right-side tube tails width, in projection (mm)'), (\n 'tube-tails_rel-I', 0.001, 'Tube tails fractional intensity (no units)')]\n<mask token>\nBBPointDetector = [('receiving_slit_width', 0.2,\n 'Width of receiving slit (mm)')]\n<mask token>\nBBPSDDetector = [('lpsd_th2_angular_range', 3.0,\n 'Angular range observed by PSD (degrees 2Theta)'), (\n 'lpsd_equitorial_divergence', 0.1,\n 'Equatorial divergence of the primary beam (degrees)')]\n<mask token>\nCitation = \"\"\"MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. doi:10.6028/jres.120.014.\n\"\"\"\n\n\ndef SetCu2Wave():\n \"\"\"Set the parameters to the two-line Cu K alpha 1+2 spectrum\n \"\"\"\n parmDict['wave'] = {i: v for i, v in enumerate((1.540596, 1.544493))}\n parmDict['int'] = {i: v for i, v in enumerate((0.653817, 0.346183))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((0.501844, 0.626579))}\n\n\nSetCu2Wave()\n\n\ndef MakeTopasFPASizer(G2frame, FPdlg, mode, SetButtonStatus):\n \"\"\"Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. \n Parameter input is modeled after Topas input parameters.\n\n :param wx.Window FPdlg: Frame or Dialog where GUI will appear\n :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or \n (linear) position sensitive detector\n :param dict parmDict: dict to place parameters. If empty, default values from \n globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in \n the array. 
\n :returns: a sizer with the GUI controls\n \n \"\"\"\n\n def _onOK(event):\n XferFPAsettings(parmDict)\n SetButtonStatus(done=True)\n FPdlg.Destroy()\n\n def _onClose(event):\n SetButtonStatus()\n FPdlg.Destroy()\n\n def _onAddWave(event):\n parmDict['numWave'] += 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onRemWave(event):\n parmDict['numWave'] -= 1\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu5Wave(event):\n parmDict['wave'] = {i: v for i, v in enumerate((1.534753, 1.540596,\n 1.541058, 1.54441, 1.544721))}\n parmDict['int'] = {i: v for i, v in enumerate((0.0159, 0.5791, \n 0.0762, 0.2417, 0.0871))}\n parmDict['lwidth'] = {i: v for i, v in enumerate((3.6854, 0.437, \n 0.6, 0.52, 0.62))}\n parmDict['numWave'] = 5\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetCu2Wave(event):\n SetCu2Wave()\n parmDict['numWave'] = 2\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, mode, SetButtonStatus)\n\n def _onSetPoint(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBpoint',\n SetButtonStatus)\n\n def _onSetPSD(event):\n wx.CallAfter(MakeTopasFPASizer, G2frame, FPdlg, 'BBPSD',\n SetButtonStatus)\n\n def PlotTopasFPA(event):\n XferFPAsettings(parmDict)\n ttArr = np.arange(max(0.5, simParms['plotpos'] - simParms['calcwid'\n ]), simParms['plotpos'] + simParms['calcwid'], simParms['step'])\n intArr = np.zeros_like(ttArr)\n NISTpk = setupFPAcalc()\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr, simParms[\n 'plotpos'], simParms['calcwid'], simParms['step'])\n except Exception as err:\n msg = 'Error computing convolution, revise input'\n print(msg)\n print(err)\n return\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkPts = len(peakObj.peak)\n pkMax = peakObj.peak.max()\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n 
return\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset] += 10000 * peakObj.peak[\n :-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts] += 10000 * peakObj.peak / pkMax\n G2plt.PlotXY(G2frame, [(ttArr, intArr)], labelX='$2\\\\theta, deg$',\n labelY='Intensity (arbitrary)', Title='FPA peak', newPlot=True,\n lines=True)\n if FPdlg.GetSizer():\n FPdlg.GetSizer().Clear(True)\n numWave = parmDict['numWave']\n if mode == 'BBpoint':\n itemList = BraggBrentanoParms + BBPointDetector\n elif mode == 'BBPSD':\n itemList = BraggBrentanoParms + BBPSDDetector\n else:\n raise Exception('Unknown mode in MakeTopasFPASizer: ' + mode)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add((-1, 5))\n waveSizer = wx.FlexGridSizer(cols=numWave + 1, hgap=3, vgap=5)\n for lbl, prm, defVal in zip((u'Wavelength (Å)', 'Rel. Intensity',\n u'Lorentz Width\\n(Å/1000)'), ('wave', 'int', 'lwidth'), (0.0, 1.0, 0.1)\n ):\n text = wx.StaticText(FPdlg, wx.ID_ANY, lbl, style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n waveSizer.Add(text, 0, wx.EXPAND)\n if prm not in parmDict:\n parmDict[prm] = {}\n for i in range(numWave):\n if i not in parmDict[prm]:\n parmDict[prm][i] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict[prm], i, size=(90, -1))\n waveSizer.Add(ctrl, 1, wx.ALIGN_CENTER_VERTICAL, 1)\n MainSizer.Add(waveSizer)\n MainSizer.Add((-1, 5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Add col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onAddWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Remove col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onRemWave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa1+2')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu2Wave)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'CuKa-5wave')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetCu5Wave)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n btnsizer = 
wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Point Dect.')\n btn.Enable(not mode == 'BBpoint')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPoint)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'PSD')\n btn.Enable(not mode == 'BBPSD')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetPSD)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=3, hgap=3, vgap=5)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'label', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(FPdlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER\n )\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for lbl, defVal, text in itemList:\n prmSizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, lbl), 1, wx.\n ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 1)\n if lbl not in parmDict:\n parmDict[lbl] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, parmDict, lbl, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(FPdlg, wx.ID_ANY, text, size=(400, -1))\n txt.Wrap(380)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, PlotTopasFPA)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' at '))\n if 'plotpos' not in simParms:\n simParms['plotpos'] = simParms['minTT']\n ctrl = G2G.ValidatedTxtCtrl(FPdlg, simParms, 'plotpos', size=(70, -1))\n btnsizer.Add(ctrl)\n btnsizer.Add(wx.StaticText(FPdlg, wx.ID_ANY, ' deg.'))\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(FPdlg, wx.ID_OK)\n OKbtn.SetDefault()\n 
btnsizer.Add(OKbtn)\n Cbtn = wx.Button(FPdlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n FPdlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(FPdlg)\n FPdlg.SetMinSize(FPdlg.GetSize())\n FPdlg.SendSizeEvent()\n\n\ndef XferFPAsettings(InpParms):\n \"\"\"convert Topas-type parameters to SI units for NIST and place in a dict sorted\n according to use in each convoluter\n\n :param dict InpParms: a dict with Topas-like parameters, as set in \n :func:`MakeTopasFPASizer`\n :returns: a nested dict with global parameters and those for each convolution\n \"\"\"\n wavenums = range(InpParms['numWave'])\n source_wavelengths_m = 1e-10 * np.array([InpParms['wave'][i] for i in\n wavenums])\n la = [InpParms['int'][i] for i in wavenums]\n source_intensities = np.array(la) / max(la)\n source_lor_widths_m = 1e-10 * 0.001 * np.array([InpParms['lwidth'][i] for\n i in wavenums])\n source_gauss_widths_m = 1e-10 * 0.001 * np.array([(0.001) for i in\n wavenums])\n NISTparms['emission'] = {'emiss_wavelengths': source_wavelengths_m,\n 'emiss_intensities': source_intensities, 'emiss_gauss_widths':\n source_gauss_widths_m, 'emiss_lor_widths': source_lor_widths_m,\n 'crystallite_size_gauss': 1e-09 * InpParms.get('Size_G', 1000000.0),\n 'crystallite_size_lor': 1e-09 * InpParms.get('Size_L', 1000000.0)}\n if InpParms['filament_length'] == InpParms['receiving_slit_length']:\n InpParms['receiving_slit_length'] *= 1.00001\n NISTparms['axial'] = {'axDiv': 'full', 'slit_length_source': 0.001 *\n InpParms['filament_length'], 'slit_length_target': 0.001 * InpParms\n ['receiving_slit_length'], 'length_sample': 0.001 * InpParms[\n 'sample_length'], 'n_integral_points': 10, 'angI_deg': InpParms[\n 'soller_angle'], 'angD_deg': InpParms['soller_angle']}\n if InpParms.get('LAC_cm', 0) > 0:\n NISTparms['absorption'] = 
{'absorption_coefficient': InpParms[\n 'LAC_cm'] * 100, 'sample_thickness': 0.001 * InpParms[\n 'sample_thickness']}\n elif 'absorption' in NISTparms:\n del NISTparms['absorption']\n if InpParms.get('lpsd_equitorial_divergence', 0) > 0 and InpParms.get(\n 'lpsd_th2_angular_range', 0) > 0:\n PSDdetector_length_mm = np.arcsin(np.pi * InpParms[\n 'lpsd_th2_angular_range'] / 180.0) * InpParms['Rs']\n NISTparms['si_psd'] = {'equatorial_divergence_deg': InpParms[\n 'lpsd_equitorial_divergence'], 'si_psd_window_bounds': (0.0, \n PSDdetector_length_mm / 1000.0)}\n elif 'si_psd' in NISTparms:\n del NISTparms['si_psd']\n if InpParms.get('Specimen_Displacement'):\n NISTparms['displacement'] = {'specimen_displacement': 0.001 *\n InpParms['Specimen_Displacement']}\n elif 'displacement' in NISTparms:\n del NISTparms['displacement']\n if InpParms.get('receiving_slit_width'):\n NISTparms['receiver_slit'] = {'slit_width': 0.001 * InpParms[\n 'receiving_slit_width']}\n elif 'receiver_slit' in NISTparms:\n del NISTparms['receiver_slit']\n if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(\n 'tube-tails_rel-I', 0) > 0:\n NISTparms['tube_tails'] = {'main_width': 0.001 * InpParms.get(\n 'tube-tails_width', 0.0), 'tail_left': -0.001 * InpParms.get(\n 'tube-tails_L-tail', 0.0), 'tail_right': 0.001 * InpParms.get(\n 'tube-tails_R-tail', 0.0), 'tail_intens': InpParms.get(\n 'tube-tails_rel-I', 0.0)}\n elif 'tube_tails' in NISTparms:\n del NISTparms['tube_tails']\n max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]\n NISTparms[''] = {'equatorial_divergence_deg': InpParms['divergence'],\n 'dominant_wavelength': max_wavelength, 'diffractometer_radius': \n 0.001 * InpParms['Rs'], 'oversampling': InpParms['convolution_steps']}\n\n\ndef setupFPAcalc():\n \"\"\"Create a peak profile object using the NIST XRD Fundamental \n Parameters Code. \n \n :returns: a profile object that can provide information on \n each convolution or compute the composite peak shape. 
\n \"\"\"\n p = FP.FP_profile(anglemode='twotheta',\n output_gaussian_smoother_bins_sigma=1.0, oversampling=NISTparms.get\n ('oversampling', 10))\n p.debug_cache = False\n for key in NISTparms:\n if key:\n p.set_parameters(convolver=key, **NISTparms[key])\n else:\n p.set_parameters(**NISTparms[key])\n return p\n\n\ndef doFPAcalc(NISTpk, ttArr, twotheta, calcwid, step):\n \"\"\"Compute a single peak using a NIST profile object\n\n :param object NISTpk: a peak profile computational object from the \n NIST XRD Fundamental Parameters Code, typically established from\n a call to :func:`SetupFPAcalc`\n :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)\n :param float twotheta: nominal center of peak (degrees)\n :param float calcwid: width to perform convolution (degrees)\n :param float step: step size\n \"\"\"\n center_bin_idx = min(ttArr.searchsorted(twotheta), len(ttArr) - 1)\n NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,\n twotheta_window_center_deg=ttArr[center_bin_idx],\n twotheta_approx_window_fullwidth_deg=calcwid)\n NISTpk.set_parameters(twotheta0_deg=twotheta)\n return center_bin_idx, NISTpk.compute_line_profile()\n\n\ndef MakeSimSizer(G2frame, dlg):\n \"\"\"Create a GUI to get simulation with parameters for Fundamental \n Parameters fitting. 
\n\n :param wx.Window dlg: Frame or Dialog where GUI will appear\n\n :returns: a sizer with the GUI controls \n \n \"\"\"\n\n def _onOK(event):\n msg = ''\n if simParms['minTT'] - simParms['calcwid'] / 1.5 < 0.1:\n msg += 'First peak minus half the calc width is too low'\n if simParms['maxTT'] + simParms['calcwid'] / 1.5 > 175:\n if msg:\n msg += '\\n'\n msg += 'Last peak plus half the calc width is too high'\n if simParms['npeaks'] < 8:\n if msg:\n msg += '\\n'\n msg += 'At least 8 peaks are needed'\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n return\n ttArr = np.arange(max(0.5, simParms['minTT'] - simParms['calcwid'] /\n 1.5), simParms['maxTT'] + simParms['calcwid'] / 1.5, simParms[\n 'step'])\n intArr = np.zeros_like(ttArr)\n peaklist = np.linspace(simParms['minTT'], simParms['maxTT'],\n simParms['npeaks'], endpoint=True)\n peakSpacing = (peaklist[-1] - peaklist[0]) / (len(peaklist) - 1)\n NISTpk = setupFPAcalc()\n minPtsHM = len(intArr)\n maxPtsHM = 0\n for num, twoth_peak in enumerate(peaklist):\n try:\n center_bin_idx, peakObj = doFPAcalc(NISTpk, ttArr,\n twoth_peak, simParms['calcwid'], simParms['step'])\n except:\n if msg:\n msg += '\\n'\n msg = 'Error computing convolution, revise input'\n continue\n if num == 0:\n G2plt.PlotFPAconvolutors(G2frame, NISTpk)\n pkMax = peakObj.peak.max()\n pkPts = len(peakObj.peak)\n minPtsHM = min(minPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n maxPtsHM = max(maxPtsHM, sum(peakObj.peak >= 0.5 * pkMax))\n startInd = center_bin_idx - pkPts // 2\n if startInd < 0:\n intArr[:startInd + pkPts] += 10000 * peakObj.peak[-startInd:\n ] / pkMax\n elif startInd > len(intArr):\n break\n elif startInd + pkPts >= len(intArr):\n offset = pkPts - len(intArr[startInd:])\n intArr[startInd:startInd + pkPts - offset\n ] += 10000 * peakObj.peak[:-offset] / pkMax\n else:\n intArr[startInd:startInd + pkPts\n ] += 10000 * peakObj.peak / pkMax\n if maxPtsHM * simParms['step'] > peakSpacing / 4:\n if msg:\n msg += '\\n'\n msg += (\n 
'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'\n .format(maxPtsHM * simParms['step'], peakSpacing))\n if minPtsHM < 10:\n if msg:\n msg += '\\n'\n msg += (\n 'There are only {} points above the half-max. 10 are needed. Dropping step size.'\n .format(minPtsHM))\n simParms['step'] *= 0.5\n if msg:\n G2G.G2MessageBox(dlg, msg, 'Bad input, try again')\n wx.CallAfter(MakeSimSizer, G2frame, dlg)\n return\n dlg.Destroy()\n wx.CallAfter(FitFPApeaks, ttArr, intArr, peaklist, maxPtsHM)\n\n def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):\n \"\"\"Perform a peak fit to the FP simulated pattern\n \"\"\"\n plswait = wx.Dialog(G2frame, style=wx.DEFAULT_DIALOG_STYLE | wx.\n RESIZE_BORDER)\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n txt = wx.StaticText(plswait, wx.ID_ANY,\n 'Fitting peaks...\\nPlease wait...', style=wx.ALIGN_CENTER)\n vbox.Add(txt, 0, wx.ALL | wx.EXPAND)\n vbox.Add((1, 1), 1, wx.ALL | wx.EXPAND, 1)\n plswait.SetSizer(vbox)\n plswait.Layout()\n plswait.CenterOnParent()\n plswait.Show()\n wx.BeginBusyCursor()\n ints = list(NISTparms['emission']['emiss_intensities'])\n Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n if len(ints) > 1:\n ints[np.argmax(ints)] = -1\n Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)\n ] * 10000000000.0\n else:\n Lam2 = None\n histId = G2frame.AddSimulatedPowder(ttArr, intArr,\n 'NIST Fundamental Parameters simulation', Lam1, Lam2)\n controls = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, G2frame.root, 'Controls'))\n controldat = controls.get('data', {'deriv type': 'analytic',\n 'min dM/M': 0.001})\n Parms, Parms2 = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId\n (G2frame, histId, 'Instrument Parameters'))\n peakData = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Peak List'))\n bkg1, bkg2 = bkg = 
G2frame.GPXtree.GetItemPyData(G2gd.\n GetGPXtreeItemId(G2frame, histId, 'Background'))\n bkg1[1] = False\n bkg1[2] = 0\n bkg1[3] = 0.0\n limits = G2frame.GPXtree.GetItemPyData(G2gd.GetGPXtreeItemId(\n G2frame, histId, 'Limits'))\n try:\n Parms['SH/L'][1] = 0.25 * (NISTparms['axial']['length_sample'] +\n NISTparms['axial']['slit_length_source']) / NISTparms[''][\n 'diffractometer_radius']\n except:\n pass\n for pos in peaklist:\n i = ttArr.searchsorted(pos)\n area = sum(intArr[max(0, i - maxPtsHM):min(len(intArr), i +\n maxPtsHM)])\n peakData['peaks'].append(G2mth.setPeakparms(Parms, Parms2, pos,\n area))\n histData = G2frame.GPXtree.GetItemPyData(histId)\n bxye = np.zeros(len(histData[1][1]))\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False,\n controldat, None)[0]\n for pk in peakData['peaks']:\n pk[1] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in ('U', 'V', 'W', 'X', 'Y'):\n Parms[p][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n Parms['SH/L'][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ', peakData['peaks'], bkg,\n limits[1], Parms, Parms2, histData[1], bxye, [], False, controldat\n )[0]\n for p in Parms:\n if len(Parms[p]) == 3:\n Parms[p][0] = Parms[p][1]\n Parms[p][2] = False\n wx.EndBusyCursor()\n plswait.Destroy()\n pth = G2G.GetExportPath(G2frame)\n fldlg = wx.FileDialog(G2frame,\n 'Set name to save GSAS-II instrument parameters file', pth, '',\n 'instrument parameter files (*.instprm)|*.instprm', wx.FD_SAVE |\n wx.FD_OVERWRITE_PROMPT)\n try:\n if fldlg.ShowModal() == wx.ID_OK:\n filename = fldlg.GetPath()\n filename = os.path.splitext(filename)[0] + '.instprm'\n File = open(filename, 'w')\n File.write(\n '#GSAS-II instrument parameter file; do 
not add/delete items!\\n'\n )\n for item in Parms:\n File.write(item + ':' + str(Parms[item][1]) + '\\n')\n File.close()\n print('Instrument parameters saved to: ' + filename)\n finally:\n fldlg.Destroy()\n\n def _onClose(event):\n dlg.Destroy()\n\n def SetButtonStatus(done=False):\n OKbtn.Enable(bool(NISTparms))\n saveBtn.Enable(bool(NISTparms))\n if done:\n _onOK(None)\n\n def _onSetFPA(event):\n FPdlg = wx.Dialog(dlg, wx.ID_ANY, 'FPA parameters', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeTopasFPASizer(G2frame, FPdlg, 'BBpoint', SetButtonStatus)\n FPdlg.CenterOnParent()\n FPdlg.Raise()\n FPdlg.Show()\n\n def _onSaveFPA(event):\n filename = G2G.askSaveFile(G2frame, '', '.NISTfpa',\n 'dict of NIST FPA values', dlg)\n if not filename:\n return\n fp = open(filename, 'w')\n fp.write(\n '# parameters to be used in the NIST XRD Fundamental Parameters program\\n'\n )\n fp.write('{\\n')\n for key in sorted(NISTparms):\n fp.write(\" '\" + key + \"' : \" + str(NISTparms[key]) + ',')\n if not key:\n fp.write(' # global parameters')\n fp.write('\\n')\n fp.write('}\\n')\n fp.close()\n\n def _onReadFPA(event):\n filename = G2G.GetImportFile(G2frame, message=\n 'Read file with dict of values for NIST Fundamental Parameters',\n parent=dlg, wildcard='dict of NIST FPA values|*.NISTfpa')\n if not filename:\n return\n if not filename[0]:\n return\n try:\n txt = open(filename[0], 'r').read()\n NISTparms.clear()\n array = np.array\n d = eval(txt)\n NISTparms.update(d)\n except Exception as err:\n G2G.G2MessageBox(dlg, u'Error reading file {}:{}\\n'.format(\n filename, err), 'Bad dict input')\n SetButtonStatus()\n if dlg.GetSizer():\n dlg.GetSizer().Clear(True)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add(wx.StaticText(dlg, wx.ID_ANY,\n 'Fit Profile Parameters to Peaks from Fundamental Parameters',\n style=wx.ALIGN_CENTER), 0, wx.EXPAND)\n MainSizer.Add((-1, 5))\n prmSizer = wx.FlexGridSizer(cols=2, hgap=3, vgap=5)\n text = wx.StaticText(dlg, wx.ID_ANY, 
'value', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n text = wx.StaticText(dlg, wx.ID_ANY, 'explanation', style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text, 0, wx.EXPAND)\n for key, defVal, text in (('minTT', 3.0,\n 'Location of first peak in 2theta (deg)'), ('maxTT', 123.0,\n 'Location of last peak in 2theta (deg)'), ('step', 0.01,\n 'Pattern step size (deg 2theta)'), ('npeaks', 13.0,\n 'Number of peaks'), ('calcwid', 2.0,\n 'Range to compute each peak (deg 2theta)')):\n if key not in simParms:\n simParms[key] = defVal\n ctrl = G2G.ValidatedTxtCtrl(dlg, simParms, key, size=(70, -1))\n prmSizer.Add(ctrl, 1, wx.ALL | wx.ALIGN_CENTER_VERTICAL, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, text, size=(300, -1))\n txt.Wrap(280)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(dlg, wx.ID_ANY, 'Input FP vals')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON, _onSetFPA)\n saveBtn = wx.Button(dlg, wx.ID_ANY, 'Save FPA dict')\n btnsizer.Add(saveBtn)\n saveBtn.Bind(wx.EVT_BUTTON, _onSaveFPA)\n readBtn = wx.Button(dlg, wx.ID_ANY, 'Read FPA dict')\n btnsizer.Add(readBtn)\n readBtn.Bind(wx.EVT_BUTTON, _onReadFPA)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n txt = wx.StaticText(dlg, wx.ID_ANY, 'If you use this, please cite: ' +\n Citation, size=(350, -1))\n txt.Wrap(340)\n MainSizer.Add(txt, 0, wx.ALIGN_CENTER)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(dlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(dlg, wx.ID_CLOSE, 'Cancel')\n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1, 4), 1, wx.EXPAND, 1)\n OKbtn.Bind(wx.EVT_BUTTON, _onOK)\n Cbtn.Bind(wx.EVT_BUTTON, _onClose)\n SetButtonStatus()\n dlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(dlg)\n dlg.SetMinSize(dlg.GetSize())\n dlg.SendSizeEvent()\n 
dlg.Raise()\n\n\ndef GetFPAInput(G2frame):\n dlg = wx.Dialog(G2frame, wx.ID_ANY, 'FPA input', style=wx.\n DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n MakeSimSizer(G2frame, dlg)\n dlg.CenterOnParent()\n dlg.Show()\n return\n", "step-5": "# -*- coding: utf-8 -*-\n########### SVN repository information ###################\n# $Date: $\n# $Author: $\n# $Revision: $\n# $URL: $\n# $Id: $\n########### SVN repository information ###################\n'''\n*GSASIIfpaGUI: Fundamental Parameters Routines*\n===============================================\n\nThis module contains routines for getting Fundamental Parameters \nApproach (FPA) input, setting up for running the NIST XRD Fundamental \nParameters Code, plotting the convolutors and computing a set of peaks\ngenerated by that code. \n\n'''\nfrom __future__ import division, print_function\nimport wx\nimport os.path\nimport numpy as np\n\nimport NIST_profile as FP\n\nimport GSASIIpath\nimport GSASIIctrlGUI as G2G\nimport GSASIIdataGUI as G2gd\nimport GSASIIplot as G2plt\nimport GSASIImath as G2mth\nimport GSASIIpwd as G2pwd\n\nsimParms = {}\n'''Parameters to set range for pattern simulation\n'''\n\nparmDict = {'numWave':2}\n'''Parameter dict used for reading Topas-style values. These are \nconverted to SI units and placed into :data:`NISTparms`\n'''\n\nNISTparms = {}\n'''Parameters in a nested dict, with an entry for each concolutor. Entries in \nthose dicts have values in SI units (of course). 
NISTparms can be \ncan be input directly or can be from created from :data:`parmDict`\nby :func:`XferFPAsettings`\n'''\n\nBraggBrentanoParms = [\n ('divergence', 0.5, 'Bragg-Brentano divergence angle (degrees)'),\n ('soller_angle', 2.0, 'Soller slit axial divergence (degrees)'),\n ('Rs', 220, 'Diffractometer radius (mm)'),\n ('filament_length', 12., 'X-ray tube line focus length (mm)'),\n ('sample_length', 12., 'Illuminated sample length in axial direction (mm)'),\n ('receiving_slit_length', 12., 'Length of receiving slit in axial direction (mm)'),\n ('LAC_cm', 0.,'Linear absorption coef. adjusted for packing density (cm-1)'),\n ('sample_thickness', 1., 'Depth of sample (mm)'),\n ('convolution_steps', 8, 'Number of Fourier-space bins per two-theta step'),\n ('tube-tails_width', 0.04,'Tube filament width, in projection at takeoff angle (mm)'),\n ('tube-tails_L-tail', -1.,'Left-side tube tails width, in projection (mm)'), \n ('tube-tails_R-tail', 1.,'Right-side tube tails width, in projection (mm)'),\n ('tube-tails_rel-I', 0.001,'Tube tails fractional intensity (no units)'),\n ]\n'''FPA dict entries used in :func:`MakeTopasFPASizer`. Tuple contains\na dict key, a default value and a description. These are the parameters\nneeded for all Bragg Brentano instruments\n'''\n\nBBPointDetector = [\n ('receiving_slit_width', 0.2, 'Width of receiving slit (mm)'),]\n'''Additional FPA dict entries used in :func:`MakeTopasFPASizer` \nneeded for Bragg Brentano instruments with point detectors.\n'''\n\nBBPSDDetector = [\n ('lpsd_th2_angular_range', 3.0, 'Angular range observed by PSD (degrees 2Theta)'),\n ('lpsd_equitorial_divergence', 0.1, 'Equatorial divergence of the primary beam (degrees)'),]\n'''Additional FPA dict entries used in :func:`MakeTopasFPASizer` \nneeded for Bragg Brentano instruments with linear (1-D) PSD detectors.\n'''\n\nCitation = '''MH Mendenhall, K Mullen && JP Cline. (2015) J. Res. of NIST 120, 223-251. 
doi:10.6028/jres.120.014.\n'''\n \ndef SetCu2Wave():\n '''Set the parameters to the two-line Cu K alpha 1+2 spectrum\n '''\n parmDict['wave'] = {i:v for i,v in enumerate((1.540596,1.544493))}\n parmDict['int'] = {i:v for i,v in enumerate((0.653817, 0.346183))}\n parmDict['lwidth'] = {i:v for i,v in enumerate((0.501844,0.626579))}\nSetCu2Wave() # use these as default\n\ndef MakeTopasFPASizer(G2frame,FPdlg,mode,SetButtonStatus):\n '''Create a GUI with parameters for the NIST XRD Fundamental Parameters Code. \n Parameter input is modeled after Topas input parameters.\n\n :param wx.Window FPdlg: Frame or Dialog where GUI will appear\n :param str mode: either 'BBpoint' or 'BBPSD' for Bragg-Brentano point detector or \n (linear) position sensitive detector\n :param dict parmDict: dict to place parameters. If empty, default values from \n globals BraggBrentanoParms, BBPointDetector & BBPSDDetector will be placed in \n the array. \n :returns: a sizer with the GUI controls\n \n '''\n def _onOK(event):\n XferFPAsettings(parmDict)\n SetButtonStatus(done=True) # done=True triggers the simulation\n FPdlg.Destroy()\n def _onClose(event):\n SetButtonStatus()\n FPdlg.Destroy()\n def _onAddWave(event):\n parmDict['numWave'] += 1 \n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)\n def _onRemWave(event):\n parmDict['numWave'] -= 1 \n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)\n def _onSetCu5Wave(event):\n parmDict['wave'] = {i:v for i,v in enumerate((1.534753,1.540596,1.541058,1.54441,1.544721))}\n parmDict['int'] = {i:v for i,v in enumerate((0.0159, 0.5791, 0.0762, 0.2417, 0.0871))}\n parmDict['lwidth'] = {i:v for i,v in enumerate((3.6854, 0.437, 0.6, 0.52, 0.62))}\n parmDict['numWave'] = 5\n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)\n def _onSetCu2Wave(event):\n SetCu2Wave()\n parmDict['numWave'] = 2\n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,mode,SetButtonStatus)\n def _onSetPoint(event):\n 
wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,'BBpoint',SetButtonStatus)\n def _onSetPSD(event):\n wx.CallAfter(MakeTopasFPASizer,G2frame,FPdlg,'BBPSD',SetButtonStatus)\n def PlotTopasFPA(event):\n XferFPAsettings(parmDict)\n ttArr = np.arange(max(0.5,\n simParms['plotpos']-simParms['calcwid']),\n simParms['plotpos']+simParms['calcwid'],\n simParms['step'])\n intArr = np.zeros_like(ttArr)\n NISTpk = setupFPAcalc()\n try:\n center_bin_idx,peakObj = doFPAcalc(\n NISTpk,ttArr,simParms['plotpos'],simParms['calcwid'],\n simParms['step'])\n except Exception as err:\n msg = \"Error computing convolution, revise input\"\n print(msg)\n print(err)\n return\n G2plt.PlotFPAconvolutors(G2frame,NISTpk)\n pkPts = len(peakObj.peak)\n pkMax = peakObj.peak.max()\n startInd = center_bin_idx-(pkPts//2) #this should be the aligned start of the new data\n # scale peak so max I=10,000 and add into intensity array\n if startInd < 0:\n intArr[:startInd+pkPts] += 10000 * peakObj.peak[-startInd:]/pkMax\n elif startInd > len(intArr):\n return\n elif startInd+pkPts >= len(intArr):\n offset = pkPts - len( intArr[startInd:] )\n intArr[startInd:startInd+pkPts-offset] += 10000 * peakObj.peak[:-offset]/pkMax\n else:\n intArr[startInd:startInd+pkPts] += 10000 * peakObj.peak/pkMax\n G2plt.PlotXY(G2frame, [(ttArr, intArr)],\n labelX=r'$2\\theta, deg$',\n labelY=r'Intensity (arbitrary)',\n Title='FPA peak', newPlot=True, lines=True)\n\n if FPdlg.GetSizer(): FPdlg.GetSizer().Clear(True)\n numWave = parmDict['numWave']\n if mode == 'BBpoint':\n itemList = BraggBrentanoParms+BBPointDetector\n elif mode == 'BBPSD':\n itemList = BraggBrentanoParms+BBPSDDetector\n else:\n raise Exception('Unknown mode in MakeTopasFPASizer: '+mode)\n \n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add((-1,5))\n waveSizer = wx.FlexGridSizer(cols=numWave+1,hgap=3,vgap=5)\n for lbl,prm,defVal in zip(\n (u'Wavelength (\\u212b)','Rel. 
Intensity',u'Lorentz Width\\n(\\u212b/1000)'),\n ('wave','int','lwidth'),\n (0.0, 1.0, 0.1),\n ):\n text = wx.StaticText(FPdlg,wx.ID_ANY,lbl,style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n waveSizer.Add(text,0,wx.EXPAND)\n if prm not in parmDict: parmDict[prm] = {}\n for i in range(numWave):\n if i not in parmDict[prm]: parmDict[prm][i] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg,parmDict[prm],i,size=(90,-1))\n waveSizer.Add(ctrl,1,wx.ALIGN_CENTER_VERTICAL,1)\n MainSizer.Add(waveSizer)\n MainSizer.Add((-1,5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY,'Add col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onAddWave)\n btn = wx.Button(FPdlg, wx.ID_ANY,'Remove col')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onRemWave)\n btn = wx.Button(FPdlg, wx.ID_ANY,'CuKa1+2')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onSetCu2Wave)\n btn = wx.Button(FPdlg, wx.ID_ANY,'CuKa-5wave')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onSetCu5Wave)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,5))\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY,'Point Dect.')\n btn.Enable(not mode == 'BBpoint')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onSetPoint)\n btn = wx.Button(FPdlg, wx.ID_ANY,'PSD')\n btn.Enable(not mode == 'BBPSD')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onSetPSD)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,5))\n \n prmSizer = wx.FlexGridSizer(cols=3,hgap=3,vgap=5)\n text = wx.StaticText(FPdlg,wx.ID_ANY,'label',style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text,0,wx.EXPAND)\n text = wx.StaticText(FPdlg,wx.ID_ANY,'value',style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text,0,wx.EXPAND)\n text = wx.StaticText(FPdlg,wx.ID_ANY,'explanation',style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text,0,wx.EXPAND)\n for lbl,defVal,text in itemList:\n 
prmSizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,lbl),1,wx.ALIGN_RIGHT|wx.ALIGN_CENTER_VERTICAL,1)\n if lbl not in parmDict: parmDict[lbl] = defVal\n ctrl = G2G.ValidatedTxtCtrl(FPdlg,parmDict,lbl,size=(70,-1))\n prmSizer.Add(ctrl,1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,1)\n txt = wx.StaticText(FPdlg,wx.ID_ANY,text,size=(400,-1))\n txt.Wrap(380)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n MainSizer.Add((-1,4),1,wx.EXPAND,1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(FPdlg, wx.ID_ANY, 'Plot peak')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,PlotTopasFPA)\n btnsizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,' at '))\n if 'plotpos' not in simParms: simParms['plotpos'] = simParms['minTT']\n ctrl = G2G.ValidatedTxtCtrl(FPdlg,simParms,'plotpos',size=(70,-1))\n btnsizer.Add(ctrl)\n btnsizer.Add(wx.StaticText(FPdlg,wx.ID_ANY,' deg.')) \n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,4),1,wx.EXPAND,1)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(FPdlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(FPdlg, wx.ID_CLOSE,\"Cancel\") \n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,4),1,wx.EXPAND,1)\n # bindings for close of window\n OKbtn.Bind(wx.EVT_BUTTON,_onOK)\n Cbtn.Bind(wx.EVT_BUTTON,_onClose)\n FPdlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(FPdlg)\n FPdlg.SetMinSize(FPdlg.GetSize())\n FPdlg.SendSizeEvent()\n\ndef XferFPAsettings(InpParms):\n '''convert Topas-type parameters to SI units for NIST and place in a dict sorted\n according to use in each convoluter\n\n :param dict InpParms: a dict with Topas-like parameters, as set in \n :func:`MakeTopasFPASizer`\n :returns: a nested dict with global parameters and those for each convolution\n '''\n wavenums = range(InpParms['numWave'])\n source_wavelengths_m = 1.e-10 * np.array([InpParms['wave'][i] for i in wavenums])\n la = [InpParms['int'][i] for i in wavenums]\n source_intensities = np.array(la)/max(la)\n 
source_lor_widths_m = 1.e-10 * 1.e-3 * np.array([InpParms['lwidth'][i] for i in wavenums])\n source_gauss_widths_m = 1.e-10 * 1.e-3 * np.array([0.001 for i in wavenums])\n \n NISTparms[\"emission\"] = {'emiss_wavelengths' : source_wavelengths_m,\n 'emiss_intensities' : source_intensities,\n 'emiss_gauss_widths' : source_gauss_widths_m,\n 'emiss_lor_widths' : source_lor_widths_m,\n 'crystallite_size_gauss' : 1.e-9 * InpParms.get('Size_G',1e6),\n 'crystallite_size_lor' : 1.e-9 * InpParms.get('Size_L',1e6)}\n \n if InpParms['filament_length'] == InpParms['receiving_slit_length']: # workaround: \n InpParms['receiving_slit_length'] *= 1.00001 # avoid bug when slit lengths are identical\n NISTparms[\"axial\"] = {\n 'axDiv':\"full\", 'slit_length_source' : 1e-3*InpParms['filament_length'],\n 'slit_length_target' : 1e-3*InpParms['receiving_slit_length'],\n 'length_sample' : 1e-3 * InpParms['sample_length'], \n 'n_integral_points' : 10,\n 'angI_deg' : InpParms['soller_angle'],\n 'angD_deg': InpParms['soller_angle']\n }\n if InpParms.get('LAC_cm',0) > 0:\n NISTparms[\"absorption\"] = {\n 'absorption_coefficient': InpParms['LAC_cm']*100, #like LaB6, in m^(-1)\n 'sample_thickness': 1e-3 * InpParms['sample_thickness'],\n }\n elif \"absorption\" in NISTparms:\n del NISTparms[\"absorption\"]\n\n if InpParms.get('lpsd_equitorial_divergence',0) > 0 and InpParms.get(\n 'lpsd_th2_angular_range',0) > 0:\n PSDdetector_length_mm=np.arcsin(np.pi*InpParms['lpsd_th2_angular_range']/180.\n )*InpParms['Rs'] # mm\n NISTparms[\"si_psd\"] = {\n 'equatorial_divergence_deg': InpParms['lpsd_equitorial_divergence'],\n 'si_psd_window_bounds': (0.,PSDdetector_length_mm/1000.)\n }\n elif \"si_psd\" in NISTparms:\n del NISTparms[\"si_psd\"]\n \n if InpParms.get('Specimen_Displacement'):\n NISTparms[\"displacement\"] = {'specimen_displacement': 1e-3 * InpParms['Specimen_Displacement']}\n elif \"displacement\" in NISTparms:\n del NISTparms[\"displacement\"]\n\n if InpParms.get('receiving_slit_width'):\n 
NISTparms[\"receiver_slit\"] = {'slit_width':1e-3*InpParms['receiving_slit_width']}\n elif \"receiver_slit\" in NISTparms:\n del NISTparms[\"receiver_slit\"]\n\n if InpParms.get('tube-tails_width', 0) > 0 and InpParms.get(\n 'tube-tails_rel-I',0) > 0:\n NISTparms[\"tube_tails\"] = {\n 'main_width' : 1e-3 * InpParms.get('tube-tails_width', 0.),\n 'tail_left' : -1e-3 * InpParms.get('tube-tails_L-tail',0.),\n 'tail_right' : 1e-3 * InpParms.get('tube-tails_R-tail',0.),\n 'tail_intens' : InpParms.get('tube-tails_rel-I',0.),}\n elif \"tube_tails\" in NISTparms:\n del NISTparms[\"tube_tails\"]\n\n # set Global parameters\n max_wavelength = source_wavelengths_m[np.argmax(source_intensities)]\n NISTparms[\"\"] = {\n 'equatorial_divergence_deg' : InpParms['divergence'],\n 'dominant_wavelength' : max_wavelength,\n 'diffractometer_radius' : 1e-3* InpParms['Rs'],\n 'oversampling' : InpParms['convolution_steps'],\n }\ndef setupFPAcalc():\n '''Create a peak profile object using the NIST XRD Fundamental \n Parameters Code. \n \n :returns: a profile object that can provide information on \n each convolution or compute the composite peak shape. 
\n '''\n p=FP.FP_profile(anglemode=\"twotheta\",\n output_gaussian_smoother_bins_sigma=1.0,\n oversampling=NISTparms.get('oversampling',10))\n p.debug_cache=False\n #set parameters for each convolver\n for key in NISTparms:\n if key:\n p.set_parameters(convolver=key,**NISTparms[key])\n else:\n p.set_parameters(**NISTparms[key])\n return p\n \ndef doFPAcalc(NISTpk,ttArr,twotheta,calcwid,step):\n '''Compute a single peak using a NIST profile object\n\n :param object NISTpk: a peak profile computational object from the \n NIST XRD Fundamental Parameters Code, typically established from\n a call to :func:`SetupFPAcalc`\n :param np.Array ttArr: an evenly-spaced grid of two-theta points (degrees)\n :param float twotheta: nominal center of peak (degrees)\n :param float calcwid: width to perform convolution (degrees)\n :param float step: step size\n '''\n # find closest point to twotheta (may be outside limits of the array)\n center_bin_idx=min(ttArr.searchsorted(twotheta),len(ttArr)-1)\n NISTpk.set_optimized_window(twotheta_exact_bin_spacing_deg=step,\n twotheta_window_center_deg=ttArr[center_bin_idx],\n twotheta_approx_window_fullwidth_deg=calcwid,\n )\n NISTpk.set_parameters(twotheta0_deg=twotheta)\n return center_bin_idx,NISTpk.compute_line_profile()\n\ndef MakeSimSizer(G2frame, dlg):\n '''Create a GUI to get simulation with parameters for Fundamental \n Parameters fitting. 
\n\n :param wx.Window dlg: Frame or Dialog where GUI will appear\n\n :returns: a sizer with the GUI controls \n \n '''\n def _onOK(event):\n msg = ''\n if simParms['minTT']-simParms['calcwid']/1.5 < 0.1:\n msg += 'First peak minus half the calc width is too low'\n if simParms['maxTT']+simParms['calcwid']/1.5 > 175:\n if msg: msg += '\\n'\n msg += 'Last peak plus half the calc width is too high'\n if simParms['npeaks'] < 8:\n if msg: msg += '\\n'\n msg += 'At least 8 peaks are needed'\n if msg:\n G2G.G2MessageBox(dlg,msg,'Bad input, try again')\n return\n # compute \"obs\" pattern\n ttArr = np.arange(max(0.5,\n simParms['minTT']-simParms['calcwid']/1.5),\n simParms['maxTT']+simParms['calcwid']/1.5,\n simParms['step'])\n intArr = np.zeros_like(ttArr)\n peaklist = np.linspace(simParms['minTT'],simParms['maxTT'],\n simParms['npeaks'],endpoint=True)\n peakSpacing = (peaklist[-1]-peaklist[0])/(len(peaklist)-1)\n NISTpk = setupFPAcalc()\n minPtsHM = len(intArr) # initialize points above half-max\n maxPtsHM = 0\n for num,twoth_peak in enumerate(peaklist):\n try:\n center_bin_idx,peakObj = doFPAcalc(\n NISTpk,ttArr,twoth_peak,simParms['calcwid'],\n simParms['step'])\n except:\n if msg: msg += '\\n'\n msg = \"Error computing convolution, revise input\"\n continue\n if num == 0: G2plt.PlotFPAconvolutors(G2frame,NISTpk)\n pkMax = peakObj.peak.max()\n pkPts = len(peakObj.peak)\n minPtsHM = min(minPtsHM,sum(peakObj.peak >= 0.5*pkMax)) # points above half-max\n maxPtsHM = max(maxPtsHM,sum(peakObj.peak >= 0.5*pkMax)) # points above half-max\n startInd = center_bin_idx-(pkPts//2) #this should be the aligned start of the new data\n # scale peak so max I=10,000 and add into intensity array\n if startInd < 0:\n intArr[:startInd+pkPts] += 10000 * peakObj.peak[-startInd:]/pkMax\n elif startInd > len(intArr):\n break\n elif startInd+pkPts >= len(intArr):\n offset = pkPts - len( intArr[startInd:] )\n intArr[startInd:startInd+pkPts-offset] += 10000 * peakObj.peak[:-offset]/pkMax\n else:\n 
intArr[startInd:startInd+pkPts] += 10000 * peakObj.peak/pkMax\n # check if peaks are too closely spaced\n if maxPtsHM*simParms['step'] > peakSpacing/4:\n if msg: msg += '\\n'\n msg += 'Maximum FWHM ({}) is too large compared to the peak spacing ({}). Decrease number of peaks or increase data range.'.format(\n maxPtsHM*simParms['step'], peakSpacing)\n # check if too few points across Hmax\n if minPtsHM < 10:\n if msg: msg += '\\n'\n msg += 'There are only {} points above the half-max. 10 are needed. Dropping step size.'.format(minPtsHM)\n simParms['step'] *= 0.5\n if msg:\n G2G.G2MessageBox(dlg,msg,'Bad input, try again')\n wx.CallAfter(MakeSimSizer,G2frame, dlg)\n return\n # pattern has been computed successfully\n dlg.Destroy()\n wx.CallAfter(FitFPApeaks,ttArr, intArr, peaklist, maxPtsHM) # do peakfit outside event callback\n\n def FitFPApeaks(ttArr, intArr, peaklist, maxPtsHM):\n '''Perform a peak fit to the FP simulated pattern\n '''\n plswait = wx.Dialog(G2frame,style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)\n vbox = wx.BoxSizer(wx.VERTICAL)\n vbox.Add((1,1),1,wx.ALL|wx.EXPAND,1)\n txt = wx.StaticText(plswait,wx.ID_ANY,\n 'Fitting peaks...\\nPlease wait...',\n style=wx.ALIGN_CENTER)\n vbox.Add(txt,0,wx.ALL|wx.EXPAND)\n vbox.Add((1,1),1,wx.ALL|wx.EXPAND,1)\n plswait.SetSizer(vbox)\n plswait.Layout()\n plswait.CenterOnParent()\n plswait.Show() # post \"please wait\"\n wx.BeginBusyCursor()\n # pick out one or two most intense wavelengths\n ints = list(NISTparms['emission']['emiss_intensities'])\n Lam1 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)]*1e10\n if len(ints) > 1: \n ints[np.argmax(ints)] = -1\n Lam2 = NISTparms['emission']['emiss_wavelengths'][np.argmax(ints)]*1e10\n else:\n Lam2 = None\n histId = G2frame.AddSimulatedPowder(ttArr,intArr,\n 'NIST Fundamental Parameters simulation',\n Lam1,Lam2)\n controls = G2frame.GPXtree.GetItemPyData(\n G2gd.GetGPXtreeItemId(G2frame,G2frame.root,'Controls'))\n controldat = controls.get('data',\n 
{'deriv type':'analytic','min dM/M':0.001,}) #fil\n Parms,Parms2 = G2frame.GPXtree.GetItemPyData(\n G2gd.GetGPXtreeItemId(G2frame,histId,'Instrument Parameters'))\n peakData = G2frame.GPXtree.GetItemPyData(\n G2gd.GetGPXtreeItemId(G2frame,histId,'Peak List'))\n # set background to 0 with one term = 0; disable refinement\n bkg1,bkg2 = bkg = G2frame.GPXtree.GetItemPyData(\n G2gd.GetGPXtreeItemId(G2frame,histId,'Background'))\n bkg1[1]=False\n bkg1[2]=0\n bkg1[3]=0.0\n limits = G2frame.GPXtree.GetItemPyData(\n G2gd.GetGPXtreeItemId(G2frame,histId,'Limits'))\n # approximate asym correction\n try:\n Parms['SH/L'][1] = 0.25 * (\n NISTparms['axial']['length_sample']+\n NISTparms['axial']['slit_length_source']\n ) / NISTparms['']['diffractometer_radius']\n except:\n pass\n \n for pos in peaklist:\n i = ttArr.searchsorted(pos)\n area = sum(intArr[max(0,i-maxPtsHM):min(len(intArr),i+maxPtsHM)])\n peakData['peaks'].append(G2mth.setPeakparms(Parms,Parms2,pos,area))\n histData = G2frame.GPXtree.GetItemPyData(histId)\n # refine peak positions only\n bxye = np.zeros(len(histData[1][1]))\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],\n bkg,limits[1],\n Parms,Parms2,histData[1],bxye,[],\n False,controldat,None)[0]\n # refine peak areas as well\n for pk in peakData['peaks']:\n pk[1] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],\n bkg,limits[1],\n Parms,Parms2,histData[1],bxye,[],\n False,controldat)[0]\n # refine profile function\n for p in ('U', 'V', 'W', 'X', 'Y'):\n Parms[p][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],\n bkg,limits[1],\n Parms,Parms2,histData[1],bxye,[],\n False,controldat)[0]\n # add in asymmetry\n Parms['SH/L'][2] = True\n peakData['sigDict'] = G2pwd.DoPeakFit('LSQ',peakData['peaks'],\n bkg,limits[1],\n Parms,Parms2,histData[1],bxye,[],\n False,controldat)[0]\n # reset \"initial\" profile\n for p in Parms:\n if len(Parms[p]) == 3:\n Parms[p][0] = Parms[p][1]\n Parms[p][2] = False\n 
wx.EndBusyCursor()\n plswait.Destroy() # remove \"please wait\"\n # save Iparms\n pth = G2G.GetExportPath(G2frame)\n fldlg = wx.FileDialog(G2frame, 'Set name to save GSAS-II instrument parameters file', pth, '', \n 'instrument parameter files (*.instprm)|*.instprm',wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)\n try:\n if fldlg.ShowModal() == wx.ID_OK:\n filename = fldlg.GetPath()\n # make sure extension is .instprm\n filename = os.path.splitext(filename)[0]+'.instprm'\n File = open(filename,'w')\n File.write(\"#GSAS-II instrument parameter file; do not add/delete items!\\n\")\n for item in Parms:\n File.write(item+':'+str(Parms[item][1])+'\\n')\n File.close()\n print ('Instrument parameters saved to: '+filename)\n finally:\n fldlg.Destroy()\n #GSASIIpath.IPyBreak()\n \n def _onClose(event):\n dlg.Destroy()\n def SetButtonStatus(done=False):\n OKbtn.Enable(bool(NISTparms))\n saveBtn.Enable(bool(NISTparms))\n if done: _onOK(None)\n def _onSetFPA(event):\n # Create a non-modal dialog for Topas-style FP input.\n FPdlg = wx.Dialog(dlg,wx.ID_ANY,'FPA parameters',\n style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)\n MakeTopasFPASizer(G2frame,FPdlg,'BBpoint',SetButtonStatus)\n FPdlg.CenterOnParent()\n FPdlg.Raise()\n FPdlg.Show() \n def _onSaveFPA(event):\n filename = G2G.askSaveFile(G2frame,'','.NISTfpa',\n 'dict of NIST FPA values',dlg)\n if not filename: return\n fp = open(filename,'w')\n fp.write('# parameters to be used in the NIST XRD Fundamental Parameters program\\n')\n fp.write('{\\n')\n for key in sorted(NISTparms):\n fp.write(\" '\"+key+\"' : \"+str(NISTparms[key])+\",\")\n if not key: fp.write(' # global parameters')\n fp.write('\\n')\n fp.write('}\\n')\n fp.close()\n def _onReadFPA(event):\n filename = G2G.GetImportFile(G2frame,\n message='Read file with dict of values for NIST Fundamental Parameters',\n parent=dlg,\n wildcard='dict of NIST FPA values|*.NISTfpa')\n if not filename: return\n if not filename[0]: return\n try:\n txt = open(filename[0],'r').read()\n 
NISTparms.clear()\n array = np.array\n d = eval(txt)\n NISTparms.update(d)\n except Exception as err:\n G2G.G2MessageBox(dlg,\n u'Error reading file {}:{}\\n'.format(filename,err),\n 'Bad dict input')\n #GSASIIpath.IPyBreak()\n SetButtonStatus()\n\n if dlg.GetSizer(): dlg.GetSizer().Clear(True)\n MainSizer = wx.BoxSizer(wx.VERTICAL)\n MainSizer.Add(wx.StaticText(dlg,wx.ID_ANY,\n 'Fit Profile Parameters to Peaks from Fundamental Parameters',\n style=wx.ALIGN_CENTER),0,wx.EXPAND)\n MainSizer.Add((-1,5))\n prmSizer = wx.FlexGridSizer(cols=2,hgap=3,vgap=5)\n text = wx.StaticText(dlg,wx.ID_ANY,'value',style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text,0,wx.EXPAND)\n text = wx.StaticText(dlg,wx.ID_ANY,'explanation',style=wx.ALIGN_CENTER)\n text.SetBackgroundColour(wx.WHITE)\n prmSizer.Add(text,0,wx.EXPAND)\n for key,defVal,text in (\n ('minTT',3.,'Location of first peak in 2theta (deg)'),\n ('maxTT',123.,'Location of last peak in 2theta (deg)'),\n ('step',0.01,'Pattern step size (deg 2theta)'),\n ('npeaks',13.,'Number of peaks'),\n ('calcwid',2.,'Range to compute each peak (deg 2theta)'),\n ):\n if key not in simParms: simParms[key] = defVal\n ctrl = G2G.ValidatedTxtCtrl(dlg,simParms,key,size=(70,-1))\n prmSizer.Add(ctrl,1,wx.ALL|wx.ALIGN_CENTER_VERTICAL,1)\n txt = wx.StaticText(dlg,wx.ID_ANY,text,size=(300,-1))\n txt.Wrap(280)\n prmSizer.Add(txt)\n MainSizer.Add(prmSizer)\n\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n btn = wx.Button(dlg, wx.ID_ANY,'Input FP vals')\n btnsizer.Add(btn)\n btn.Bind(wx.EVT_BUTTON,_onSetFPA)\n saveBtn = wx.Button(dlg, wx.ID_ANY,'Save FPA dict')\n btnsizer.Add(saveBtn)\n saveBtn.Bind(wx.EVT_BUTTON,_onSaveFPA)\n readBtn = wx.Button(dlg, wx.ID_ANY,'Read FPA dict')\n btnsizer.Add(readBtn)\n readBtn.Bind(wx.EVT_BUTTON,_onReadFPA)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,4),1,wx.EXPAND,1)\n txt = wx.StaticText(dlg,wx.ID_ANY,\n 'If you use this, please cite: '+Citation,\n size=(350,-1))\n 
txt.Wrap(340)\n MainSizer.Add(txt,0,wx.ALIGN_CENTER)\n btnsizer = wx.BoxSizer(wx.HORIZONTAL)\n OKbtn = wx.Button(dlg, wx.ID_OK)\n OKbtn.SetDefault()\n btnsizer.Add(OKbtn)\n Cbtn = wx.Button(dlg, wx.ID_CLOSE,\"Cancel\") \n btnsizer.Add(Cbtn)\n MainSizer.Add(btnsizer, 0, wx.ALIGN_CENTER, 0)\n MainSizer.Add((-1,4),1,wx.EXPAND,1)\n # bindings for close of window\n OKbtn.Bind(wx.EVT_BUTTON,_onOK)\n Cbtn.Bind(wx.EVT_BUTTON,_onClose)\n SetButtonStatus()\n dlg.SetSizer(MainSizer)\n MainSizer.Layout()\n MainSizer.Fit(dlg)\n dlg.SetMinSize(dlg.GetSize())\n dlg.SendSizeEvent()\n dlg.Raise()\n \ndef GetFPAInput(G2frame):\n dlg = wx.Dialog(G2frame,wx.ID_ANY,'FPA input',\n style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)\n MakeSimSizer(G2frame,dlg)\n dlg.CenterOnParent()\n dlg.Show()\n return\n \n", "step-ids": [ 7, 8, 9, 10, 11 ] }
[ 7, 8, 9, 10, 11 ]
import sqlalchemy from .base import Base from sqlalchemy import Column, Integer, String, ForeignKey from sqlalchemy.dialects.postgresql import UUID from sqlalchemy.orm import relationship class ModelSpellVariantPair(Base): __tablename__ = "spell_variant_pair" uuid = Column( UUID(as_uuid=True), server_default=sqlalchemy.text("uuid_generate_v4()"), unique=True, nullable=False, primary_key=True, ) class_id = Column( UUID(as_uuid=True), ForeignKey("class.uuid", ondelete="CASCADE"), nullable=False ) spells = relationship( "ModelSpell", backref="spell_variant_pair", cascade="all, delete-orphan" )
normal
{ "blob_id": "4958d6d88b762e6fbe860123b7274c16b6452605", "index": 7674, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ModelSpellVariantPair(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n", "step-3": "<mask token>\n\n\nclass ModelSpellVariantPair(Base):\n __tablename__ = 'spell_variant_pair'\n uuid = Column(UUID(as_uuid=True), server_default=sqlalchemy.text(\n 'uuid_generate_v4()'), unique=True, nullable=False, primary_key=True)\n class_id = Column(UUID(as_uuid=True), ForeignKey('class.uuid', ondelete\n ='CASCADE'), nullable=False)\n spells = relationship('ModelSpell', backref='spell_variant_pair',\n cascade='all, delete-orphan')\n", "step-4": "import sqlalchemy\nfrom .base import Base\nfrom sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.orm import relationship\n\n\nclass ModelSpellVariantPair(Base):\n __tablename__ = 'spell_variant_pair'\n uuid = Column(UUID(as_uuid=True), server_default=sqlalchemy.text(\n 'uuid_generate_v4()'), unique=True, nullable=False, primary_key=True)\n class_id = Column(UUID(as_uuid=True), ForeignKey('class.uuid', ondelete\n ='CASCADE'), nullable=False)\n spells = relationship('ModelSpell', backref='spell_variant_pair',\n cascade='all, delete-orphan')\n", "step-5": "import sqlalchemy\nfrom .base import Base\nfrom sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom sqlalchemy.orm import relationship\n\n\nclass ModelSpellVariantPair(Base):\n __tablename__ = \"spell_variant_pair\"\n\n uuid = Column(\n UUID(as_uuid=True),\n server_default=sqlalchemy.text(\"uuid_generate_v4()\"),\n unique=True,\n nullable=False,\n primary_key=True,\n )\n class_id = Column(\n UUID(as_uuid=True), ForeignKey(\"class.uuid\", ondelete=\"CASCADE\"), nullable=False\n )\n spells = relationship(\n \"ModelSpell\", backref=\"spell_variant_pair\", cascade=\"all, delete-orphan\"\n )\n", "step-ids": [ 0, 1, 2, 
3, 4 ] }
[ 0, 1, 2, 3, 4 ]
<|reserved_special_token_0|> def read_jla_mock(mock_filename): fp = open(mock_filename, 'r') lines = fp.readlines() fp.close() jla = [] for line in lines: sn = line.split() temp = [] temp.append(float(sn[1])) temp.append(float(sn[2])) temp.append(float(sn[3])) temp.append(float(sn[4])) jla.append(temp) return np.array(jla) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def read_jla_mock(mock_filename): fp = open(mock_filename, 'r') lines = fp.readlines() fp.close() jla = [] for line in lines: sn = line.split() temp = [] temp.append(float(sn[1])) temp.append(float(sn[2])) temp.append(float(sn[3])) temp.append(float(sn[4])) jla.append(temp) return np.array(jla) <|reserved_special_token_0|> plt.subplot(1, 2, 1) <|reserved_special_token_0|> ax.hist(dmu, bins=nbin_all, label='ALL ' + ' p = ' + str(p), alpha=0.5, rwidth=rwidth, color=colors[0]) ax.hist(dmu[ID1], bins=nbin_1, label='$z<' + str(z1) + '$' + ' p = ' + str( p1) + '0', alpha=0.7, rwidth=rwidth, color=colors[1]) ax.hist(dmu[ID2], bins=nbin_2, label='$z>' + str(z2) + '$' + ' p = ' + str( p2), alpha=0.8, rwidth=rwidth, color=colors[2]) ax.set_xlim(-3.5, 3.5) ax.set_xticks([-3, -2, -1, 0, 1, 2, 3]) ax.set_xticklabels([-3, -2, -1, 0, 1, 2, 3], fontsize=14) ax.set_xlabel('$\\widetilde{\\Delta\\mu}$', fontsize=14) <|reserved_special_token_0|> ax.set_ylim(0, 170) ax.set_yticks(yticks) ax.set_yticklabels(yticks, fontsize=14) ax.set_ylabel('Counts', fontsize=14) ax.tick_params(axis='both', direction='in') <|reserved_special_token_0|> for i in range(len(texts)): plt.setp(texts[i], color=colors[i]) plt.subplot(1, 2, 2) <|reserved_special_token_0|> ax.hlines(-1, xmin=0, xmax=1.5, linestyle='dashed', lw=2, alpha=1, color= colors[0], label='Fiducal model') ax.errorbar(z, eos_SP[:, 0], yerr=[eos_SP[:, 0] - eos_SP[:, 2], eos_SP[:, 3 ] - eos_SP[:, 0]], marker='o', elinewidth=1.5, markersize=4, capsize=3, capthick=2, color=colors[1], label='Prior enforced') ax.plot(z, eos_no_prior[:, 0], 
'--', lw=2.5, color=colors[2]) ax.fill_between(z, y1=eos_no_prior[:, 2], y2=eos_no_prior[:, 3], color= colors[2], label='Reconstruction without prior') ax.set_xlim(-0.025, 1.525) ax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5]) ax.set_xticklabels([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5], fontsize=14) ax.set_xlabel('$z$', fontsize=14) <|reserved_special_token_0|> ax.set_yticks(yticks) ax.set_yticklabels(yticks, fontsize=14) ax.set_ylabel('$w(z)$', fontsize=14) ax.tick_params(axis='both', direction='in') <|reserved_special_token_0|> for i in range(len(texts)): plt.setp(texts[i], fontsize=14, color=colors[i]) <|reserved_special_token_0|> ax.text(0.05, -2, '$\\chi^2_{\\rm reduced} = ' + str(round(chisq_red, 2)) + '$', fontsize=14, color='r') plt.subplots_adjust(wspace=0.15, hspace=0.25, left=0.065, right=0.985, top= 0.975, bottom=0.175) plt.savefig('example_eos_result.pdf') plt.show() <|reserved_special_token_1|> <|reserved_special_token_0|> colors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd', u'#8c564b'] fig = plt.figure(figsize=(11, 4)) def read_jla_mock(mock_filename): fp = open(mock_filename, 'r') lines = fp.readlines() fp.close() jla = [] for line in lines: sn = line.split() temp = [] temp.append(float(sn[1])) temp.append(float(sn[2])) temp.append(float(sn[3])) temp.append(float(sn[4])) jla.append(temp) return np.array(jla) jla = read_jla_mock('MOCK_JLA_40.txt') eos_SP = np.loadtxt('eos_40.txt') eos_no_prior = np.loadtxt('eos_no_prior.txt') eos_no_prior2 = np.loadtxt('eos_no_prior2.txt') z = jla[:, 0] dmu = (jla[:, 1] - jla[:, 3]) / jla[:, 2] nbin_all = 15 nbin_1 = 15 nbin_2 = 15 z1 = 0.2 z2 = 0.6 ID1 = z < z1 ID2 = z >= z2 p = round(kstest(dmu, cdf='norm')[1], 2) p1 = round(kstest(dmu[ID1], 'norm')[1], 2) p2 = round(kstest(dmu[ID2], 'norm')[1], 2) plt.subplot(1, 2, 1) ax = plt.gca() rwidth = 0.6 ax.hist(dmu, bins=nbin_all, label='ALL ' + ' p = ' + str(p), alpha=0.5, rwidth=rwidth, color=colors[0]) ax.hist(dmu[ID1], bins=nbin_1, label='$z<' + 
str(z1) + '$' + ' p = ' + str( p1) + '0', alpha=0.7, rwidth=rwidth, color=colors[1]) ax.hist(dmu[ID2], bins=nbin_2, label='$z>' + str(z2) + '$' + ' p = ' + str( p2), alpha=0.8, rwidth=rwidth, color=colors[2]) ax.set_xlim(-3.5, 3.5) ax.set_xticks([-3, -2, -1, 0, 1, 2, 3]) ax.set_xticklabels([-3, -2, -1, 0, 1, 2, 3], fontsize=14) ax.set_xlabel('$\\widetilde{\\Delta\\mu}$', fontsize=14) yticks = [0, 50, 100, 150] ax.set_ylim(0, 170) ax.set_yticks(yticks) ax.set_yticklabels(yticks, fontsize=14) ax.set_ylabel('Counts', fontsize=14) ax.tick_params(axis='both', direction='in') lgd = ax.legend(loc='upper left', fontsize=13, frameon=False) texts = lgd.get_texts() for i in range(len(texts)): plt.setp(texts[i], color=colors[i]) plt.subplot(1, 2, 2) ax = plt.gca() a = np.linspace(1, 0.4, 20) z = 1 / a - 1 colors = ['blue', 'red', 'gray'] ax.hlines(-1, xmin=0, xmax=1.5, linestyle='dashed', lw=2, alpha=1, color= colors[0], label='Fiducal model') ax.errorbar(z, eos_SP[:, 0], yerr=[eos_SP[:, 0] - eos_SP[:, 2], eos_SP[:, 3 ] - eos_SP[:, 0]], marker='o', elinewidth=1.5, markersize=4, capsize=3, capthick=2, color=colors[1], label='Prior enforced') ax.plot(z, eos_no_prior[:, 0], '--', lw=2.5, color=colors[2]) ax.fill_between(z, y1=eos_no_prior[:, 2], y2=eos_no_prior[:, 3], color= colors[2], label='Reconstruction without prior') ax.set_xlim(-0.025, 1.525) ax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5]) ax.set_xticklabels([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5], fontsize=14) ax.set_xlabel('$z$', fontsize=14) yticks = [-3, -2, -1, -0] ax.set_yticks(yticks) ax.set_yticklabels(yticks, fontsize=14) ax.set_ylabel('$w(z)$', fontsize=14) ax.tick_params(axis='both', direction='in') handles, labels = ax.get_legend_handles_labels() handles = [handles[0], handles[2], handles[1]] labels = [labels[0], labels[2], labels[1]] lgd = ax.legend(handles, labels, loc='lower left', frameon=False, fontsize=14) texts = lgd.get_texts() cid = [0, 2, 1] for i in range(len(texts)): plt.setp(texts[i], fontsize=14, 
color=colors[i]) dof = 719 chisq_red = 876.39 / dof ax.text(0.05, -2, '$\\chi^2_{\\rm reduced} = ' + str(round(chisq_red, 2)) + '$', fontsize=14, color='r') plt.subplots_adjust(wspace=0.15, hspace=0.25, left=0.065, right=0.985, top= 0.975, bottom=0.175) plt.savefig('example_eos_result.pdf') plt.show() <|reserved_special_token_1|> import sys, os import numpy as np import matplotlib.pylab as plt from scipy.linalg import eig from scipy.stats import norm, kstest, normaltest colors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd', u'#8c564b'] fig = plt.figure(figsize=(11, 4)) def read_jla_mock(mock_filename): fp = open(mock_filename, 'r') lines = fp.readlines() fp.close() jla = [] for line in lines: sn = line.split() temp = [] temp.append(float(sn[1])) temp.append(float(sn[2])) temp.append(float(sn[3])) temp.append(float(sn[4])) jla.append(temp) return np.array(jla) jla = read_jla_mock('MOCK_JLA_40.txt') eos_SP = np.loadtxt('eos_40.txt') eos_no_prior = np.loadtxt('eos_no_prior.txt') eos_no_prior2 = np.loadtxt('eos_no_prior2.txt') z = jla[:, 0] dmu = (jla[:, 1] - jla[:, 3]) / jla[:, 2] nbin_all = 15 nbin_1 = 15 nbin_2 = 15 z1 = 0.2 z2 = 0.6 ID1 = z < z1 ID2 = z >= z2 p = round(kstest(dmu, cdf='norm')[1], 2) p1 = round(kstest(dmu[ID1], 'norm')[1], 2) p2 = round(kstest(dmu[ID2], 'norm')[1], 2) plt.subplot(1, 2, 1) ax = plt.gca() rwidth = 0.6 ax.hist(dmu, bins=nbin_all, label='ALL ' + ' p = ' + str(p), alpha=0.5, rwidth=rwidth, color=colors[0]) ax.hist(dmu[ID1], bins=nbin_1, label='$z<' + str(z1) + '$' + ' p = ' + str( p1) + '0', alpha=0.7, rwidth=rwidth, color=colors[1]) ax.hist(dmu[ID2], bins=nbin_2, label='$z>' + str(z2) + '$' + ' p = ' + str( p2), alpha=0.8, rwidth=rwidth, color=colors[2]) ax.set_xlim(-3.5, 3.5) ax.set_xticks([-3, -2, -1, 0, 1, 2, 3]) ax.set_xticklabels([-3, -2, -1, 0, 1, 2, 3], fontsize=14) ax.set_xlabel('$\\widetilde{\\Delta\\mu}$', fontsize=14) yticks = [0, 50, 100, 150] ax.set_ylim(0, 170) ax.set_yticks(yticks) 
ax.set_yticklabels(yticks, fontsize=14) ax.set_ylabel('Counts', fontsize=14) ax.tick_params(axis='both', direction='in') lgd = ax.legend(loc='upper left', fontsize=13, frameon=False) texts = lgd.get_texts() for i in range(len(texts)): plt.setp(texts[i], color=colors[i]) plt.subplot(1, 2, 2) ax = plt.gca() a = np.linspace(1, 0.4, 20) z = 1 / a - 1 colors = ['blue', 'red', 'gray'] ax.hlines(-1, xmin=0, xmax=1.5, linestyle='dashed', lw=2, alpha=1, color= colors[0], label='Fiducal model') ax.errorbar(z, eos_SP[:, 0], yerr=[eos_SP[:, 0] - eos_SP[:, 2], eos_SP[:, 3 ] - eos_SP[:, 0]], marker='o', elinewidth=1.5, markersize=4, capsize=3, capthick=2, color=colors[1], label='Prior enforced') ax.plot(z, eos_no_prior[:, 0], '--', lw=2.5, color=colors[2]) ax.fill_between(z, y1=eos_no_prior[:, 2], y2=eos_no_prior[:, 3], color= colors[2], label='Reconstruction without prior') ax.set_xlim(-0.025, 1.525) ax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5]) ax.set_xticklabels([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5], fontsize=14) ax.set_xlabel('$z$', fontsize=14) yticks = [-3, -2, -1, -0] ax.set_yticks(yticks) ax.set_yticklabels(yticks, fontsize=14) ax.set_ylabel('$w(z)$', fontsize=14) ax.tick_params(axis='both', direction='in') handles, labels = ax.get_legend_handles_labels() handles = [handles[0], handles[2], handles[1]] labels = [labels[0], labels[2], labels[1]] lgd = ax.legend(handles, labels, loc='lower left', frameon=False, fontsize=14) texts = lgd.get_texts() cid = [0, 2, 1] for i in range(len(texts)): plt.setp(texts[i], fontsize=14, color=colors[i]) dof = 719 chisq_red = 876.39 / dof ax.text(0.05, -2, '$\\chi^2_{\\rm reduced} = ' + str(round(chisq_red, 2)) + '$', fontsize=14, color='r') plt.subplots_adjust(wspace=0.15, hspace=0.25, left=0.065, right=0.985, top= 0.975, bottom=0.175) plt.savefig('example_eos_result.pdf') plt.show() <|reserved_special_token_1|> ########################################################### # 2019-02-07: 删除了marginalized prior # 
########################################################### import sys,os import numpy as np import matplotlib.pylab as plt from scipy.linalg import eig from scipy.stats import norm, kstest, normaltest # use default colors defined by MatPlotlib colors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd', u'#8c564b'] ########################################################### fig = plt.figure(figsize=(11,4)) ########################################################### # 1) histograms of the normalized dmu(zi). The purpose is # to show that the mock sample is not too peculiar ########################################################### def read_jla_mock( mock_filename ): fp = open(mock_filename,'r') lines = fp.readlines() fp.close() jla = [] for line in lines: sn = line.split() temp = [] temp.append(float(sn[1])) temp.append(float(sn[2])) temp.append(float(sn[3])) temp.append(float(sn[4])) jla.append(temp) return np.array(jla) # jla = read_jla_mock('MOCK_JLA_51.txt') # eos_SP = np.loadtxt('eos_51.txt') # jla = read_jla_mock('MOCK_JLA_16.txt') # eos_SP = np.loadtxt('eos_16.txt') # jla = read_jla_mock('MOCK_JLA_10.txt') # eos_SP = np.loadtxt('eos_10.txt') # jla = read_jla_mock('MOCK_JLA_9.txt') # eos_SP = np.loadtxt('eos_9.txt') # jla = read_jla_mock('MOCK_JLA_30.txt') # eos_SP = np.loadtxt('eos_30.txt') # jla = read_jla_mock('MOCK_JLA_3.txt') # eos_SP = np.loadtxt('eos_3.txt') jla = read_jla_mock('MOCK_JLA_40.txt') eos_SP = np.loadtxt('eos_40.txt') eos_no_prior = np.loadtxt('eos_no_prior.txt') eos_no_prior2 = np.loadtxt('eos_no_prior2.txt') z = jla[:,0] dmu = (jla[:,1]-jla[:,3])/jla[:,2] # normalize the errors nbin_all = 15 nbin_1 = 15 nbin_2 = 15 z1 = 0.2 z2 = 0.6 ID1 = (z < z1 ) ID2 = (z >= z2 ) p = round(kstest(dmu,cdf='norm')[1],2) p1 = round(kstest(dmu[ID1],'norm')[1],2) p2 = round(kstest(dmu[ID2],'norm')[1],2) plt.subplot(1,2,1) ax = plt.gca() rwidth=0.6 ax.hist(dmu, bins=nbin_all, label=r'ALL ' + r' p = '+str(p), alpha=0.5, rwidth=rwidth, 
color=colors[0]) ax.hist(dmu[ID1], bins=nbin_1, label=r'$z<' + str(z1) + '$' + r' p = '+str(p1)+'0', alpha=0.7, rwidth=rwidth, color=colors[1]) ax.hist(dmu[ID2], bins=nbin_2, label=r'$z>' + str(z2) + '$' + r' p = '+str(p2), alpha=0.8, rwidth=rwidth, color=colors[2]) ax.set_xlim(-3.5,3.5) ax.set_xticks([-3,-2,-1,0,1,2,3]) ax.set_xticklabels([-3,-2,-1,0,1,2,3],fontsize=14) ax.set_xlabel(r'$\widetilde{\Delta\mu}$',fontsize=14) yticks = [0,50,100,150] ax.set_ylim(0,170) ax.set_yticks(yticks) ax.set_yticklabels(yticks,fontsize=14) ax.set_ylabel(r'Counts',fontsize=14) ax.tick_params(axis='both',direction='in') lgd=ax.legend(loc='upper left',fontsize=13,frameon=False) texts = lgd.get_texts() for i in range(len(texts)): plt.setp(texts[i],color=colors[i]) ########################################################### # 3) reconstructed EoS ########################################################### plt.subplot(1,2,2) ax = plt.gca() a = np.linspace(1,.4,20) z = 1/a-1 colors=['blue','red','gray'] ax.hlines(-1,xmin=0,xmax=1.5,linestyle='dashed',lw=2,alpha=1,color=colors[0],label=r'Fiducal model') # EoS result with prior enforced ax.errorbar(z,eos_SP[:,0],yerr=[eos_SP[:,0]-eos_SP[:,2],eos_SP[:,3]-eos_SP[:,0]], marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[1],label=r'Prior enforced') # ax.errorbar(z,eos_SP[:,0],yerr=eos_SP[:,1], # marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[1],label=r'Reconstruction') # EoS result without prior # ax.errorbar(z,eos_no_prior[:,0],yerr=[eos_no_prior[:,0]-eos_no_prior[:,2],eos_no_prior[:,3]-eos_no_prior[:,0]], # marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[2],label=r'Reconstruction without prior') # ax.errorbar(z,eos_no_prior[:,0],yerr=eos_no_prior[:,1], # marker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[2],label=r'Reconstruction without prior') ax.plot(z,eos_no_prior[:,0],'--',lw=2.5,color=colors[2]) # 
ax.fill_between(z,y1=eos_no_prior[:,0]-eos_no_prior[:,1],y2=eos_no_prior[:,0]+eos_no_prior[:,1], # color=colors[2],alpha=0.5,label=r'Without prior') ax.fill_between(z,y1=eos_no_prior[:,2],y2=eos_no_prior[:,3], color=colors[2],label=r'Reconstruction without prior') # ax.fill_between(z,y1=eos_no_prior2[:,0]-eos_no_prior2[:,1],y2=eos_no_prior2[:,0]+eos_no_prior2[:,1], # color='g',alpha=0.5,label=r'Reconstruction without prior') # ax.fill_between(z,y1=eos_no_prior2[:,2],y2=eos_no_prior2[:,3], # color='g',alpha=0.5,label=r'Reconstruction without prior') ax.set_xlim(-0.025,1.525) ax.set_xticks([0,0.25,0.5,0.75,1.0,1.25,1.5]) ax.set_xticklabels([0,0.25,0.5,0.75,1.0,1.25,1.5],fontsize=14) ax.set_xlabel(r'$z$',fontsize=14) yticks=[-3,-2,-1,-0] ax.set_yticks(yticks) ax.set_yticklabels(yticks,fontsize=14) ax.set_ylabel(r'$w(z)$',fontsize=14) # lgd=ax.legend(loc='lower left',frameon=False,fontsize=14) ax.tick_params(axis='both',direction='in') # texts = lgd.get_texts() # for i in range(len(texts)): # plt.setp(texts[i],color=colors[i]) handles,labels = ax.get_legend_handles_labels() handles = [handles[0], handles[2], handles[1]] labels = [labels[0], labels[2], labels[1]] lgd=ax.legend(handles,labels,loc='lower left',frameon=False,fontsize=14) # lgd=legend(loc='upper left',frameon=False,fontsize=12) texts = lgd.get_texts() cid = [0,2,1] for i in range(len(texts)): plt.setp(texts[i],fontsize=14,color=colors[i]) # add reduced chisq dof = 719 chisq_red = 876.39/dof ax.text(0.05,-2,r'$\chi^2_{\rm reduced} = '+str(round(chisq_red,2))+'$',fontsize=14,color='r') ########################################################### # final adjustments ... plt.subplots_adjust(wspace=0.15, hspace=0.25, left=0.065, right=0.985, top=0.975, bottom=0.175) plt.savefig('example_eos_result.pdf') plt.show()
flexible
{ "blob_id": "ac35672661e1dd0b97567ae4335f537dc69f98f7", "index": 6240, "step-1": "<mask token>\n\n\ndef read_jla_mock(mock_filename):\n fp = open(mock_filename, 'r')\n lines = fp.readlines()\n fp.close()\n jla = []\n for line in lines:\n sn = line.split()\n temp = []\n temp.append(float(sn[1]))\n temp.append(float(sn[2]))\n temp.append(float(sn[3]))\n temp.append(float(sn[4]))\n jla.append(temp)\n return np.array(jla)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef read_jla_mock(mock_filename):\n fp = open(mock_filename, 'r')\n lines = fp.readlines()\n fp.close()\n jla = []\n for line in lines:\n sn = line.split()\n temp = []\n temp.append(float(sn[1]))\n temp.append(float(sn[2]))\n temp.append(float(sn[3]))\n temp.append(float(sn[4]))\n jla.append(temp)\n return np.array(jla)\n\n\n<mask token>\nplt.subplot(1, 2, 1)\n<mask token>\nax.hist(dmu, bins=nbin_all, label='ALL ' + ' p = ' + str(p), alpha=0.5,\n rwidth=rwidth, color=colors[0])\nax.hist(dmu[ID1], bins=nbin_1, label='$z<' + str(z1) + '$' + ' p = ' + str(\n p1) + '0', alpha=0.7, rwidth=rwidth, color=colors[1])\nax.hist(dmu[ID2], bins=nbin_2, label='$z>' + str(z2) + '$' + ' p = ' + str(\n p2), alpha=0.8, rwidth=rwidth, color=colors[2])\nax.set_xlim(-3.5, 3.5)\nax.set_xticks([-3, -2, -1, 0, 1, 2, 3])\nax.set_xticklabels([-3, -2, -1, 0, 1, 2, 3], fontsize=14)\nax.set_xlabel('$\\\\widetilde{\\\\Delta\\\\mu}$', fontsize=14)\n<mask token>\nax.set_ylim(0, 170)\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('Counts', fontsize=14)\nax.tick_params(axis='both', direction='in')\n<mask token>\nfor i in range(len(texts)):\n plt.setp(texts[i], color=colors[i])\nplt.subplot(1, 2, 2)\n<mask token>\nax.hlines(-1, xmin=0, xmax=1.5, linestyle='dashed', lw=2, alpha=1, color=\n colors[0], label='Fiducal model')\nax.errorbar(z, eos_SP[:, 0], yerr=[eos_SP[:, 0] - eos_SP[:, 2], eos_SP[:, 3\n ] - eos_SP[:, 0]], marker='o', elinewidth=1.5, markersize=4, capsize=3,\n capthick=2, color=colors[1], 
label='Prior enforced')\nax.plot(z, eos_no_prior[:, 0], '--', lw=2.5, color=colors[2])\nax.fill_between(z, y1=eos_no_prior[:, 2], y2=eos_no_prior[:, 3], color=\n colors[2], label='Reconstruction without prior')\nax.set_xlim(-0.025, 1.525)\nax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5])\nax.set_xticklabels([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5], fontsize=14)\nax.set_xlabel('$z$', fontsize=14)\n<mask token>\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('$w(z)$', fontsize=14)\nax.tick_params(axis='both', direction='in')\n<mask token>\nfor i in range(len(texts)):\n plt.setp(texts[i], fontsize=14, color=colors[i])\n<mask token>\nax.text(0.05, -2, '$\\\\chi^2_{\\\\rm reduced} = ' + str(round(chisq_red, 2)) +\n '$', fontsize=14, color='r')\nplt.subplots_adjust(wspace=0.15, hspace=0.25, left=0.065, right=0.985, top=\n 0.975, bottom=0.175)\nplt.savefig('example_eos_result.pdf')\nplt.show()\n", "step-3": "<mask token>\ncolors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd',\n u'#8c564b']\nfig = plt.figure(figsize=(11, 4))\n\n\ndef read_jla_mock(mock_filename):\n fp = open(mock_filename, 'r')\n lines = fp.readlines()\n fp.close()\n jla = []\n for line in lines:\n sn = line.split()\n temp = []\n temp.append(float(sn[1]))\n temp.append(float(sn[2]))\n temp.append(float(sn[3]))\n temp.append(float(sn[4]))\n jla.append(temp)\n return np.array(jla)\n\n\njla = read_jla_mock('MOCK_JLA_40.txt')\neos_SP = np.loadtxt('eos_40.txt')\neos_no_prior = np.loadtxt('eos_no_prior.txt')\neos_no_prior2 = np.loadtxt('eos_no_prior2.txt')\nz = jla[:, 0]\ndmu = (jla[:, 1] - jla[:, 3]) / jla[:, 2]\nnbin_all = 15\nnbin_1 = 15\nnbin_2 = 15\nz1 = 0.2\nz2 = 0.6\nID1 = z < z1\nID2 = z >= z2\np = round(kstest(dmu, cdf='norm')[1], 2)\np1 = round(kstest(dmu[ID1], 'norm')[1], 2)\np2 = round(kstest(dmu[ID2], 'norm')[1], 2)\nplt.subplot(1, 2, 1)\nax = plt.gca()\nrwidth = 0.6\nax.hist(dmu, bins=nbin_all, label='ALL ' + ' p = ' + str(p), alpha=0.5,\n rwidth=rwidth, 
color=colors[0])\nax.hist(dmu[ID1], bins=nbin_1, label='$z<' + str(z1) + '$' + ' p = ' + str(\n p1) + '0', alpha=0.7, rwidth=rwidth, color=colors[1])\nax.hist(dmu[ID2], bins=nbin_2, label='$z>' + str(z2) + '$' + ' p = ' + str(\n p2), alpha=0.8, rwidth=rwidth, color=colors[2])\nax.set_xlim(-3.5, 3.5)\nax.set_xticks([-3, -2, -1, 0, 1, 2, 3])\nax.set_xticklabels([-3, -2, -1, 0, 1, 2, 3], fontsize=14)\nax.set_xlabel('$\\\\widetilde{\\\\Delta\\\\mu}$', fontsize=14)\nyticks = [0, 50, 100, 150]\nax.set_ylim(0, 170)\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('Counts', fontsize=14)\nax.tick_params(axis='both', direction='in')\nlgd = ax.legend(loc='upper left', fontsize=13, frameon=False)\ntexts = lgd.get_texts()\nfor i in range(len(texts)):\n plt.setp(texts[i], color=colors[i])\nplt.subplot(1, 2, 2)\nax = plt.gca()\na = np.linspace(1, 0.4, 20)\nz = 1 / a - 1\ncolors = ['blue', 'red', 'gray']\nax.hlines(-1, xmin=0, xmax=1.5, linestyle='dashed', lw=2, alpha=1, color=\n colors[0], label='Fiducal model')\nax.errorbar(z, eos_SP[:, 0], yerr=[eos_SP[:, 0] - eos_SP[:, 2], eos_SP[:, 3\n ] - eos_SP[:, 0]], marker='o', elinewidth=1.5, markersize=4, capsize=3,\n capthick=2, color=colors[1], label='Prior enforced')\nax.plot(z, eos_no_prior[:, 0], '--', lw=2.5, color=colors[2])\nax.fill_between(z, y1=eos_no_prior[:, 2], y2=eos_no_prior[:, 3], color=\n colors[2], label='Reconstruction without prior')\nax.set_xlim(-0.025, 1.525)\nax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5])\nax.set_xticklabels([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5], fontsize=14)\nax.set_xlabel('$z$', fontsize=14)\nyticks = [-3, -2, -1, -0]\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('$w(z)$', fontsize=14)\nax.tick_params(axis='both', direction='in')\nhandles, labels = ax.get_legend_handles_labels()\nhandles = [handles[0], handles[2], handles[1]]\nlabels = [labels[0], labels[2], labels[1]]\nlgd = ax.legend(handles, labels, loc='lower left', 
frameon=False, fontsize=14)\ntexts = lgd.get_texts()\ncid = [0, 2, 1]\nfor i in range(len(texts)):\n plt.setp(texts[i], fontsize=14, color=colors[i])\ndof = 719\nchisq_red = 876.39 / dof\nax.text(0.05, -2, '$\\\\chi^2_{\\\\rm reduced} = ' + str(round(chisq_red, 2)) +\n '$', fontsize=14, color='r')\nplt.subplots_adjust(wspace=0.15, hspace=0.25, left=0.065, right=0.985, top=\n 0.975, bottom=0.175)\nplt.savefig('example_eos_result.pdf')\nplt.show()\n", "step-4": "import sys, os\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom scipy.linalg import eig\nfrom scipy.stats import norm, kstest, normaltest\ncolors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd',\n u'#8c564b']\nfig = plt.figure(figsize=(11, 4))\n\n\ndef read_jla_mock(mock_filename):\n fp = open(mock_filename, 'r')\n lines = fp.readlines()\n fp.close()\n jla = []\n for line in lines:\n sn = line.split()\n temp = []\n temp.append(float(sn[1]))\n temp.append(float(sn[2]))\n temp.append(float(sn[3]))\n temp.append(float(sn[4]))\n jla.append(temp)\n return np.array(jla)\n\n\njla = read_jla_mock('MOCK_JLA_40.txt')\neos_SP = np.loadtxt('eos_40.txt')\neos_no_prior = np.loadtxt('eos_no_prior.txt')\neos_no_prior2 = np.loadtxt('eos_no_prior2.txt')\nz = jla[:, 0]\ndmu = (jla[:, 1] - jla[:, 3]) / jla[:, 2]\nnbin_all = 15\nnbin_1 = 15\nnbin_2 = 15\nz1 = 0.2\nz2 = 0.6\nID1 = z < z1\nID2 = z >= z2\np = round(kstest(dmu, cdf='norm')[1], 2)\np1 = round(kstest(dmu[ID1], 'norm')[1], 2)\np2 = round(kstest(dmu[ID2], 'norm')[1], 2)\nplt.subplot(1, 2, 1)\nax = plt.gca()\nrwidth = 0.6\nax.hist(dmu, bins=nbin_all, label='ALL ' + ' p = ' + str(p), alpha=0.5,\n rwidth=rwidth, color=colors[0])\nax.hist(dmu[ID1], bins=nbin_1, label='$z<' + str(z1) + '$' + ' p = ' + str(\n p1) + '0', alpha=0.7, rwidth=rwidth, color=colors[1])\nax.hist(dmu[ID2], bins=nbin_2, label='$z>' + str(z2) + '$' + ' p = ' + str(\n p2), alpha=0.8, rwidth=rwidth, color=colors[2])\nax.set_xlim(-3.5, 3.5)\nax.set_xticks([-3, -2, -1, 0, 1, 2, 
3])\nax.set_xticklabels([-3, -2, -1, 0, 1, 2, 3], fontsize=14)\nax.set_xlabel('$\\\\widetilde{\\\\Delta\\\\mu}$', fontsize=14)\nyticks = [0, 50, 100, 150]\nax.set_ylim(0, 170)\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('Counts', fontsize=14)\nax.tick_params(axis='both', direction='in')\nlgd = ax.legend(loc='upper left', fontsize=13, frameon=False)\ntexts = lgd.get_texts()\nfor i in range(len(texts)):\n plt.setp(texts[i], color=colors[i])\nplt.subplot(1, 2, 2)\nax = plt.gca()\na = np.linspace(1, 0.4, 20)\nz = 1 / a - 1\ncolors = ['blue', 'red', 'gray']\nax.hlines(-1, xmin=0, xmax=1.5, linestyle='dashed', lw=2, alpha=1, color=\n colors[0], label='Fiducal model')\nax.errorbar(z, eos_SP[:, 0], yerr=[eos_SP[:, 0] - eos_SP[:, 2], eos_SP[:, 3\n ] - eos_SP[:, 0]], marker='o', elinewidth=1.5, markersize=4, capsize=3,\n capthick=2, color=colors[1], label='Prior enforced')\nax.plot(z, eos_no_prior[:, 0], '--', lw=2.5, color=colors[2])\nax.fill_between(z, y1=eos_no_prior[:, 2], y2=eos_no_prior[:, 3], color=\n colors[2], label='Reconstruction without prior')\nax.set_xlim(-0.025, 1.525)\nax.set_xticks([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5])\nax.set_xticklabels([0, 0.25, 0.5, 0.75, 1.0, 1.25, 1.5], fontsize=14)\nax.set_xlabel('$z$', fontsize=14)\nyticks = [-3, -2, -1, -0]\nax.set_yticks(yticks)\nax.set_yticklabels(yticks, fontsize=14)\nax.set_ylabel('$w(z)$', fontsize=14)\nax.tick_params(axis='both', direction='in')\nhandles, labels = ax.get_legend_handles_labels()\nhandles = [handles[0], handles[2], handles[1]]\nlabels = [labels[0], labels[2], labels[1]]\nlgd = ax.legend(handles, labels, loc='lower left', frameon=False, fontsize=14)\ntexts = lgd.get_texts()\ncid = [0, 2, 1]\nfor i in range(len(texts)):\n plt.setp(texts[i], fontsize=14, color=colors[i])\ndof = 719\nchisq_red = 876.39 / dof\nax.text(0.05, -2, '$\\\\chi^2_{\\\\rm reduced} = ' + str(round(chisq_red, 2)) +\n '$', fontsize=14, color='r')\nplt.subplots_adjust(wspace=0.15, hspace=0.25, 
left=0.065, right=0.985, top=\n 0.975, bottom=0.175)\nplt.savefig('example_eos_result.pdf')\nplt.show()\n", "step-5": "\n###########################################################\n# 2019-02-07: 删除了marginalized prior\n#\n###########################################################\n\nimport sys,os\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom scipy.linalg import eig\nfrom scipy.stats import norm, kstest, normaltest\n\n# use default colors defined by MatPlotlib\ncolors = [u'#1f77b4', u'#ff7f0e', u'#2ca02c', u'#d62728', u'#9467bd', u'#8c564b']\n\n###########################################################\n\nfig = plt.figure(figsize=(11,4))\n\n###########################################################\n# 1) histograms of the normalized dmu(zi). The purpose is\n# to show that the mock sample is not too peculiar\n###########################################################\n\ndef read_jla_mock( mock_filename ):\n\tfp = open(mock_filename,'r')\n\tlines = fp.readlines()\n\tfp.close()\n\n\tjla = []\n\tfor line in lines:\n\t\tsn = line.split()\n\t\ttemp = []\n\t\ttemp.append(float(sn[1]))\n\t\ttemp.append(float(sn[2]))\n\t\ttemp.append(float(sn[3]))\n\t\ttemp.append(float(sn[4]))\n\t\tjla.append(temp)\n\n\treturn np.array(jla)\n\n# jla = read_jla_mock('MOCK_JLA_51.txt')\n# eos_SP = np.loadtxt('eos_51.txt')\n\n# jla = read_jla_mock('MOCK_JLA_16.txt')\n# eos_SP = np.loadtxt('eos_16.txt')\n\n# jla = read_jla_mock('MOCK_JLA_10.txt')\n# eos_SP = np.loadtxt('eos_10.txt')\n\n# jla = read_jla_mock('MOCK_JLA_9.txt')\n# eos_SP = np.loadtxt('eos_9.txt')\n\n# jla = read_jla_mock('MOCK_JLA_30.txt')\n# eos_SP = np.loadtxt('eos_30.txt')\n\n# jla = read_jla_mock('MOCK_JLA_3.txt')\n# eos_SP = np.loadtxt('eos_3.txt')\n\njla = read_jla_mock('MOCK_JLA_40.txt')\neos_SP = np.loadtxt('eos_40.txt')\neos_no_prior = np.loadtxt('eos_no_prior.txt')\neos_no_prior2 = np.loadtxt('eos_no_prior2.txt')\n\n\nz = jla[:,0]\ndmu = (jla[:,1]-jla[:,3])/jla[:,2] # normalize the errors\n\nnbin_all = 
15\nnbin_1 = 15\nnbin_2 = 15\nz1 = 0.2\nz2 = 0.6\nID1 = (z < z1 )\nID2 = (z >= z2 )\n\np = round(kstest(dmu,cdf='norm')[1],2)\np1 = round(kstest(dmu[ID1],'norm')[1],2)\np2 = round(kstest(dmu[ID2],'norm')[1],2)\n\nplt.subplot(1,2,1)\nax = plt.gca()\n\nrwidth=0.6\nax.hist(dmu, bins=nbin_all, label=r'ALL ' + r' p = '+str(p), alpha=0.5, rwidth=rwidth, color=colors[0])\nax.hist(dmu[ID1], bins=nbin_1, label=r'$z<' + str(z1) + '$' + r' p = '+str(p1)+'0', alpha=0.7, rwidth=rwidth, color=colors[1])\nax.hist(dmu[ID2], bins=nbin_2, label=r'$z>' + str(z2) + '$' + r' p = '+str(p2), alpha=0.8, rwidth=rwidth, color=colors[2])\n\n\nax.set_xlim(-3.5,3.5)\nax.set_xticks([-3,-2,-1,0,1,2,3])\nax.set_xticklabels([-3,-2,-1,0,1,2,3],fontsize=14)\nax.set_xlabel(r'$\\widetilde{\\Delta\\mu}$',fontsize=14)\n\nyticks = [0,50,100,150]\nax.set_ylim(0,170)\nax.set_yticks(yticks)\nax.set_yticklabels(yticks,fontsize=14)\nax.set_ylabel(r'Counts',fontsize=14)\n\nax.tick_params(axis='both',direction='in')\n\nlgd=ax.legend(loc='upper left',fontsize=13,frameon=False)\ntexts = lgd.get_texts()\nfor i in range(len(texts)):\n\tplt.setp(texts[i],color=colors[i])\n\n###########################################################\n# 3) reconstructed EoS\n###########################################################\nplt.subplot(1,2,2)\nax = plt.gca()\n\na = np.linspace(1,.4,20)\nz = 1/a-1\n\ncolors=['blue','red','gray']\nax.hlines(-1,xmin=0,xmax=1.5,linestyle='dashed',lw=2,alpha=1,color=colors[0],label=r'Fiducal model')\n\n# EoS result with prior enforced\nax.errorbar(z,eos_SP[:,0],yerr=[eos_SP[:,0]-eos_SP[:,2],eos_SP[:,3]-eos_SP[:,0]],\n\t\t\tmarker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[1],label=r'Prior enforced')\n\n# ax.errorbar(z,eos_SP[:,0],yerr=eos_SP[:,1],\n# \t\t\tmarker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[1],label=r'Reconstruction')\n\n# EoS result without prior\n# 
ax.errorbar(z,eos_no_prior[:,0],yerr=[eos_no_prior[:,0]-eos_no_prior[:,2],eos_no_prior[:,3]-eos_no_prior[:,0]],\n# \t\t\tmarker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[2],label=r'Reconstruction without prior')\n\n# ax.errorbar(z,eos_no_prior[:,0],yerr=eos_no_prior[:,1],\n# \t\t\tmarker='o',elinewidth=1.5,markersize=4,capsize=3,capthick=2,color=colors[2],label=r'Reconstruction without prior')\n\nax.plot(z,eos_no_prior[:,0],'--',lw=2.5,color=colors[2])\n# ax.fill_between(z,y1=eos_no_prior[:,0]-eos_no_prior[:,1],y2=eos_no_prior[:,0]+eos_no_prior[:,1],\n# \t\t\tcolor=colors[2],alpha=0.5,label=r'Without prior')\n\nax.fill_between(z,y1=eos_no_prior[:,2],y2=eos_no_prior[:,3],\n\t\t\tcolor=colors[2],label=r'Reconstruction without prior')\n\n# ax.fill_between(z,y1=eos_no_prior2[:,0]-eos_no_prior2[:,1],y2=eos_no_prior2[:,0]+eos_no_prior2[:,1],\n# \t\t\tcolor='g',alpha=0.5,label=r'Reconstruction without prior')\n\n# ax.fill_between(z,y1=eos_no_prior2[:,2],y2=eos_no_prior2[:,3],\n# \t\t\tcolor='g',alpha=0.5,label=r'Reconstruction without prior')\n\n\nax.set_xlim(-0.025,1.525)\nax.set_xticks([0,0.25,0.5,0.75,1.0,1.25,1.5])\nax.set_xticklabels([0,0.25,0.5,0.75,1.0,1.25,1.5],fontsize=14)\nax.set_xlabel(r'$z$',fontsize=14)\n\nyticks=[-3,-2,-1,-0]\nax.set_yticks(yticks)\nax.set_yticklabels(yticks,fontsize=14)\nax.set_ylabel(r'$w(z)$',fontsize=14)\n\n# lgd=ax.legend(loc='lower left',frameon=False,fontsize=14)\nax.tick_params(axis='both',direction='in')\n\n\n# texts = lgd.get_texts()\n# for i in range(len(texts)):\n# \tplt.setp(texts[i],color=colors[i])\n\nhandles,labels = ax.get_legend_handles_labels()\nhandles = [handles[0], handles[2], handles[1]]\nlabels = [labels[0], labels[2], labels[1]]\n\nlgd=ax.legend(handles,labels,loc='lower left',frameon=False,fontsize=14)\n# lgd=legend(loc='upper left',frameon=False,fontsize=12)\ntexts = lgd.get_texts()\ncid = [0,2,1]\nfor i in range(len(texts)):\n\tplt.setp(texts[i],fontsize=14,color=colors[i])\n\n\n# add 
reduced chisq\ndof = 719\nchisq_red = 876.39/dof\nax.text(0.05,-2,r'$\\chi^2_{\\rm reduced} = '+str(round(chisq_red,2))+'$',fontsize=14,color='r')\n\n###########################################################\n# final adjustments ...\nplt.subplots_adjust(wspace=0.15,\n hspace=0.25,\n left=0.065,\n right=0.985,\n top=0.975,\n bottom=0.175)\n\nplt.savefig('example_eos_result.pdf')\nplt.show()\n", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
from p5 import * capture = None def setup(): global capture createCanvas(390, 240) capture = createCapture(VIDEO) capture.size(320, 240) def draw(): background(255) image(capture, 0, 0, 320, 240) run()
normal
{ "blob_id": "93bfca1e756951faacd29871ad19afad374e25d6", "index": 9647, "step-1": "<mask token>\n\n\ndef setup():\n global capture\n createCanvas(390, 240)\n capture = createCapture(VIDEO)\n capture.size(320, 240)\n\n\ndef draw():\n background(255)\n image(capture, 0, 0, 320, 240)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef setup():\n global capture\n createCanvas(390, 240)\n capture = createCapture(VIDEO)\n capture.size(320, 240)\n\n\ndef draw():\n background(255)\n image(capture, 0, 0, 320, 240)\n\n\nrun()\n", "step-3": "<mask token>\ncapture = None\n\n\ndef setup():\n global capture\n createCanvas(390, 240)\n capture = createCapture(VIDEO)\n capture.size(320, 240)\n\n\ndef draw():\n background(255)\n image(capture, 0, 0, 320, 240)\n\n\nrun()\n", "step-4": "from p5 import *\ncapture = None\n\n\ndef setup():\n global capture\n createCanvas(390, 240)\n capture = createCapture(VIDEO)\n capture.size(320, 240)\n\n\ndef draw():\n background(255)\n image(capture, 0, 0, 320, 240)\n\n\nrun()\n", "step-5": null, "step-ids": [ 2, 3, 4, 5 ] }
[ 2, 3, 4, 5 ]
from django.contrib.auth import authenticate from django.http import JsonResponse, HttpResponse from django.shortcuts import render import json from userprofile.models import Profile from .models import * #发送私信 def sendmessage(request): if request.method == "POST": data = json.loads(request.body) uid = data.get("userid") message = data.get("message") tuid = data.get("touserid") Message.objects.create(uid_id=uid, message=message, tuid_id=tuid) return JsonResponse({ "message": "send message success" }) else: return JsonResponse({ "status": 0, "message": "error method" }) #接收私信 def getmessage(request): if request.method == "POST": data = json.loads(request.body) uid = data.get("userid") msglist= [] mres = Message.objects.filter(tuid_id=uid).all() for res in mres: record = { "messageid":res.id, "userid":res.uid.id, "username":res.uid.username, "message":res.message, "time":res.time } msglist.append(record) return JsonResponse(msglist, safe=False) else: return JsonResponse({ "status": 0, "message": "error method" }) #删除私信 def deletemessage(request): if request.method == "POST": data = json.loads(request.body) mid = data.get("messageid") Message.objects.filter(id=mid).delete() return JsonResponse({ "message": "delete message success" }) else: return JsonResponse({ "status": 0, "message": "error method" })
normal
{ "blob_id": "f25db7d797f1f88bd0374d540adcb396e16740a0", "index": 8953, "step-1": "<mask token>\n\n\ndef getmessage(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n uid = data.get('userid')\n msglist = []\n mres = Message.objects.filter(tuid_id=uid).all()\n for res in mres:\n record = {'messageid': res.id, 'userid': res.uid.id, 'username':\n res.uid.username, 'message': res.message, 'time': res.time}\n msglist.append(record)\n return JsonResponse(msglist, safe=False)\n else:\n return JsonResponse({'status': 0, 'message': 'error method'})\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef sendmessage(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n uid = data.get('userid')\n message = data.get('message')\n tuid = data.get('touserid')\n Message.objects.create(uid_id=uid, message=message, tuid_id=tuid)\n return JsonResponse({'message': 'send message success'})\n else:\n return JsonResponse({'status': 0, 'message': 'error method'})\n\n\ndef getmessage(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n uid = data.get('userid')\n msglist = []\n mres = Message.objects.filter(tuid_id=uid).all()\n for res in mres:\n record = {'messageid': res.id, 'userid': res.uid.id, 'username':\n res.uid.username, 'message': res.message, 'time': res.time}\n msglist.append(record)\n return JsonResponse(msglist, safe=False)\n else:\n return JsonResponse({'status': 0, 'message': 'error method'})\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef sendmessage(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n uid = data.get('userid')\n message = data.get('message')\n tuid = data.get('touserid')\n Message.objects.create(uid_id=uid, message=message, tuid_id=tuid)\n return JsonResponse({'message': 'send message success'})\n else:\n return JsonResponse({'status': 0, 'message': 'error method'})\n\n\ndef getmessage(request):\n if request.method == 'POST':\n data = 
json.loads(request.body)\n uid = data.get('userid')\n msglist = []\n mres = Message.objects.filter(tuid_id=uid).all()\n for res in mres:\n record = {'messageid': res.id, 'userid': res.uid.id, 'username':\n res.uid.username, 'message': res.message, 'time': res.time}\n msglist.append(record)\n return JsonResponse(msglist, safe=False)\n else:\n return JsonResponse({'status': 0, 'message': 'error method'})\n\n\ndef deletemessage(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n mid = data.get('messageid')\n Message.objects.filter(id=mid).delete()\n return JsonResponse({'message': 'delete message success'})\n else:\n return JsonResponse({'status': 0, 'message': 'error method'})\n", "step-4": "from django.contrib.auth import authenticate\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render\nimport json\nfrom userprofile.models import Profile\nfrom .models import *\n\n\ndef sendmessage(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n uid = data.get('userid')\n message = data.get('message')\n tuid = data.get('touserid')\n Message.objects.create(uid_id=uid, message=message, tuid_id=tuid)\n return JsonResponse({'message': 'send message success'})\n else:\n return JsonResponse({'status': 0, 'message': 'error method'})\n\n\ndef getmessage(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n uid = data.get('userid')\n msglist = []\n mres = Message.objects.filter(tuid_id=uid).all()\n for res in mres:\n record = {'messageid': res.id, 'userid': res.uid.id, 'username':\n res.uid.username, 'message': res.message, 'time': res.time}\n msglist.append(record)\n return JsonResponse(msglist, safe=False)\n else:\n return JsonResponse({'status': 0, 'message': 'error method'})\n\n\ndef deletemessage(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n mid = data.get('messageid')\n Message.objects.filter(id=mid).delete()\n return JsonResponse({'message': 
'delete message success'})\n else:\n return JsonResponse({'status': 0, 'message': 'error method'})\n", "step-5": "from django.contrib.auth import authenticate\nfrom django.http import JsonResponse, HttpResponse\nfrom django.shortcuts import render\nimport json\n\nfrom userprofile.models import Profile\nfrom .models import *\n\n#发送私信\ndef sendmessage(request):\n if request.method == \"POST\":\n data = json.loads(request.body)\n uid = data.get(\"userid\")\n message = data.get(\"message\")\n tuid = data.get(\"touserid\")\n Message.objects.create(uid_id=uid, message=message, tuid_id=tuid)\n return JsonResponse({\n \"message\": \"send message success\"\n })\n else:\n return JsonResponse({\n \"status\": 0,\n \"message\": \"error method\"\n })\n\n#接收私信\ndef getmessage(request):\n if request.method == \"POST\":\n data = json.loads(request.body)\n uid = data.get(\"userid\")\n msglist= []\n mres = Message.objects.filter(tuid_id=uid).all()\n for res in mres:\n record = {\n \"messageid\":res.id,\n \"userid\":res.uid.id,\n \"username\":res.uid.username,\n \"message\":res.message,\n \"time\":res.time\n }\n msglist.append(record)\n return JsonResponse(msglist, safe=False)\n\n else:\n return JsonResponse({\n \"status\": 0,\n \"message\": \"error method\"\n })\n\n#删除私信\ndef deletemessage(request):\n if request.method == \"POST\":\n data = json.loads(request.body)\n mid = data.get(\"messageid\")\n Message.objects.filter(id=mid).delete()\n return JsonResponse({\n \"message\": \"delete message success\"\n })\n\n else:\n return JsonResponse({\n \"status\": 0,\n \"message\": \"error method\"\n })", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
<|reserved_special_token_0|> def run(name, dim_k, dump='dump', add_cmd=''): res = all_res[name] model = 'ATT_ts' if res.split('_')[1] == 'att' else 'LastItem' cmd = ( f'python main.py -model={model} -ds=v3 -restore_model={res} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}' ) print(cmd) ret = os.system(cmd) if ret != 0: input('Error!!!!!!') <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def run(name, dim_k, dump='dump', add_cmd=''): res = all_res[name] model = 'ATT_ts' if res.split('_')[1] == 'att' else 'LastItem' cmd = ( f'python main.py -model={model} -ds=v3 -restore_model={res} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}' ) print(cmd) ret = os.system(cmd) if ret != 0: input('Error!!!!!!') <|reserved_special_token_0|> def main(): run('id_att_3', 1024, dump='dump') run('id_last', 1024, dump='dump') run('c_att_5', 256, dump='dump', add_cmd='-seq_length=5') run('c_last', 256, dump='dump') run('id_att_3', 1024, dump='dump_all', add_cmd='-skip_vali') run('id_last', 1024, dump='dump_all', add_cmd='-skip_vali') run('c_att_5', 256, dump='dump_all', add_cmd='-skip_vali -seq_length=5') run('c_last', 256, dump='dump_all', add_cmd='-skip_vali') if __name__ == '__main__': main() <|reserved_special_token_1|> <|reserved_special_token_0|> def run(name, dim_k, dump='dump', add_cmd=''): res = all_res[name] model = 'ATT_ts' if res.split('_')[1] == 'att' else 'LastItem' cmd = ( f'python main.py -model={model} -ds=v3 -restore_model={res} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}' ) print(cmd) ret = os.system(cmd) if ret != 0: input('Error!!!!!!') all_res = dict(id_att_3='id_att_3', id_last='id_last', c_att_5='c_att_5', c_last='c_last') def main(): run('id_att_3', 1024, dump='dump') run('id_last', 1024, dump='dump') run('c_att_5', 256, dump='dump', add_cmd='-seq_length=5') run('c_last', 256, dump='dump') run('id_att_3', 1024, 
dump='dump_all', add_cmd='-skip_vali') run('id_last', 1024, dump='dump_all', add_cmd='-skip_vali') run('c_att_5', 256, dump='dump_all', add_cmd='-skip_vali -seq_length=5') run('c_last', 256, dump='dump_all', add_cmd='-skip_vali') if __name__ == '__main__': main() <|reserved_special_token_1|> import sys import os import utils def run(name, dim_k, dump='dump', add_cmd=''): res = all_res[name] model = 'ATT_ts' if res.split('_')[1] == 'att' else 'LastItem' cmd = ( f'python main.py -model={model} -ds=v3 -restore_model={res} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}' ) print(cmd) ret = os.system(cmd) if ret != 0: input('Error!!!!!!') all_res = dict(id_att_3='id_att_3', id_last='id_last', c_att_5='c_att_5', c_last='c_last') def main(): run('id_att_3', 1024, dump='dump') run('id_last', 1024, dump='dump') run('c_att_5', 256, dump='dump', add_cmd='-seq_length=5') run('c_last', 256, dump='dump') run('id_att_3', 1024, dump='dump_all', add_cmd='-skip_vali') run('id_last', 1024, dump='dump_all', add_cmd='-skip_vali') run('c_att_5', 256, dump='dump_all', add_cmd='-skip_vali -seq_length=5') run('c_last', 256, dump='dump_all', add_cmd='-skip_vali') if __name__ == '__main__': main() <|reserved_special_token_1|> import sys import os import utils def run(name, dim_k, dump='dump', add_cmd=''): res = all_res[name] model = 'ATT_ts' if res.split('_')[1] == 'att' else 'LastItem' cmd = f'python main.py -model={model} -ds=v3 -restore_model={res} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}' print(cmd) ret = os.system(cmd) if ret != 0: input('Error!!!!!!') all_res = dict( id_att_3='id_att_3', id_last='id_last', c_att_5='c_att_5', c_last='c_last', ) def main(): run('id_att_3', 1024, dump='dump') run('id_last', 1024, dump='dump') run('c_att_5', 256, dump='dump', add_cmd='-seq_length=5') run('c_last', 256, dump='dump') run('id_att_3', 1024, dump='dump_all', add_cmd='-skip_vali') run('id_last', 1024, dump='dump_all', 
add_cmd='-skip_vali') run('c_att_5', 256, dump='dump_all', add_cmd='-skip_vali -seq_length=5') run('c_last', 256, dump='dump_all', add_cmd='-skip_vali') if __name__ == '__main__': main()
flexible
{ "blob_id": "548a236c4c485091d312593dcb0fa331ff98f1a8", "index": 6359, "step-1": "<mask token>\n\n\ndef run(name, dim_k, dump='dump', add_cmd=''):\n res = all_res[name]\n model = 'ATT_ts' if res.split('_')[1] == 'att' else 'LastItem'\n cmd = (\n f'python main.py -model={model} -ds=v3 -restore_model={res} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}'\n )\n print(cmd)\n ret = os.system(cmd)\n if ret != 0:\n input('Error!!!!!!')\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef run(name, dim_k, dump='dump', add_cmd=''):\n res = all_res[name]\n model = 'ATT_ts' if res.split('_')[1] == 'att' else 'LastItem'\n cmd = (\n f'python main.py -model={model} -ds=v3 -restore_model={res} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}'\n )\n print(cmd)\n ret = os.system(cmd)\n if ret != 0:\n input('Error!!!!!!')\n\n\n<mask token>\n\n\ndef main():\n run('id_att_3', 1024, dump='dump')\n run('id_last', 1024, dump='dump')\n run('c_att_5', 256, dump='dump', add_cmd='-seq_length=5')\n run('c_last', 256, dump='dump')\n run('id_att_3', 1024, dump='dump_all', add_cmd='-skip_vali')\n run('id_last', 1024, dump='dump_all', add_cmd='-skip_vali')\n run('c_att_5', 256, dump='dump_all', add_cmd='-skip_vali -seq_length=5')\n run('c_last', 256, dump='dump_all', add_cmd='-skip_vali')\n\n\nif __name__ == '__main__':\n main()\n", "step-3": "<mask token>\n\n\ndef run(name, dim_k, dump='dump', add_cmd=''):\n res = all_res[name]\n model = 'ATT_ts' if res.split('_')[1] == 'att' else 'LastItem'\n cmd = (\n f'python main.py -model={model} -ds=v3 -restore_model={res} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}'\n )\n print(cmd)\n ret = os.system(cmd)\n if ret != 0:\n input('Error!!!!!!')\n\n\nall_res = dict(id_att_3='id_att_3', id_last='id_last', c_att_5='c_att_5',\n c_last='c_last')\n\n\ndef main():\n run('id_att_3', 1024, dump='dump')\n run('id_last', 1024, dump='dump')\n run('c_att_5', 
256, dump='dump', add_cmd='-seq_length=5')\n run('c_last', 256, dump='dump')\n run('id_att_3', 1024, dump='dump_all', add_cmd='-skip_vali')\n run('id_last', 1024, dump='dump_all', add_cmd='-skip_vali')\n run('c_att_5', 256, dump='dump_all', add_cmd='-skip_vali -seq_length=5')\n run('c_last', 256, dump='dump_all', add_cmd='-skip_vali')\n\n\nif __name__ == '__main__':\n main()\n", "step-4": "import sys\nimport os\nimport utils\n\n\ndef run(name, dim_k, dump='dump', add_cmd=''):\n res = all_res[name]\n model = 'ATT_ts' if res.split('_')[1] == 'att' else 'LastItem'\n cmd = (\n f'python main.py -model={model} -ds=v3 -restore_model={res} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}'\n )\n print(cmd)\n ret = os.system(cmd)\n if ret != 0:\n input('Error!!!!!!')\n\n\nall_res = dict(id_att_3='id_att_3', id_last='id_last', c_att_5='c_att_5',\n c_last='c_last')\n\n\ndef main():\n run('id_att_3', 1024, dump='dump')\n run('id_last', 1024, dump='dump')\n run('c_att_5', 256, dump='dump', add_cmd='-seq_length=5')\n run('c_last', 256, dump='dump')\n run('id_att_3', 1024, dump='dump_all', add_cmd='-skip_vali')\n run('id_last', 1024, dump='dump_all', add_cmd='-skip_vali')\n run('c_att_5', 256, dump='dump_all', add_cmd='-skip_vali -seq_length=5')\n run('c_last', 256, dump='dump_all', add_cmd='-skip_vali')\n\n\nif __name__ == '__main__':\n main()\n", "step-5": "import sys\nimport os\nimport utils\n\ndef run(name, dim_k, dump='dump', add_cmd=''):\n res = all_res[name]\n model = 'ATT_ts' if res.split('_')[1] == 'att' else 'LastItem'\n\n cmd = f'python main.py -model={model} -ds=v3 -restore_model={res} -k={dim_k} -show_detail -{dump} -nb_topk=2000 -nb_rare_k=1000 -msg={name} {add_cmd}'\n print(cmd)\n\n ret = os.system(cmd)\n if ret != 0:\n input('Error!!!!!!')\n\nall_res = dict(\n id_att_3='id_att_3',\n id_last='id_last',\n\n c_att_5='c_att_5',\n c_last='c_last',\n)\n\n\ndef main():\n run('id_att_3', 1024, dump='dump')\n run('id_last', 1024, 
dump='dump')\n run('c_att_5', 256, dump='dump', add_cmd='-seq_length=5')\n run('c_last', 256, dump='dump')\n\n run('id_att_3', 1024, dump='dump_all', add_cmd='-skip_vali')\n run('id_last', 1024, dump='dump_all', add_cmd='-skip_vali')\n run('c_att_5', 256, dump='dump_all', add_cmd='-skip_vali -seq_length=5')\n run('c_last', 256, dump='dump_all', add_cmd='-skip_vali')\n\n\n\nif __name__ == '__main__':\n main()", "step-ids": [ 1, 3, 4, 5, 6 ] }
[ 1, 3, 4, 5, 6 ]
import pytest from chess.board import Board, ImpossibleMove from chess.pieces import King, Rook, Pawn, Knight def test_board_has_32_pieces(): board = Board() assert board.pieces_quantity() == 32 def test_board_can_be_instatiated_with_any_set_of_pieces(): board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')}) assert board.pieces_quantity() == 2 def test_piece_cant_capture_an_ally(): board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')}) with pytest.raises(ImpossibleMove): board.move('f3', 'e5') def test_alternating_between_players(): board = Board() assert board.turn == 'white' board.move('g2', 'g3') # white pawn moves assert board.turn == 'black' board.move('b7', 'b6') # black pawn moves assert board.turn == 'white' board.move('f1', 'g2') # white bishop moves assert board.turn == 'black' def test_only_white_pieces_can_start(): board = Board() assert board.turn == 'white' with pytest.raises(ImpossibleMove): board.move('b7', 'b6') def test_players_can_put_opponent_in_check(): board = Board({'e1': King('black'), 'f8': Rook('white')}) assert board.check is None board.move('f8', 'e8') assert board.check == 'black' def test_players_can_get_out_of_check(): board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King('white')}) assert board.check is None board.move('f8', 'e8') assert board.check == 'black' board.move('e1', 'f1') assert board.check is None def test_player_should_to_get_out_of_check(): board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King('white')}) assert board.check is None board.move('f8', 'e8') assert board.check == 'black' with pytest.raises(ImpossibleMove): board.move('e1', 'e2') def test_pieces_can_capture_opponent_pieces(): board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'), 'f3': Knight('white')}) assert board.pieces_quantity() == 3 knight = board.get_piece('f3') board.move('f3', 'e5') assert board.get_piece('e5') is knight assert board.pieces_quantity() == 2
normal
{ "blob_id": "5f471fb75b1c4f6fc7aa4cb4f99f9c1a1a9f0ea1", "index": 8595, "step-1": "<mask token>\n\n\ndef test_board_can_be_instatiated_with_any_set_of_pieces():\n board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})\n assert board.pieces_quantity() == 2\n\n\ndef test_piece_cant_capture_an_ally():\n board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})\n with pytest.raises(ImpossibleMove):\n board.move('f3', 'e5')\n\n\n<mask token>\n\n\ndef test_pieces_can_capture_opponent_pieces():\n board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),\n 'f3': Knight('white')})\n assert board.pieces_quantity() == 3\n knight = board.get_piece('f3')\n board.move('f3', 'e5')\n assert board.get_piece('e5') is knight\n assert board.pieces_quantity() == 2\n", "step-2": "<mask token>\n\n\ndef test_board_has_32_pieces():\n board = Board()\n assert board.pieces_quantity() == 32\n\n\ndef test_board_can_be_instatiated_with_any_set_of_pieces():\n board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})\n assert board.pieces_quantity() == 2\n\n\ndef test_piece_cant_capture_an_ally():\n board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})\n with pytest.raises(ImpossibleMove):\n board.move('f3', 'e5')\n\n\n<mask token>\n\n\ndef test_players_can_put_opponent_in_check():\n board = Board({'e1': King('black'), 'f8': Rook('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n\n\ndef test_players_can_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n board.move('e1', 'f1')\n assert board.check is None\n\n\ndef test_player_should_to_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n with 
pytest.raises(ImpossibleMove):\n board.move('e1', 'e2')\n\n\ndef test_pieces_can_capture_opponent_pieces():\n board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),\n 'f3': Knight('white')})\n assert board.pieces_quantity() == 3\n knight = board.get_piece('f3')\n board.move('f3', 'e5')\n assert board.get_piece('e5') is knight\n assert board.pieces_quantity() == 2\n", "step-3": "<mask token>\n\n\ndef test_board_has_32_pieces():\n board = Board()\n assert board.pieces_quantity() == 32\n\n\ndef test_board_can_be_instatiated_with_any_set_of_pieces():\n board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})\n assert board.pieces_quantity() == 2\n\n\ndef test_piece_cant_capture_an_ally():\n board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})\n with pytest.raises(ImpossibleMove):\n board.move('f3', 'e5')\n\n\ndef test_alternating_between_players():\n board = Board()\n assert board.turn == 'white'\n board.move('g2', 'g3')\n assert board.turn == 'black'\n board.move('b7', 'b6')\n assert board.turn == 'white'\n board.move('f1', 'g2')\n assert board.turn == 'black'\n\n\ndef test_only_white_pieces_can_start():\n board = Board()\n assert board.turn == 'white'\n with pytest.raises(ImpossibleMove):\n board.move('b7', 'b6')\n\n\ndef test_players_can_put_opponent_in_check():\n board = Board({'e1': King('black'), 'f8': Rook('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n\n\ndef test_players_can_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n board.move('e1', 'f1')\n assert board.check is None\n\n\ndef test_player_should_to_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n with 
pytest.raises(ImpossibleMove):\n board.move('e1', 'e2')\n\n\ndef test_pieces_can_capture_opponent_pieces():\n board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),\n 'f3': Knight('white')})\n assert board.pieces_quantity() == 3\n knight = board.get_piece('f3')\n board.move('f3', 'e5')\n assert board.get_piece('e5') is knight\n assert board.pieces_quantity() == 2\n", "step-4": "import pytest\nfrom chess.board import Board, ImpossibleMove\nfrom chess.pieces import King, Rook, Pawn, Knight\n\n\ndef test_board_has_32_pieces():\n board = Board()\n assert board.pieces_quantity() == 32\n\n\ndef test_board_can_be_instatiated_with_any_set_of_pieces():\n board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})\n assert board.pieces_quantity() == 2\n\n\ndef test_piece_cant_capture_an_ally():\n board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})\n with pytest.raises(ImpossibleMove):\n board.move('f3', 'e5')\n\n\ndef test_alternating_between_players():\n board = Board()\n assert board.turn == 'white'\n board.move('g2', 'g3')\n assert board.turn == 'black'\n board.move('b7', 'b6')\n assert board.turn == 'white'\n board.move('f1', 'g2')\n assert board.turn == 'black'\n\n\ndef test_only_white_pieces_can_start():\n board = Board()\n assert board.turn == 'white'\n with pytest.raises(ImpossibleMove):\n board.move('b7', 'b6')\n\n\ndef test_players_can_put_opponent_in_check():\n board = Board({'e1': King('black'), 'f8': Rook('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n\n\ndef test_players_can_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n board.move('e1', 'f1')\n assert board.check is None\n\n\ndef test_player_should_to_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King(\n 'white')})\n assert 
board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n with pytest.raises(ImpossibleMove):\n board.move('e1', 'e2')\n\n\ndef test_pieces_can_capture_opponent_pieces():\n board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'),\n 'f3': Knight('white')})\n assert board.pieces_quantity() == 3\n knight = board.get_piece('f3')\n board.move('f3', 'e5')\n assert board.get_piece('e5') is knight\n assert board.pieces_quantity() == 2\n", "step-5": "import pytest\n\nfrom chess.board import Board, ImpossibleMove\nfrom chess.pieces import King, Rook, Pawn, Knight\n\n\ndef test_board_has_32_pieces():\n board = Board()\n assert board.pieces_quantity() == 32\n\n\ndef test_board_can_be_instatiated_with_any_set_of_pieces():\n board = Board(initial_pieces={'a2': Pawn('white'), 'a6': Pawn('black')})\n assert board.pieces_quantity() == 2\n\n\ndef test_piece_cant_capture_an_ally():\n board = Board(initial_pieces={'e5': Pawn('white'), 'f3': Knight('white')})\n with pytest.raises(ImpossibleMove):\n board.move('f3', 'e5')\n\n\ndef test_alternating_between_players():\n board = Board()\n assert board.turn == 'white'\n board.move('g2', 'g3') # white pawn moves\n assert board.turn == 'black'\n board.move('b7', 'b6') # black pawn moves\n assert board.turn == 'white'\n board.move('f1', 'g2') # white bishop moves\n assert board.turn == 'black'\n\n\ndef test_only_white_pieces_can_start():\n board = Board()\n assert board.turn == 'white'\n with pytest.raises(ImpossibleMove):\n board.move('b7', 'b6')\n\n\ndef test_players_can_put_opponent_in_check():\n board = Board({'e1': King('black'), 'f8': Rook('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n\n\ndef test_players_can_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n board.move('e1', 'f1')\n assert board.check is 
None\n\n\ndef test_player_should_to_get_out_of_check():\n board = Board({'e1': King('black'), 'f8': Rook('white'), 'a1': King('white')})\n assert board.check is None\n board.move('f8', 'e8')\n assert board.check == 'black'\n with pytest.raises(ImpossibleMove):\n board.move('e1', 'e2')\n\n\ndef test_pieces_can_capture_opponent_pieces():\n board = Board(initial_pieces={'a8': King('black'), 'e5': Pawn('black'), 'f3': Knight('white')})\n assert board.pieces_quantity() == 3\n\n knight = board.get_piece('f3')\n board.move('f3', 'e5')\n assert board.get_piece('e5') is knight\n assert board.pieces_quantity() == 2\n", "step-ids": [ 3, 7, 9, 10, 11 ] }
[ 3, 7, 9, 10, 11 ]
class Restaurant: <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def open_restaurant(self): """Message indicating the restaurant is open.""" print('The restaurant is now open!') <|reserved_special_token_1|> class Restaurant: <|reserved_special_token_0|> def __init__(self, restaurant_name, cuisine_type): """Initialize name and type.""" self.name = restaurant_name self.type = cuisine_type <|reserved_special_token_0|> def open_restaurant(self): """Message indicating the restaurant is open.""" print('The restaurant is now open!') <|reserved_special_token_1|> class Restaurant: <|reserved_special_token_0|> def __init__(self, restaurant_name, cuisine_type): """Initialize name and type.""" self.name = restaurant_name self.type = cuisine_type def describe_restaurant(self): """Prints restaurant information.""" print("The restaurant's name is " + self.name.title()) print('The cuisine type is ' + self.type.title()) def open_restaurant(self): """Message indicating the restaurant is open.""" print('The restaurant is now open!') <|reserved_special_token_1|> class Restaurant: """A restaurant model.""" def __init__(self, restaurant_name, cuisine_type): """Initialize name and type.""" self.name = restaurant_name self.type = cuisine_type def describe_restaurant(self): """Prints restaurant information.""" print("The restaurant's name is " + self.name.title()) print('The cuisine type is ' + self.type.title()) def open_restaurant(self): """Message indicating the restaurant is open.""" print('The restaurant is now open!') <|reserved_special_token_1|> class Restaurant(): """A restaurant model.""" def __init__(self, restaurant_name, cuisine_type): """Initialize name and type.""" self.name = restaurant_name self.type = cuisine_type def describe_restaurant(self): """Prints restaurant information.""" print("The restaurant's name is " + self.name.title()) print("The cuisine type is " + self.type.title()) def open_restaurant(self): """Message indicating 
the restaurant is open.""" print("The restaurant is now open!") # my_restaurant = Restaurant('Juan Pho Yu', 'pho') # print(my_restaurant.name) # print(my_restaurant.type) # my_restaurant.describe_restaurant() # my_restaurant.open_restaurant()
flexible
{ "blob_id": "4ecf976a7d655efb5af427083ec1943cae6fe56d", "index": 3672, "step-1": "class Restaurant:\n <mask token>\n <mask token>\n <mask token>\n\n def open_restaurant(self):\n \"\"\"Message indicating the restaurant is open.\"\"\"\n print('The restaurant is now open!')\n", "step-2": "class Restaurant:\n <mask token>\n\n def __init__(self, restaurant_name, cuisine_type):\n \"\"\"Initialize name and type.\"\"\"\n self.name = restaurant_name\n self.type = cuisine_type\n <mask token>\n\n def open_restaurant(self):\n \"\"\"Message indicating the restaurant is open.\"\"\"\n print('The restaurant is now open!')\n", "step-3": "class Restaurant:\n <mask token>\n\n def __init__(self, restaurant_name, cuisine_type):\n \"\"\"Initialize name and type.\"\"\"\n self.name = restaurant_name\n self.type = cuisine_type\n\n def describe_restaurant(self):\n \"\"\"Prints restaurant information.\"\"\"\n print(\"The restaurant's name is \" + self.name.title())\n print('The cuisine type is ' + self.type.title())\n\n def open_restaurant(self):\n \"\"\"Message indicating the restaurant is open.\"\"\"\n print('The restaurant is now open!')\n", "step-4": "class Restaurant:\n \"\"\"A restaurant model.\"\"\"\n\n def __init__(self, restaurant_name, cuisine_type):\n \"\"\"Initialize name and type.\"\"\"\n self.name = restaurant_name\n self.type = cuisine_type\n\n def describe_restaurant(self):\n \"\"\"Prints restaurant information.\"\"\"\n print(\"The restaurant's name is \" + self.name.title())\n print('The cuisine type is ' + self.type.title())\n\n def open_restaurant(self):\n \"\"\"Message indicating the restaurant is open.\"\"\"\n print('The restaurant is now open!')\n", "step-5": "class Restaurant():\n\t\"\"\"A restaurant model.\"\"\"\n\n\tdef __init__(self, restaurant_name, cuisine_type):\n\t\t\"\"\"Initialize name and type.\"\"\"\n\t\tself.name = restaurant_name\n\t\tself.type = cuisine_type\n\n\n\tdef describe_restaurant(self):\n\t\t\"\"\"Prints restaurant 
information.\"\"\"\n\t\tprint(\"The restaurant's name is \" + self.name.title())\n\t\tprint(\"The cuisine type is \" + self.type.title())\n\n\n\tdef open_restaurant(self):\n\t\t\"\"\"Message indicating the restaurant is open.\"\"\"\n\t\tprint(\"The restaurant is now open!\")\n\n\n# my_restaurant = Restaurant('Juan Pho Yu', 'pho')\n\n# print(my_restaurant.name)\n# print(my_restaurant.type)\n\n# my_restaurant.describe_restaurant()\n# my_restaurant.open_restaurant()", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> class Auction(object): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Auction(object): def __init__(self, name, type, status, start_price, buy_now_price): self.name = name self.type = type self.status = status if AuctionType.BID == type: self.start_price = start_price self.bids = [] if AuctionType.BUY_NOW == type: self.buy_now_price = buy_now_price <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class Auction(object): def __init__(self, name, type, status, start_price, buy_now_price): self.name = name self.type = type self.status = status if AuctionType.BID == type: self.start_price = start_price self.bids = [] if AuctionType.BUY_NOW == type: self.buy_now_price = buy_now_price def add_bid(self, price): self.bids.append(Bid(price)) <|reserved_special_token_1|> from auction_type import AuctionType from bid import Bid class Auction(object): def __init__(self, name, type, status, start_price, buy_now_price): self.name = name self.type = type self.status = status if AuctionType.BID == type: self.start_price = start_price self.bids = [] if AuctionType.BUY_NOW == type: self.buy_now_price = buy_now_price def add_bid(self, price): self.bids.append(Bid(price))
flexible
{ "blob_id": "9e05f883d80d7583c9f7e16b2fb5d3f67896388d", "index": 5629, "step-1": "<mask token>\n\n\nclass Auction(object):\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass Auction(object):\n\n def __init__(self, name, type, status, start_price, buy_now_price):\n self.name = name\n self.type = type\n self.status = status\n if AuctionType.BID == type:\n self.start_price = start_price\n self.bids = []\n if AuctionType.BUY_NOW == type:\n self.buy_now_price = buy_now_price\n <mask token>\n", "step-3": "<mask token>\n\n\nclass Auction(object):\n\n def __init__(self, name, type, status, start_price, buy_now_price):\n self.name = name\n self.type = type\n self.status = status\n if AuctionType.BID == type:\n self.start_price = start_price\n self.bids = []\n if AuctionType.BUY_NOW == type:\n self.buy_now_price = buy_now_price\n\n def add_bid(self, price):\n self.bids.append(Bid(price))\n", "step-4": "from auction_type import AuctionType\nfrom bid import Bid\n\n\nclass Auction(object):\n\n def __init__(self, name, type, status, start_price, buy_now_price):\n self.name = name\n self.type = type\n self.status = status\n if AuctionType.BID == type:\n self.start_price = start_price\n self.bids = []\n if AuctionType.BUY_NOW == type:\n self.buy_now_price = buy_now_price\n\n def add_bid(self, price):\n self.bids.append(Bid(price))\n", "step-5": null, "step-ids": [ 1, 2, 3, 4 ] }
[ 1, 2, 3, 4 ]
sair = True while sair: num = int(input("informe um numero inteiro:")) if num <16: fatorial = 1 x = num while x>=1: print(x,".") fatorial = fatorial*x x -= 1 print("Valor total do Fatorial do %d = %d "%(num,fatorial)) else: print("Número inválido!!") sair = input("deseja sair? s ou n:") if sair.upper() == "S": sair = False print("Programa Encerrado")
normal
{ "blob_id": "421e7ed0cc5a8c8acc9b98fae4ee6cc784d9b068", "index": 9683, "step-1": "<mask token>\n", "step-2": "<mask token>\nwhile sair:\n num = int(input('informe um numero inteiro:'))\n if num < 16:\n fatorial = 1\n x = num\n while x >= 1:\n print(x, '.')\n fatorial = fatorial * x\n x -= 1\n print('Valor total do Fatorial do %d = %d ' % (num, fatorial))\n else:\n print('Número inválido!!')\n sair = input('deseja sair? s ou n:')\n if sair.upper() == 'S':\n sair = False\n print('Programa Encerrado')\n", "step-3": "sair = True\nwhile sair:\n num = int(input('informe um numero inteiro:'))\n if num < 16:\n fatorial = 1\n x = num\n while x >= 1:\n print(x, '.')\n fatorial = fatorial * x\n x -= 1\n print('Valor total do Fatorial do %d = %d ' % (num, fatorial))\n else:\n print('Número inválido!!')\n sair = input('deseja sair? s ou n:')\n if sair.upper() == 'S':\n sair = False\n print('Programa Encerrado')\n", "step-4": "sair = True\r\nwhile sair:\r\n num = int(input(\"informe um numero inteiro:\"))\r\n if num <16:\r\n fatorial = 1\r\n x = num\r\n while x>=1:\r\n print(x,\".\")\r\n fatorial = fatorial*x\r\n x -= 1\r\n print(\"Valor total do Fatorial do %d = %d \"%(num,fatorial))\r\n else:\r\n print(\"Número inválido!!\")\r\n sair = input(\"deseja sair? s ou n:\")\r\n \r\n if sair.upper() == \"S\":\r\n sair = False\r\n print(\"Programa Encerrado\")", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
# Problem Statement – An automobile company manufactures both a two wheeler (TW) and a four wheeler (FW). A company manager wants to make the production of both types of vehicle according to the given data below: # 1st data, Total number of vehicle (two-wheeler + four-wheeler)=v # 2nd data, Total number of wheels = W # The task is to find how many two-wheelers as well as four-wheelers need to manufacture as per the given data. # Example : # Input : # 200 -> Value of V # 540 -> Value of W # Output : # TW =130 FW=70 v=int(input()) w=int(input()) if (w<2 or w%2!=0 or w<=v): print("INVALID INPUT") else: x=((4*v)-w)//2 print("TW={0} FW={1}".format(x,v-x))
normal
{ "blob_id": "74939f81e999b8e239eb64fa10b56f48c47f7d94", "index": 1622, "step-1": "<mask token>\n", "step-2": "<mask token>\nif w < 2 or w % 2 != 0 or w <= v:\n print('INVALID INPUT')\nelse:\n x = (4 * v - w) // 2\n print('TW={0} FW={1}'.format(x, v - x))\n", "step-3": "v = int(input())\nw = int(input())\nif w < 2 or w % 2 != 0 or w <= v:\n print('INVALID INPUT')\nelse:\n x = (4 * v - w) // 2\n print('TW={0} FW={1}'.format(x, v - x))\n", "step-4": "# Problem Statement – An automobile company manufactures both a two wheeler (TW) and a four wheeler (FW). A company manager wants to make the production of both types of vehicle according to the given data below:\n\n# 1st data, Total number of vehicle (two-wheeler + four-wheeler)=v\n# 2nd data, Total number of wheels = W\n \n\n# The task is to find how many two-wheelers as well as four-wheelers need to manufacture as per the given data.\n# Example :\n\n# Input :\n\n# 200 -> Value of V\n# 540 -> Value of W\n# Output :\n\n# TW =130 FW=70\nv=int(input())\nw=int(input())\nif (w<2 or w%2!=0 or w<=v):\n\tprint(\"INVALID INPUT\")\nelse:\n\tx=((4*v)-w)//2\n\tprint(\"TW={0} FW={1}\".format(x,v-x))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> @app.route('/hello') @app.route('/hello/<name>') def hello(name=None): name = 'World' if not name else name return 'Hello %s' % name @app.route('/') def index(): return render_template('index.html', title='home') @app.route('/distance', methods=['POST', 'GET']) def distance(): result = None if request.method == 'POST': location_a = request.form['location_a'] location_b = request.form['location_b'] result = get_distance(location_a, location_b) return render_template('distance.html', title='Afstand', result=result) <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> @app.route('/hello') @app.route('/hello/<name>') def hello(name=None): name = 'World' if not name else name return 'Hello %s' % name @app.route('/') def index(): return render_template('index.html', title='home') @app.route('/distance', methods=['POST', 'GET']) def distance(): result = None if request.method == 'POST': location_a = request.form['location_a'] location_b = request.form['location_b'] result = get_distance(location_a, location_b) return render_template('distance.html', title='Afstand', result=result) if __name__ == '__main__': app.run(debug=True) <|reserved_special_token_1|> <|reserved_special_token_0|> app = Flask(__name__) @app.route('/hello') @app.route('/hello/<name>') def hello(name=None): name = 'World' if not name else name return 'Hello %s' % name @app.route('/') def index(): return render_template('index.html', title='home') @app.route('/distance', methods=['POST', 'GET']) def distance(): result = None if request.method == 'POST': location_a = request.form['location_a'] location_b = request.form['location_b'] result = get_distance(location_a, location_b) return render_template('distance.html', title='Afstand', result=result) if __name__ == '__main__': app.run(debug=True) <|reserved_special_token_1|> from flask import Flask, render_template, request from distance import get_distance app = Flask(__name__) 
@app.route('/hello') @app.route('/hello/<name>') def hello(name=None): name = 'World' if not name else name return 'Hello %s' % name @app.route('/') def index(): return render_template('index.html', title='home') @app.route('/distance', methods=['POST', 'GET']) def distance(): result = None if request.method == 'POST': location_a = request.form['location_a'] location_b = request.form['location_b'] result = get_distance(location_a, location_b) return render_template('distance.html', title='Afstand', result=result) if __name__ == '__main__': app.run(debug=True) <|reserved_special_token_1|> from flask import Flask, render_template, request from distance import get_distance app = Flask(__name__) @app.route('/hello') @app.route('/hello/<name>') def hello(name=None): name = "World" if not name else name return "Hello %s" % name @app.route('/') def index(): return render_template('index.html', title='home') @app.route('/distance', methods=['POST', 'GET']) def distance(): result = None if request.method == 'POST': location_a = request.form['location_a'] location_b = request.form['location_b'] result = get_distance(location_a, location_b) return render_template('distance.html', title='Afstand', result=result) if __name__ == '__main__': app.run(debug=True)
flexible
{ "blob_id": "05052e9ccbd076e71e9ec6148887ce7b82ed316d", "index": 6256, "step-1": "<mask token>\n\n\n@app.route('/hello')\n@app.route('/hello/<name>')\ndef hello(name=None):\n name = 'World' if not name else name\n return 'Hello %s' % name\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', title='home')\n\n\n@app.route('/distance', methods=['POST', 'GET'])\ndef distance():\n result = None\n if request.method == 'POST':\n location_a = request.form['location_a']\n location_b = request.form['location_b']\n result = get_distance(location_a, location_b)\n return render_template('distance.html', title='Afstand', result=result)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\n@app.route('/hello')\n@app.route('/hello/<name>')\ndef hello(name=None):\n name = 'World' if not name else name\n return 'Hello %s' % name\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', title='home')\n\n\n@app.route('/distance', methods=['POST', 'GET'])\ndef distance():\n result = None\n if request.method == 'POST':\n location_a = request.form['location_a']\n location_b = request.form['location_b']\n result = get_distance(location_a, location_b)\n return render_template('distance.html', title='Afstand', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-3": "<mask token>\napp = Flask(__name__)\n\n\n@app.route('/hello')\n@app.route('/hello/<name>')\ndef hello(name=None):\n name = 'World' if not name else name\n return 'Hello %s' % name\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', title='home')\n\n\n@app.route('/distance', methods=['POST', 'GET'])\ndef distance():\n result = None\n if request.method == 'POST':\n location_a = request.form['location_a']\n location_b = request.form['location_b']\n result = get_distance(location_a, location_b)\n return render_template('distance.html', title='Afstand', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-4": "from flask 
import Flask, render_template, request\nfrom distance import get_distance\napp = Flask(__name__)\n\n\n@app.route('/hello')\n@app.route('/hello/<name>')\ndef hello(name=None):\n name = 'World' if not name else name\n return 'Hello %s' % name\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', title='home')\n\n\n@app.route('/distance', methods=['POST', 'GET'])\ndef distance():\n result = None\n if request.method == 'POST':\n location_a = request.form['location_a']\n location_b = request.form['location_b']\n result = get_distance(location_a, location_b)\n return render_template('distance.html', title='Afstand', result=result)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-5": "from flask import Flask, render_template, request\nfrom distance import get_distance\n\napp = Flask(__name__)\n\n\n@app.route('/hello')\n@app.route('/hello/<name>')\ndef hello(name=None):\n name = \"World\" if not name else name\n return \"Hello %s\" % name\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', title='home')\n\n\n@app.route('/distance', methods=['POST', 'GET'])\ndef distance():\n result = None\n if request.method == 'POST':\n location_a = request.form['location_a']\n location_b = request.form['location_b']\n result = get_distance(location_a, location_b)\n\n return render_template('distance.html', title='Afstand', result=result)\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "step-ids": [ 3, 4, 5, 6, 7 ] }
[ 3, 4, 5, 6, 7 ]
import cpt_tools from gui_helpers.gui_config import * chisqr_str = '\u03c72' mu_str = '\u03bc' sigma_str = '\u03c3' class FitWidget( object ) : def __init__( self, plotter_widget, analyzer = None ) : self.plotter_widget = plotter_widget self.plotter = plotter_widget.plotter self.hists = self.plotter.all_hists self.layout = QVBoxLayout() params_labels = [ 'A', mu_str, sigma_str, chisqr_str ] self.num_params = len( params_labels ) h_labels = [ '', '', 'Left', 'Right' ] h_labels.extend( params_labels ) v_labels = [ x.title for x in self.hists ] nrows = len( v_labels ) ncols = len( h_labels ) self.table = QTableWidget( nrows, ncols ) self.table.setMinimumWidth( 400 ) self.table.setMinimumHeight(100) # self.table.setMaximumHeight(200) # size_policy = QSizePolicy( QSizePolicy.Maximum, # QSizePolicy.Maximum ) # self.table.setSizePolicy( size_policy ) self.table.horizontalHeader().setSectionResizeMode( QHeaderView.Stretch ) self.table.verticalHeader().setSectionResizeMode( QHeaderView.Stretch ) # header = self.table.horizontalHeader() # header.setSectionResizeMode( 0, QHeaderView.Stretch ) # for j in range( 1, len( h_labels ) ) : # header.setSectionResizeMode( j, QHeaderView.ResizeToContents ) self.table.setHorizontalHeaderLabels( h_labels ) self.table.setVerticalHeaderLabels( v_labels ) self.bounds_entries = [] self.params_labels = [] self.fit_buttons = [] self.delete_buttons = [] for i in range( len( self.hists ) ) : self.bounds_entries.append( [ QLineEdit(), QLineEdit() ] ) self.params_labels.append( [ QLabel() for i in range( self.num_params ) ] ) self.fit_buttons.append( QPushButton( 'Fit' ) ) self.delete_buttons.append( QPushButton( 'Delete' ) ) self.fit_buttons[i].clicked.connect( lambda state, a=i : self.fit_button_clicked( a ) ) self.delete_buttons[i].clicked.connect( lambda state, a=i : self.delete_button_clicked( a ) ) # self.fit_buttons[i].clicked.emit() self.table.setCellWidget( i, 0, self.fit_buttons[i] ) self.table.setCellWidget( i, 1, self.delete_buttons[i] 
) self.table.setCellWidget( i, 2, self.bounds_entries[i][0] ) self.table.setCellWidget( i, 3, self.bounds_entries[i][1] ) for j in range( self.num_params ) : self.table.setCellWidget( i, 4 + j, self.params_labels[i][j] ) # self.left_x_bound_entry.setMaximumWidth( PLOTTER_WIDGET_QLINEEDIT_WIDTH ) # self.right_x_bound_entry.setMaximumWidth( PLOTTER_WIDGET_QLINEEDIT_WIDTH ) # self.layout.setSpacing(0) # self.layout.addLayout( label_layout ) self.layout.addWidget( self.table ) def fit_button_clicked( self, i ) : print( self.bounds_entries[i][0].text() ) try : left_x_bound = float( self.bounds_entries[i][0].text() ) right_x_bound = float( self.bounds_entries[i][1].text() ) except : print( 'WARNING: please specify bounds for fit' ) return bounds = [ left_x_bound, right_x_bound ] fit = self.hists[i].apply_fit( bounds ) if fit is None : print( 'ERROR: fit failed' ) return self.set_fit_params( fit, i ) self.plotter.update_all() self.plotter_widget.reload_visualization_params() return fit def set_fit_params( self, fit, i ) : if fit is None : for j in range( self.num_params ) : self.params_labels[i][j].setText( '' ) return params = fit.params params_errors = fit.params_errors redchisqr = fit.redchisqr # params, params_errors, redchisqr = fit if params_errors is not None : labels = [ '%.1f\u00b1%.1f' % ( params[j], params_errors[j] ) for j in range( len(params) ) ] else : labels = [ '%.1f' % params[j] for j in range( len(params) ) ] labels.append( '%.1f' % redchisqr ) for j in range( len(params) + 1 ) : self.params_labels[i][j].setText( labels[j] ) def delete_button_clicked( self, i ) : self.hists[i].remove_fit()
normal
{ "blob_id": "aa51b2d4bfe4051f3302d14cf2123a3881a8a2e3", "index": 5668, "step-1": "<mask token>\n\n\nclass FitWidget(object):\n\n def __init__(self, plotter_widget, analyzer=None):\n self.plotter_widget = plotter_widget\n self.plotter = plotter_widget.plotter\n self.hists = self.plotter.all_hists\n self.layout = QVBoxLayout()\n params_labels = ['A', mu_str, sigma_str, chisqr_str]\n self.num_params = len(params_labels)\n h_labels = ['', '', 'Left', 'Right']\n h_labels.extend(params_labels)\n v_labels = [x.title for x in self.hists]\n nrows = len(v_labels)\n ncols = len(h_labels)\n self.table = QTableWidget(nrows, ncols)\n self.table.setMinimumWidth(400)\n self.table.setMinimumHeight(100)\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.setHorizontalHeaderLabels(h_labels)\n self.table.setVerticalHeaderLabels(v_labels)\n self.bounds_entries = []\n self.params_labels = []\n self.fit_buttons = []\n self.delete_buttons = []\n for i in range(len(self.hists)):\n self.bounds_entries.append([QLineEdit(), QLineEdit()])\n self.params_labels.append([QLabel() for i in range(self.\n num_params)])\n self.fit_buttons.append(QPushButton('Fit'))\n self.delete_buttons.append(QPushButton('Delete'))\n self.fit_buttons[i].clicked.connect(lambda state, a=i: self.\n fit_button_clicked(a))\n self.delete_buttons[i].clicked.connect(lambda state, a=i: self.\n delete_button_clicked(a))\n self.table.setCellWidget(i, 0, self.fit_buttons[i])\n self.table.setCellWidget(i, 1, self.delete_buttons[i])\n self.table.setCellWidget(i, 2, self.bounds_entries[i][0])\n self.table.setCellWidget(i, 3, self.bounds_entries[i][1])\n for j in range(self.num_params):\n self.table.setCellWidget(i, 4 + j, self.params_labels[i][j])\n self.layout.addWidget(self.table)\n <mask token>\n <mask token>\n <mask token>\n", "step-2": "<mask token>\n\n\nclass FitWidget(object):\n\n def __init__(self, plotter_widget, 
analyzer=None):\n self.plotter_widget = plotter_widget\n self.plotter = plotter_widget.plotter\n self.hists = self.plotter.all_hists\n self.layout = QVBoxLayout()\n params_labels = ['A', mu_str, sigma_str, chisqr_str]\n self.num_params = len(params_labels)\n h_labels = ['', '', 'Left', 'Right']\n h_labels.extend(params_labels)\n v_labels = [x.title for x in self.hists]\n nrows = len(v_labels)\n ncols = len(h_labels)\n self.table = QTableWidget(nrows, ncols)\n self.table.setMinimumWidth(400)\n self.table.setMinimumHeight(100)\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.setHorizontalHeaderLabels(h_labels)\n self.table.setVerticalHeaderLabels(v_labels)\n self.bounds_entries = []\n self.params_labels = []\n self.fit_buttons = []\n self.delete_buttons = []\n for i in range(len(self.hists)):\n self.bounds_entries.append([QLineEdit(), QLineEdit()])\n self.params_labels.append([QLabel() for i in range(self.\n num_params)])\n self.fit_buttons.append(QPushButton('Fit'))\n self.delete_buttons.append(QPushButton('Delete'))\n self.fit_buttons[i].clicked.connect(lambda state, a=i: self.\n fit_button_clicked(a))\n self.delete_buttons[i].clicked.connect(lambda state, a=i: self.\n delete_button_clicked(a))\n self.table.setCellWidget(i, 0, self.fit_buttons[i])\n self.table.setCellWidget(i, 1, self.delete_buttons[i])\n self.table.setCellWidget(i, 2, self.bounds_entries[i][0])\n self.table.setCellWidget(i, 3, self.bounds_entries[i][1])\n for j in range(self.num_params):\n self.table.setCellWidget(i, 4 + j, self.params_labels[i][j])\n self.layout.addWidget(self.table)\n\n def fit_button_clicked(self, i):\n print(self.bounds_entries[i][0].text())\n try:\n left_x_bound = float(self.bounds_entries[i][0].text())\n right_x_bound = float(self.bounds_entries[i][1].text())\n except:\n print('WARNING: please specify bounds for fit')\n return\n bounds = [left_x_bound, 
right_x_bound]\n fit = self.hists[i].apply_fit(bounds)\n if fit is None:\n print('ERROR: fit failed')\n return\n self.set_fit_params(fit, i)\n self.plotter.update_all()\n self.plotter_widget.reload_visualization_params()\n return fit\n\n def set_fit_params(self, fit, i):\n if fit is None:\n for j in range(self.num_params):\n self.params_labels[i][j].setText('')\n return\n params = fit.params\n params_errors = fit.params_errors\n redchisqr = fit.redchisqr\n if params_errors is not None:\n labels = [('%.1f±%.1f' % (params[j], params_errors[j])) for j in\n range(len(params))]\n else:\n labels = [('%.1f' % params[j]) for j in range(len(params))]\n labels.append('%.1f' % redchisqr)\n for j in range(len(params) + 1):\n self.params_labels[i][j].setText(labels[j])\n\n def delete_button_clicked(self, i):\n self.hists[i].remove_fit()\n", "step-3": "<mask token>\nchisqr_str = 'χ2'\nmu_str = 'μ'\nsigma_str = 'σ'\n\n\nclass FitWidget(object):\n\n def __init__(self, plotter_widget, analyzer=None):\n self.plotter_widget = plotter_widget\n self.plotter = plotter_widget.plotter\n self.hists = self.plotter.all_hists\n self.layout = QVBoxLayout()\n params_labels = ['A', mu_str, sigma_str, chisqr_str]\n self.num_params = len(params_labels)\n h_labels = ['', '', 'Left', 'Right']\n h_labels.extend(params_labels)\n v_labels = [x.title for x in self.hists]\n nrows = len(v_labels)\n ncols = len(h_labels)\n self.table = QTableWidget(nrows, ncols)\n self.table.setMinimumWidth(400)\n self.table.setMinimumHeight(100)\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.setHorizontalHeaderLabels(h_labels)\n self.table.setVerticalHeaderLabels(v_labels)\n self.bounds_entries = []\n self.params_labels = []\n self.fit_buttons = []\n self.delete_buttons = []\n for i in range(len(self.hists)):\n self.bounds_entries.append([QLineEdit(), QLineEdit()])\n self.params_labels.append([QLabel() for i in 
range(self.\n num_params)])\n self.fit_buttons.append(QPushButton('Fit'))\n self.delete_buttons.append(QPushButton('Delete'))\n self.fit_buttons[i].clicked.connect(lambda state, a=i: self.\n fit_button_clicked(a))\n self.delete_buttons[i].clicked.connect(lambda state, a=i: self.\n delete_button_clicked(a))\n self.table.setCellWidget(i, 0, self.fit_buttons[i])\n self.table.setCellWidget(i, 1, self.delete_buttons[i])\n self.table.setCellWidget(i, 2, self.bounds_entries[i][0])\n self.table.setCellWidget(i, 3, self.bounds_entries[i][1])\n for j in range(self.num_params):\n self.table.setCellWidget(i, 4 + j, self.params_labels[i][j])\n self.layout.addWidget(self.table)\n\n def fit_button_clicked(self, i):\n print(self.bounds_entries[i][0].text())\n try:\n left_x_bound = float(self.bounds_entries[i][0].text())\n right_x_bound = float(self.bounds_entries[i][1].text())\n except:\n print('WARNING: please specify bounds for fit')\n return\n bounds = [left_x_bound, right_x_bound]\n fit = self.hists[i].apply_fit(bounds)\n if fit is None:\n print('ERROR: fit failed')\n return\n self.set_fit_params(fit, i)\n self.plotter.update_all()\n self.plotter_widget.reload_visualization_params()\n return fit\n\n def set_fit_params(self, fit, i):\n if fit is None:\n for j in range(self.num_params):\n self.params_labels[i][j].setText('')\n return\n params = fit.params\n params_errors = fit.params_errors\n redchisqr = fit.redchisqr\n if params_errors is not None:\n labels = [('%.1f±%.1f' % (params[j], params_errors[j])) for j in\n range(len(params))]\n else:\n labels = [('%.1f' % params[j]) for j in range(len(params))]\n labels.append('%.1f' % redchisqr)\n for j in range(len(params) + 1):\n self.params_labels[i][j].setText(labels[j])\n\n def delete_button_clicked(self, i):\n self.hists[i].remove_fit()\n", "step-4": "import cpt_tools\nfrom gui_helpers.gui_config import *\nchisqr_str = 'χ2'\nmu_str = 'μ'\nsigma_str = 'σ'\n\n\nclass FitWidget(object):\n\n def __init__(self, plotter_widget, 
analyzer=None):\n self.plotter_widget = plotter_widget\n self.plotter = plotter_widget.plotter\n self.hists = self.plotter.all_hists\n self.layout = QVBoxLayout()\n params_labels = ['A', mu_str, sigma_str, chisqr_str]\n self.num_params = len(params_labels)\n h_labels = ['', '', 'Left', 'Right']\n h_labels.extend(params_labels)\n v_labels = [x.title for x in self.hists]\n nrows = len(v_labels)\n ncols = len(h_labels)\n self.table = QTableWidget(nrows, ncols)\n self.table.setMinimumWidth(400)\n self.table.setMinimumHeight(100)\n self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.verticalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.table.setHorizontalHeaderLabels(h_labels)\n self.table.setVerticalHeaderLabels(v_labels)\n self.bounds_entries = []\n self.params_labels = []\n self.fit_buttons = []\n self.delete_buttons = []\n for i in range(len(self.hists)):\n self.bounds_entries.append([QLineEdit(), QLineEdit()])\n self.params_labels.append([QLabel() for i in range(self.\n num_params)])\n self.fit_buttons.append(QPushButton('Fit'))\n self.delete_buttons.append(QPushButton('Delete'))\n self.fit_buttons[i].clicked.connect(lambda state, a=i: self.\n fit_button_clicked(a))\n self.delete_buttons[i].clicked.connect(lambda state, a=i: self.\n delete_button_clicked(a))\n self.table.setCellWidget(i, 0, self.fit_buttons[i])\n self.table.setCellWidget(i, 1, self.delete_buttons[i])\n self.table.setCellWidget(i, 2, self.bounds_entries[i][0])\n self.table.setCellWidget(i, 3, self.bounds_entries[i][1])\n for j in range(self.num_params):\n self.table.setCellWidget(i, 4 + j, self.params_labels[i][j])\n self.layout.addWidget(self.table)\n\n def fit_button_clicked(self, i):\n print(self.bounds_entries[i][0].text())\n try:\n left_x_bound = float(self.bounds_entries[i][0].text())\n right_x_bound = float(self.bounds_entries[i][1].text())\n except:\n print('WARNING: please specify bounds for fit')\n return\n bounds = [left_x_bound, 
right_x_bound]\n fit = self.hists[i].apply_fit(bounds)\n if fit is None:\n print('ERROR: fit failed')\n return\n self.set_fit_params(fit, i)\n self.plotter.update_all()\n self.plotter_widget.reload_visualization_params()\n return fit\n\n def set_fit_params(self, fit, i):\n if fit is None:\n for j in range(self.num_params):\n self.params_labels[i][j].setText('')\n return\n params = fit.params\n params_errors = fit.params_errors\n redchisqr = fit.redchisqr\n if params_errors is not None:\n labels = [('%.1f±%.1f' % (params[j], params_errors[j])) for j in\n range(len(params))]\n else:\n labels = [('%.1f' % params[j]) for j in range(len(params))]\n labels.append('%.1f' % redchisqr)\n for j in range(len(params) + 1):\n self.params_labels[i][j].setText(labels[j])\n\n def delete_button_clicked(self, i):\n self.hists[i].remove_fit()\n", "step-5": "import cpt_tools\nfrom gui_helpers.gui_config import * \n\n\nchisqr_str = '\\u03c72'\nmu_str = '\\u03bc'\nsigma_str = '\\u03c3'\n\n\n\nclass FitWidget( object ) :\n\n def __init__( self, plotter_widget, analyzer = None ) :\n\n self.plotter_widget = plotter_widget \n self.plotter = plotter_widget.plotter\n self.hists = self.plotter.all_hists\n \n self.layout = QVBoxLayout()\n\n params_labels = [ 'A', mu_str, sigma_str, chisqr_str ]\n self.num_params = len( params_labels ) \n\n h_labels = [ '', '', 'Left', 'Right' ]\n h_labels.extend( params_labels ) \n v_labels = [ x.title for x in self.hists ] \n \n nrows = len( v_labels )\n ncols = len( h_labels ) \n\n self.table = QTableWidget( nrows, ncols ) \n self.table.setMinimumWidth( 400 ) \n self.table.setMinimumHeight(100)\n # self.table.setMaximumHeight(200)\n # size_policy = QSizePolicy( QSizePolicy.Maximum,\n # QSizePolicy.Maximum )\n \n # self.table.setSizePolicy( size_policy )\n \n self.table.horizontalHeader().setSectionResizeMode( QHeaderView.Stretch ) \n self.table.verticalHeader().setSectionResizeMode( QHeaderView.Stretch )\n # header = self.table.horizontalHeader() \n # 
header.setSectionResizeMode( 0, QHeaderView.Stretch )\n # for j in range( 1, len( h_labels ) ) : \n # header.setSectionResizeMode( j, QHeaderView.ResizeToContents )\n \n self.table.setHorizontalHeaderLabels( h_labels )\n self.table.setVerticalHeaderLabels( v_labels )\n\n self.bounds_entries = [] \n self.params_labels = []\n self.fit_buttons = []\n self.delete_buttons = [] \n \n for i in range( len( self.hists ) ) :\n \n self.bounds_entries.append( [ QLineEdit(), QLineEdit() ] )\n self.params_labels.append( [ QLabel() for i in range( self.num_params ) ] )\n\n self.fit_buttons.append( QPushButton( 'Fit' ) )\n self.delete_buttons.append( QPushButton( 'Delete' ) )\n \n self.fit_buttons[i].clicked.connect( lambda state, a=i : self.fit_button_clicked( a ) )\n self.delete_buttons[i].clicked.connect( lambda state, a=i : self.delete_button_clicked( a ) )\n # self.fit_buttons[i].clicked.emit() \n\n self.table.setCellWidget( i, 0, self.fit_buttons[i] )\n self.table.setCellWidget( i, 1, self.delete_buttons[i] ) \n\n self.table.setCellWidget( i, 2, self.bounds_entries[i][0] )\n self.table.setCellWidget( i, 3, self.bounds_entries[i][1] )\n\n for j in range( self.num_params ) :\n self.table.setCellWidget( i, 4 + j, self.params_labels[i][j] )\n\n # self.left_x_bound_entry.setMaximumWidth( PLOTTER_WIDGET_QLINEEDIT_WIDTH ) \n # self.right_x_bound_entry.setMaximumWidth( PLOTTER_WIDGET_QLINEEDIT_WIDTH ) \n \n # self.layout.setSpacing(0)\n # self.layout.addLayout( label_layout ) \n\n self.layout.addWidget( self.table )\n \n\n \n def fit_button_clicked( self, i ) :\n\n print( self.bounds_entries[i][0].text() )\n \n try : \n left_x_bound = float( self.bounds_entries[i][0].text() )\n right_x_bound = float( self.bounds_entries[i][1].text() )\n except :\n print( 'WARNING: please specify bounds for fit' )\n return\n\n bounds = [ left_x_bound, right_x_bound ] \n fit = self.hists[i].apply_fit( bounds ) \n if fit is None :\n print( 'ERROR: fit failed' ) \n return\n\n self.set_fit_params( fit, i 
) \n self.plotter.update_all()\n self.plotter_widget.reload_visualization_params()\n return fit \n\n\n def set_fit_params( self, fit, i ) :\n\n if fit is None :\n for j in range( self.num_params ) :\n self.params_labels[i][j].setText( '' )\n return \n \n params = fit.params \n params_errors = fit.params_errors\n redchisqr = fit.redchisqr\n \n # params, params_errors, redchisqr = fit\n \n if params_errors is not None : \n labels = [ '%.1f\\u00b1%.1f' % ( params[j], params_errors[j] ) for j in range( len(params) ) ]\n else :\n labels = [ '%.1f' % params[j] for j in range( len(params) ) ]\n \n labels.append( '%.1f' % redchisqr )\n for j in range( len(params) + 1 ) : \n self.params_labels[i][j].setText( labels[j] )\n\n \n \n\n def delete_button_clicked( self, i ) :\n self.hists[i].remove_fit() \n", "step-ids": [ 2, 5, 6, 7, 8 ] }
[ 2, 5, 6, 7, 8 ]
from datetime import date from django.test import TestCase from model_mommy import mommy from apps.debtors.models import Debtor from apps.invoices.models import Invoice, InvoiceStatusChoices from apps.invoices.services import InvoiceService class InvoiceServiceTestCase(TestCase): def setUp(self) ->None: self.invoice_service = InvoiceService() self.debtor_1 = mommy.make(Debtor) self.invoice_1 = mommy.make(Invoice, debtor=self.debtor_1) def test_create_invoice(self): invoice = self.invoice_service.create_invoice(amount=12.1, status= InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor =self.debtor_1) self.assertEqual(invoice.amount, 12.1) self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE) self.assertEqual(invoice.due_date, date(2019, 4, 1)) self.assertEqual(invoice.debtor, self.debtor_1) def test_update_invoice(self): updated_invoice = self.invoice_service.update_invoice(instance=self .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo') self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID) self.assertFalse(hasattr(updated_invoice, 'random_attr')) def test_delete_invoice(self): self.invoice_service.delete_invoice(instance=self.invoice_1) self.assertFalse(Invoice.objects.all().exists())
normal
{ "blob_id": "5f77e93d63c696363c30f019019acd22c694308b", "index": 4529, "step-1": "<mask token>\n\n\nclass InvoiceServiceTestCase(TestCase):\n <mask token>\n\n def test_create_invoice(self):\n invoice = self.invoice_service.create_invoice(amount=12.1, status=\n InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor\n =self.debtor_1)\n self.assertEqual(invoice.amount, 12.1)\n self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE)\n self.assertEqual(invoice.due_date, date(2019, 4, 1))\n self.assertEqual(invoice.debtor, self.debtor_1)\n\n def test_update_invoice(self):\n updated_invoice = self.invoice_service.update_invoice(instance=self\n .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo')\n self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID)\n self.assertFalse(hasattr(updated_invoice, 'random_attr'))\n <mask token>\n", "step-2": "<mask token>\n\n\nclass InvoiceServiceTestCase(TestCase):\n\n def setUp(self) ->None:\n self.invoice_service = InvoiceService()\n self.debtor_1 = mommy.make(Debtor)\n self.invoice_1 = mommy.make(Invoice, debtor=self.debtor_1)\n\n def test_create_invoice(self):\n invoice = self.invoice_service.create_invoice(amount=12.1, status=\n InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor\n =self.debtor_1)\n self.assertEqual(invoice.amount, 12.1)\n self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE)\n self.assertEqual(invoice.due_date, date(2019, 4, 1))\n self.assertEqual(invoice.debtor, self.debtor_1)\n\n def test_update_invoice(self):\n updated_invoice = self.invoice_service.update_invoice(instance=self\n .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo')\n self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID)\n self.assertFalse(hasattr(updated_invoice, 'random_attr'))\n <mask token>\n", "step-3": "<mask token>\n\n\nclass InvoiceServiceTestCase(TestCase):\n\n def setUp(self) ->None:\n self.invoice_service = InvoiceService()\n self.debtor_1 = 
mommy.make(Debtor)\n self.invoice_1 = mommy.make(Invoice, debtor=self.debtor_1)\n\n def test_create_invoice(self):\n invoice = self.invoice_service.create_invoice(amount=12.1, status=\n InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor\n =self.debtor_1)\n self.assertEqual(invoice.amount, 12.1)\n self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE)\n self.assertEqual(invoice.due_date, date(2019, 4, 1))\n self.assertEqual(invoice.debtor, self.debtor_1)\n\n def test_update_invoice(self):\n updated_invoice = self.invoice_service.update_invoice(instance=self\n .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo')\n self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID)\n self.assertFalse(hasattr(updated_invoice, 'random_attr'))\n\n def test_delete_invoice(self):\n self.invoice_service.delete_invoice(instance=self.invoice_1)\n self.assertFalse(Invoice.objects.all().exists())\n", "step-4": "from datetime import date\nfrom django.test import TestCase\nfrom model_mommy import mommy\nfrom apps.debtors.models import Debtor\nfrom apps.invoices.models import Invoice, InvoiceStatusChoices\nfrom apps.invoices.services import InvoiceService\n\n\nclass InvoiceServiceTestCase(TestCase):\n\n def setUp(self) ->None:\n self.invoice_service = InvoiceService()\n self.debtor_1 = mommy.make(Debtor)\n self.invoice_1 = mommy.make(Invoice, debtor=self.debtor_1)\n\n def test_create_invoice(self):\n invoice = self.invoice_service.create_invoice(amount=12.1, status=\n InvoiceStatusChoices.OVERDUE, due_date=date(2019, 4, 1), debtor\n =self.debtor_1)\n self.assertEqual(invoice.amount, 12.1)\n self.assertEqual(invoice.status, InvoiceStatusChoices.OVERDUE)\n self.assertEqual(invoice.due_date, date(2019, 4, 1))\n self.assertEqual(invoice.debtor, self.debtor_1)\n\n def test_update_invoice(self):\n updated_invoice = self.invoice_service.update_invoice(instance=self\n .invoice_1, status=InvoiceStatusChoices.PAID, random_attr='foo')\n 
self.assertEqual(updated_invoice.status, InvoiceStatusChoices.PAID)\n self.assertFalse(hasattr(updated_invoice, 'random_attr'))\n\n def test_delete_invoice(self):\n self.invoice_service.delete_invoice(instance=self.invoice_1)\n self.assertFalse(Invoice.objects.all().exists())\n", "step-5": null, "step-ids": [ 3, 4, 5, 6 ] }
[ 3, 4, 5, 6 ]
from platypush.message.event import Event class ClipboardEvent(Event): def __init__(self, text: str, *args, **kwargs): super().__init__(*args, text=text, **kwargs) # vim:sw=4:ts=4:et:
normal
{ "blob_id": "9b02ce0b3acb14bdd6463c5bdba865b28253767c", "index": 7896, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass ClipboardEvent(Event):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass ClipboardEvent(Event):\n\n def __init__(self, text: str, *args, **kwargs):\n super().__init__(*args, text=text, **kwargs)\n", "step-4": "from platypush.message.event import Event\n\n\nclass ClipboardEvent(Event):\n\n def __init__(self, text: str, *args, **kwargs):\n super().__init__(*args, text=text, **kwargs)\n", "step-5": "from platypush.message.event import Event\n\n\nclass ClipboardEvent(Event):\n def __init__(self, text: str, *args, **kwargs):\n super().__init__(*args, text=text, **kwargs)\n\n\n# vim:sw=4:ts=4:et:\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
import math getal1 = 5 getal2 = 7 getal3 = 8 getal4 = -4 getal5 = 2 print(getal1 * getal2 + getal3) print(getal1 * (getal2 + getal3)) print(getal2 + getal3 / getal1) print((getal2 + getal3) / getal1) print(getal2 + getal3 % getal1) print(abs(getal4 * getal1)) print(pow(getal3, getal5)) print(round(getal5 / getal2, 2)) print(max(getal1, getal2, getal3, getal4, getal5)) print(min(getal1, getal2, getal3, getal4, getal5)) print(math.sqrt(getal5 * getal3))
normal
{ "blob_id": "30d75aafd9612ac02557b947fc4e3c2f7322a7fd", "index": 3555, "step-1": "<mask token>\n", "step-2": "<mask token>\nprint(getal1 * getal2 + getal3)\nprint(getal1 * (getal2 + getal3))\nprint(getal2 + getal3 / getal1)\nprint((getal2 + getal3) / getal1)\nprint(getal2 + getal3 % getal1)\nprint(abs(getal4 * getal1))\nprint(pow(getal3, getal5))\nprint(round(getal5 / getal2, 2))\nprint(max(getal1, getal2, getal3, getal4, getal5))\nprint(min(getal1, getal2, getal3, getal4, getal5))\nprint(math.sqrt(getal5 * getal3))\n", "step-3": "<mask token>\ngetal1 = 5\ngetal2 = 7\ngetal3 = 8\ngetal4 = -4\ngetal5 = 2\nprint(getal1 * getal2 + getal3)\nprint(getal1 * (getal2 + getal3))\nprint(getal2 + getal3 / getal1)\nprint((getal2 + getal3) / getal1)\nprint(getal2 + getal3 % getal1)\nprint(abs(getal4 * getal1))\nprint(pow(getal3, getal5))\nprint(round(getal5 / getal2, 2))\nprint(max(getal1, getal2, getal3, getal4, getal5))\nprint(min(getal1, getal2, getal3, getal4, getal5))\nprint(math.sqrt(getal5 * getal3))\n", "step-4": "import math\ngetal1 = 5\ngetal2 = 7\ngetal3 = 8\ngetal4 = -4\ngetal5 = 2\nprint(getal1 * getal2 + getal3)\nprint(getal1 * (getal2 + getal3))\nprint(getal2 + getal3 / getal1)\nprint((getal2 + getal3) / getal1)\nprint(getal2 + getal3 % getal1)\nprint(abs(getal4 * getal1))\nprint(pow(getal3, getal5))\nprint(round(getal5 / getal2, 2))\nprint(max(getal1, getal2, getal3, getal4, getal5))\nprint(min(getal1, getal2, getal3, getal4, getal5))\nprint(math.sqrt(getal5 * getal3))\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from typing import Tuple, List import math class Point: def __init__(self, x, y): self.x = x self.y = y self.constraints = [] def __str__(self): return f"({self.x}, {self.y})" class Line: def __init__(self, point1, point2): if isinstance(point1, Point): self.p1 = point1 elif isinstance(point1, (Tuple, List)): self.p1 = Point(*point1) else: raise TypeError("Incorrect types") if isinstance(point2, Point): self.p2 = point2 elif isinstance(point1, (Tuple, List)): self.p2 = Point(*point2) else: raise TypeError("Incorrect types") self.constraints = [] def middle(self): x = (self.p1.x + self.p2.x)/2 y = (self.p1.y + self.p2.y)/2 return Point(x, y) def length(self): return math.sqrt((self.p2.x - self.p1.x)**2 + (self.p2.y - self.p1.y)**2) def tang(self): return (self.p2.y - self.p1.y)/(self.p2.x - self.p1.x) def __str__(self): return f"p1={self.p1} p2={self.p2}" class Constraints: def __init__(self): pass class Parallelism(Constraints): def __init__(self, line1, line2): super().__init__() self.line1 = line1 self.line2 = line2 def get_const(self): dx = self.line2.length() / math.sqrt(1 + self.line1.tang()**2) dy = self.line1.tang() * dx self.line2.p2.x, self.line2.p2.y = self.line2.p1.x + dx, self.line2.p2.y + dy p1 = Point(0, 0) p2 = Point(2, 6) p3 = Point(7, 0) p4 = Point(10, 6) line11 = Line(p1, p2) line22 = Line(p3, p4) parall = Parallelism(line11, line22) parall.get_const() print(line22)
normal
{ "blob_id": "e59a51641dc2966b0170678de064e2845e170cf5", "index": 4943, "step-1": "<mask token>\n\n\nclass Line:\n\n def __init__(self, point1, point2):\n if isinstance(point1, Point):\n self.p1 = point1\n elif isinstance(point1, (Tuple, List)):\n self.p1 = Point(*point1)\n else:\n raise TypeError('Incorrect types')\n if isinstance(point2, Point):\n self.p2 = point2\n elif isinstance(point1, (Tuple, List)):\n self.p2 = Point(*point2)\n else:\n raise TypeError('Incorrect types')\n self.constraints = []\n\n def middle(self):\n x = (self.p1.x + self.p2.x) / 2\n y = (self.p1.y + self.p2.y) / 2\n return Point(x, y)\n\n def length(self):\n return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.\n p1.y) ** 2)\n\n def tang(self):\n return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)\n\n def __str__(self):\n return f'p1={self.p1} p2={self.p2}'\n\n\nclass Constraints:\n\n def __init__(self):\n pass\n\n\nclass Parallelism(Constraints):\n\n def __init__(self, line1, line2):\n super().__init__()\n self.line1 = line1\n self.line2 = line2\n\n def get_const(self):\n dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)\n dy = self.line1.tang() * dx\n self.line2.p2.x, self.line2.p2.y = (self.line2.p1.x + dx, self.\n line2.p2.y + dy)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Point:\n <mask token>\n <mask token>\n\n\nclass Line:\n\n def __init__(self, point1, point2):\n if isinstance(point1, Point):\n self.p1 = point1\n elif isinstance(point1, (Tuple, List)):\n self.p1 = Point(*point1)\n else:\n raise TypeError('Incorrect types')\n if isinstance(point2, Point):\n self.p2 = point2\n elif isinstance(point1, (Tuple, List)):\n self.p2 = Point(*point2)\n else:\n raise TypeError('Incorrect types')\n self.constraints = []\n\n def middle(self):\n x = (self.p1.x + self.p2.x) / 2\n y = (self.p1.y + self.p2.y) / 2\n return Point(x, y)\n\n def length(self):\n return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.\n p1.y) ** 2)\n\n def 
tang(self):\n return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)\n\n def __str__(self):\n return f'p1={self.p1} p2={self.p2}'\n\n\nclass Constraints:\n\n def __init__(self):\n pass\n\n\nclass Parallelism(Constraints):\n\n def __init__(self, line1, line2):\n super().__init__()\n self.line1 = line1\n self.line2 = line2\n\n def get_const(self):\n dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)\n dy = self.line1.tang() * dx\n self.line2.p2.x, self.line2.p2.y = (self.line2.p1.x + dx, self.\n line2.p2.y + dy)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.constraints = []\n\n def __str__(self):\n return f'({self.x}, {self.y})'\n\n\nclass Line:\n\n def __init__(self, point1, point2):\n if isinstance(point1, Point):\n self.p1 = point1\n elif isinstance(point1, (Tuple, List)):\n self.p1 = Point(*point1)\n else:\n raise TypeError('Incorrect types')\n if isinstance(point2, Point):\n self.p2 = point2\n elif isinstance(point1, (Tuple, List)):\n self.p2 = Point(*point2)\n else:\n raise TypeError('Incorrect types')\n self.constraints = []\n\n def middle(self):\n x = (self.p1.x + self.p2.x) / 2\n y = (self.p1.y + self.p2.y) / 2\n return Point(x, y)\n\n def length(self):\n return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.\n p1.y) ** 2)\n\n def tang(self):\n return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)\n\n def __str__(self):\n return f'p1={self.p1} p2={self.p2}'\n\n\nclass Constraints:\n\n def __init__(self):\n pass\n\n\nclass Parallelism(Constraints):\n\n def __init__(self, line1, line2):\n super().__init__()\n self.line1 = line1\n self.line2 = line2\n\n def get_const(self):\n dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)\n dy = self.line1.tang() * dx\n self.line2.p2.x, self.line2.p2.y = (self.line2.p1.x + dx, self.\n line2.p2.y + dy)\n\n\n<mask token>\nparall.get_const()\nprint(line22)\n", "step-4": "from typing import Tuple, 
List\nimport math\n\n\nclass Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n self.constraints = []\n\n def __str__(self):\n return f'({self.x}, {self.y})'\n\n\nclass Line:\n\n def __init__(self, point1, point2):\n if isinstance(point1, Point):\n self.p1 = point1\n elif isinstance(point1, (Tuple, List)):\n self.p1 = Point(*point1)\n else:\n raise TypeError('Incorrect types')\n if isinstance(point2, Point):\n self.p2 = point2\n elif isinstance(point1, (Tuple, List)):\n self.p2 = Point(*point2)\n else:\n raise TypeError('Incorrect types')\n self.constraints = []\n\n def middle(self):\n x = (self.p1.x + self.p2.x) / 2\n y = (self.p1.y + self.p2.y) / 2\n return Point(x, y)\n\n def length(self):\n return math.sqrt((self.p2.x - self.p1.x) ** 2 + (self.p2.y - self.\n p1.y) ** 2)\n\n def tang(self):\n return (self.p2.y - self.p1.y) / (self.p2.x - self.p1.x)\n\n def __str__(self):\n return f'p1={self.p1} p2={self.p2}'\n\n\nclass Constraints:\n\n def __init__(self):\n pass\n\n\nclass Parallelism(Constraints):\n\n def __init__(self, line1, line2):\n super().__init__()\n self.line1 = line1\n self.line2 = line2\n\n def get_const(self):\n dx = self.line2.length() / math.sqrt(1 + self.line1.tang() ** 2)\n dy = self.line1.tang() * dx\n self.line2.p2.x, self.line2.p2.y = (self.line2.p1.x + dx, self.\n line2.p2.y + dy)\n\n\np1 = Point(0, 0)\np2 = Point(2, 6)\np3 = Point(7, 0)\np4 = Point(10, 6)\nline11 = Line(p1, p2)\nline22 = Line(p3, p4)\nparall = Parallelism(line11, line22)\nparall.get_const()\nprint(line22)\n", "step-5": "from typing import Tuple, List\nimport math\n\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n self.constraints = []\n\n def __str__(self):\n return f\"({self.x}, {self.y})\"\n\n\nclass Line:\n def __init__(self, point1, point2):\n if isinstance(point1, Point):\n self.p1 = point1\n elif isinstance(point1, (Tuple, List)):\n self.p1 = Point(*point1)\n else:\n raise TypeError(\"Incorrect types\")\n\n if isinstance(point2, 
Point):\n self.p2 = point2\n elif isinstance(point1, (Tuple, List)):\n self.p2 = Point(*point2)\n else:\n raise TypeError(\"Incorrect types\")\n\n self.constraints = []\n\n def middle(self):\n x = (self.p1.x + self.p2.x)/2\n y = (self.p1.y + self.p2.y)/2\n return Point(x, y)\n\n def length(self):\n return math.sqrt((self.p2.x - self.p1.x)**2 + (self.p2.y - self.p1.y)**2)\n\n def tang(self):\n return (self.p2.y - self.p1.y)/(self.p2.x - self.p1.x)\n\n def __str__(self):\n return f\"p1={self.p1} p2={self.p2}\"\n\n\nclass Constraints:\n def __init__(self):\n pass\n\n\nclass Parallelism(Constraints):\n def __init__(self, line1, line2):\n super().__init__()\n self.line1 = line1\n self.line2 = line2\n\n def get_const(self):\n dx = self.line2.length() / math.sqrt(1 + self.line1.tang()**2)\n dy = self.line1.tang() * dx\n\n self.line2.p2.x, self.line2.p2.y = self.line2.p1.x + dx, self.line2.p2.y + dy\n\n\np1 = Point(0, 0)\np2 = Point(2, 6)\n\np3 = Point(7, 0)\np4 = Point(10, 6)\n\nline11 = Line(p1, p2)\nline22 = Line(p3, p4)\n\nparall = Parallelism(line11, line22)\nparall.get_const()\n\nprint(line22)\n", "step-ids": [ 11, 12, 15, 17, 18 ] }
[ 11, 12, 15, 17, 18 ]
<|reserved_special_token_0|> class MyThingsDashboardModule(DashboardModule): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> def init_with_context(self, context): user = context['request'].user slices = Slice.objects.filter(group__in=user.groups.all(). values_list('pk', flat=True)) context['slices'] = slices nodes = {} nodes_states = ['offline', 'safe', 'production'] for group in user.groups.all(): nodes[group] = [] qs_nodes = Node.objects.filter(group=group) for state in nodes_states: nodes[group].append(qs_nodes.filter(state_set__value=state) .count()) context['nodes_states'] = nodes_states context['user_nodes'] = nodes self.has_data = nodes or slices <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class MyThingsDashboardModule(DashboardModule): <|reserved_special_token_0|> title = 'My Things' template = 'dashboard/modules/mythings.html' def init_with_context(self, context): user = context['request'].user slices = Slice.objects.filter(group__in=user.groups.all(). values_list('pk', flat=True)) context['slices'] = slices nodes = {} nodes_states = ['offline', 'safe', 'production'] for group in user.groups.all(): nodes[group] = [] qs_nodes = Node.objects.filter(group=group) for state in nodes_states: nodes[group].append(qs_nodes.filter(state_set__value=state) .count()) context['nodes_states'] = nodes_states context['user_nodes'] = nodes self.has_data = nodes or slices def is_empty(self): return not self.has_data <|reserved_special_token_1|> <|reserved_special_token_0|> class MyThingsDashboardModule(DashboardModule): """ Controller dashboard module to provide an overview to the user of the nodes and slices of its groups. """ title = 'My Things' template = 'dashboard/modules/mythings.html' def init_with_context(self, context): user = context['request'].user slices = Slice.objects.filter(group__in=user.groups.all(). 
values_list('pk', flat=True)) context['slices'] = slices nodes = {} nodes_states = ['offline', 'safe', 'production'] for group in user.groups.all(): nodes[group] = [] qs_nodes = Node.objects.filter(group=group) for state in nodes_states: nodes[group].append(qs_nodes.filter(state_set__value=state) .count()) context['nodes_states'] = nodes_states context['user_nodes'] = nodes self.has_data = nodes or slices def is_empty(self): return not self.has_data <|reserved_special_token_1|> from admin_tools.dashboard.modules import DashboardModule from nodes.models import Node from slices.models import Slice class MyThingsDashboardModule(DashboardModule): """ Controller dashboard module to provide an overview to the user of the nodes and slices of its groups. """ title = 'My Things' template = 'dashboard/modules/mythings.html' def init_with_context(self, context): user = context['request'].user slices = Slice.objects.filter(group__in=user.groups.all(). values_list('pk', flat=True)) context['slices'] = slices nodes = {} nodes_states = ['offline', 'safe', 'production'] for group in user.groups.all(): nodes[group] = [] qs_nodes = Node.objects.filter(group=group) for state in nodes_states: nodes[group].append(qs_nodes.filter(state_set__value=state) .count()) context['nodes_states'] = nodes_states context['user_nodes'] = nodes self.has_data = nodes or slices def is_empty(self): return not self.has_data <|reserved_special_token_1|> from admin_tools.dashboard.modules import DashboardModule from nodes.models import Node from slices.models import Slice class MyThingsDashboardModule(DashboardModule): """ Controller dashboard module to provide an overview to the user of the nodes and slices of its groups. 
""" title="My Things" template = "dashboard/modules/mythings.html" def init_with_context(self, context): user = context['request'].user # Get user slices slices = Slice.objects.filter(group__in=user.groups.all().values_list('pk', flat=True)) context['slices'] = slices # Get user nodes nodes = {} nodes_states = ['offline', 'safe', 'production'] for group in user.groups.all(): nodes[group] = [] qs_nodes = Node.objects.filter(group=group) for state in nodes_states: nodes[group].append(qs_nodes.filter(state_set__value=state).count()) context['nodes_states'] = nodes_states context['user_nodes'] = nodes # initialize to calculate is_empty self.has_data = nodes or slices def is_empty(self): return not self.has_data
flexible
{ "blob_id": "90324392e763ac6ea78c77b909c4bea667d45e6c", "index": 5896, "step-1": "<mask token>\n\n\nclass MyThingsDashboardModule(DashboardModule):\n <mask token>\n <mask token>\n <mask token>\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n <mask token>\n", "step-2": "<mask token>\n\n\nclass MyThingsDashboardModule(DashboardModule):\n <mask token>\n title = 'My Things'\n template = 'dashboard/modules/mythings.html'\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n\n def is_empty(self):\n return not self.has_data\n", "step-3": "<mask token>\n\n\nclass MyThingsDashboardModule(DashboardModule):\n \"\"\"\n Controller dashboard module to provide an overview to\n the user of the nodes and slices of its groups.\n \"\"\"\n title = 'My Things'\n template = 'dashboard/modules/mythings.html'\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', 
flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n\n def is_empty(self):\n return not self.has_data\n", "step-4": "from admin_tools.dashboard.modules import DashboardModule\nfrom nodes.models import Node\nfrom slices.models import Slice\n\n\nclass MyThingsDashboardModule(DashboardModule):\n \"\"\"\n Controller dashboard module to provide an overview to\n the user of the nodes and slices of its groups.\n \"\"\"\n title = 'My Things'\n template = 'dashboard/modules/mythings.html'\n\n def init_with_context(self, context):\n user = context['request'].user\n slices = Slice.objects.filter(group__in=user.groups.all().\n values_list('pk', flat=True))\n context['slices'] = slices\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state)\n .count())\n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n self.has_data = nodes or slices\n\n def is_empty(self):\n return not self.has_data\n", "step-5": "from admin_tools.dashboard.modules import DashboardModule\n\nfrom nodes.models import Node\nfrom slices.models import Slice\n\nclass MyThingsDashboardModule(DashboardModule):\n \"\"\"\n Controller dashboard module to provide an overview to\n the user of the nodes and slices of its groups.\n \"\"\"\n title=\"My Things\"\n template = \"dashboard/modules/mythings.html\"\n \n def init_with_context(self, context):\n user = context['request'].user\n \n # Get user slices\n slices = 
Slice.objects.filter(group__in=user.groups.all().values_list('pk', flat=True))\n context['slices'] = slices\n \n # Get user nodes\n nodes = {}\n nodes_states = ['offline', 'safe', 'production']\n for group in user.groups.all():\n nodes[group] = []\n qs_nodes = Node.objects.filter(group=group)\n for state in nodes_states:\n nodes[group].append(qs_nodes.filter(state_set__value=state).count())\n \n context['nodes_states'] = nodes_states\n context['user_nodes'] = nodes\n \n # initialize to calculate is_empty\n self.has_data = nodes or slices\n \n def is_empty(self):\n return not self.has_data\n", "step-ids": [ 2, 4, 5, 6, 7 ] }
[ 2, 4, 5, 6, 7 ]
#!/usr/bin/env python2 # coding=utf8 from __future__ import absolute_import, division, print_function from sqlalchemy import func from walis.model.walis import walis_session from walis.model.zeus import zeus_session, zeus_db_handler from walis.model.zeus.activity import ( SubsidyProcessRecord, SubsidyPayRecord, ActivityStats, ) from walis.model.walis.activity import PaymentNoticeRecord as NoticeRecord from walis.utils.time import get_today_begin_time, get_today_end_time MAX_LIST_SIZE = 1000 DEFAULT_LIST_SIZE = 200 def get_new_pay_records(process_at, limit=200): with zeus_session() as session: result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.restaurant_id, SubsidyProcessRecord.card_id, SubsidyProcessRecord.processed_at, SubsidyPayRecord.status). \ outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \ filter(SubsidyPayRecord.id > process_at). \ filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \ order_by(SubsidyPayRecord.id.asc()).limit(limit).all() return result def get_success_pay_records(record_ids): with zeus_session() as session: result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.restaurant_id, SubsidyProcessRecord.card_id, SubsidyProcessRecord.processed_at,). \ outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \ filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS). \ filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \ filter(SubsidyPayRecord.id.in_(record_ids)).all() return result def get_activity_stats(pay_record_id): with zeus_session() as session: results = session.query(ActivityStats.activity_id, ActivityStats.activity_category_id, func.sum(ActivityStats.total_subsidy), func.min(ActivityStats.date), func.max(ActivityStats.date), func.sum(ActivityStats.quantity), ).group_by( ActivityStats.restaurant_id, ActivityStats.activity_id, ActivityStats.activity_category_id). 
\ filter(ActivityStats.pay_record_id == pay_record_id). \ filter(ActivityStats.status == ActivityStats.STATUS_PAY_SUCCESS).all() return results def get_success_record_ids_by_restaurant( restaurant_id, activity_id=None, activity_category_id=None): with zeus_session() as session: query = session.query(SubsidyPayRecord.id). \ filter(SubsidyPayRecord.restaurant_id == restaurant_id). \ filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS) if activity_id is not None: query.filter(SubsidyPayRecord.activity_id == activity_id) if activity_category_id is not None: query.filter( SubsidyPayRecord.activity_category_id == activity_category_id) record_ids = query.all() return [r[0] for r in record_ids] PAYLOG_STATUS_LIST = { ActivityStats.STATUS_PAY_RECORD_GENERATED, ActivityStats.STATUS_PAY_SUCCESS, ActivityStats.STATUS_PAY_FAIL, } @zeus_db_handler def query_paylog_by_rst(restaurant_id, activity_id=None, activity_category_id=None, offset=None, limit=None): """ Except ActivityStats.STATUS_PENDING (未审核状态) """ q = session.query( ActivityStats.pay_record_id, ActivityStats.activity_id, ActivityStats.activity_category_id, ActivityStats.status, func.min(ActivityStats.date), func.max(ActivityStats.date), func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy), SubsidyPayRecord.created_at, func.max(SubsidyProcessRecord.id)). \ group_by(ActivityStats.pay_record_id, ActivityStats.activity_id, ActivityStats.activity_category_id). \ outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.pay_record_id). \ outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). 
\ filter(ActivityStats.restaurant_id == restaurant_id).\ filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)).\ order_by(SubsidyPayRecord.created_at.desc()) if activity_id is not None: q = q.filter(ActivityStats.activity_id == activity_id) if activity_category_id is not None: q = q.filter(ActivityStats.activity_category_id == activity_category_id) if limit is not None: q = q.limit(min(limit, MAX_LIST_SIZE)) else: q = q.limit(DEFAULT_LIST_SIZE) if offset is not None: q = q.offset(offset) return q @zeus_db_handler def query_pay_records(restaurant_id, offset=None, limit=None): q = session.query(SubsidyPayRecord).\ filter(SubsidyPayRecord.restaurant_id == restaurant_id).\ order_by(SubsidyPayRecord.created_at.desc()) if limit is not None: q = q.limit(min(limit, MAX_LIST_SIZE)) else: q = q.limit(DEFAULT_LIST_SIZE) if offset is not None: q = q.offset(offset) return q.all() @zeus_db_handler def query_paylog(pay_record_ids, activity_id=None, activity_category_id=None, offset=None, limit=None): q = session.query( ActivityStats.pay_record_id, ActivityStats.activity_id, ActivityStats.activity_category_id, ActivityStats.status, func.min(ActivityStats.date), func.max(ActivityStats.date), func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)).\ group_by(ActivityStats.pay_record_id, ActivityStats.activity_id, ActivityStats.activity_category_id). 
\ filter(ActivityStats.pay_record_id.in_(pay_record_ids)).\ filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)).\ order_by(ActivityStats.created_at.desc()) if activity_id is not None: q = q.filter(ActivityStats.activity_id == activity_id) if activity_category_id is not None: q = q.filter(ActivityStats.activity_category_id == activity_category_id) if limit is not None: q = q.limit(min(limit, MAX_LIST_SIZE)) else: q = q.limit(DEFAULT_LIST_SIZE) if offset is not None: q = q.offset(offset) return q @zeus_db_handler def get_max_subsidy_process_record_ids(pay_record_ids): q = session.query(func.max(SubsidyProcessRecord.id)).\ group_by(SubsidyProcessRecord.pay_record_id).\ filter(SubsidyProcessRecord.pay_record_id.in_(pay_record_ids)) return q @zeus_db_handler def count_paylog_by_rst(restaurant_id, activity_id=None, activity_category_id=None): """ Except ActivityStats.STATUS_PENDING (未审核状态) """ q = session.query(ActivityStats.id). \ group_by(ActivityStats.pay_record_id, ActivityStats.activity_id, ActivityStats.activity_category_id). \ filter(ActivityStats.restaurant_id == restaurant_id).\ filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)) if activity_id is not None: q = q.filter(ActivityStats.activity_id == activity_id) if activity_category_id is not None: q = q.filter(ActivityStats.activity_category_id == activity_category_id) return len(q.all()) @zeus_db_handler def query_process_records_by_ids(process_ids): query = session.query(SubsidyProcessRecord).\ filter(SubsidyProcessRecord.id.in_(process_ids)) return query.all() @zeus_db_handler def get_subsidy_record_process_time(record_ids, status): return session.query( SubsidyProcessRecord.pay_record_id, SubsidyProcessRecord.processed_at).\ filter(SubsidyProcessRecord.pay_record_id.in_(record_ids)).\ filter(SubsidyProcessRecord.status == status).all() def get_pay_activities_by_restaurant(rst_id): with zeus_session() as session: query = session.query( ActivityStats.activity_id, ActivityStats.activity_category_id,). 
\ group_by(ActivityStats.activity_id, ActivityStats.activity_category_id). \ filter(ActivityStats.restaurant_id == rst_id) return query.all() # javis model begins def query_sms_send_info(start_time=None, end_time=None, phone=None, restaurant_id=None, card_num_tail=None, status=None): with walis_session() as session: query = session.query(NoticeRecord) if phone: query = query.filter(NoticeRecord.phone == phone) if restaurant_id: query = query.filter(NoticeRecord.restaurant_id == restaurant_id) if card_num_tail: query = query.filter(NoticeRecord.card_num_tail == card_num_tail) if status: query = query.filter(NoticeRecord.status == status) if not start_time: start_time = get_today_begin_time() if not end_time: end_time = get_today_end_time() query = query.filter(NoticeRecord.created_at > start_time).\ filter(NoticeRecord.created_at < end_time) return query.all() def query_sms_send_count(start_time=None, end_time=None, status=None): with walis_session() as session: if not start_time: start_time = get_today_begin_time() if not end_time: end_time = get_today_end_time() query = session.query(func.count(NoticeRecord.record_id)).\ filter(NoticeRecord.created_at > start_time).\ filter(NoticeRecord.created_at < end_time) if status is not None: query = query.filter(NoticeRecord.status == status) return query.scalar() @zeus_db_handler def query_auto_pay_activity_stats_result( city_ids=None, restaurant_ids=None, activity_id=None, activity_category_id=None, from_date=None, to_date=None, statuses=None, offset=None, limit=None, with_subsidy=None): q = session.query(ActivityStats.restaurant_id, ActivityStats.activity_id, ActivityStats.activity_category_id, func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy), func.min(ActivityStats.date), func.max(ActivityStats.date)).\ group_by(ActivityStats.restaurant_id, ActivityStats.activity_id, ActivityStats.activity_category_id).\ order_by(ActivityStats.restaurant_id.desc()) return _query_activity_stats( q, city_ids, 
restaurant_ids, activity_id, activity_category_id, from_date, to_date, statuses, with_subsidy, offset, limit) def _query_activity_stats( q, city_ids=None, restaurant_ids=None, activity_id=None, activity_category_id=None, from_date=None, to_date=None, statuses=None, with_subsidy=None, offset=None, limit=None): if activity_id is not None: q = q.filter(ActivityStats.activity_id == activity_id) if activity_category_id is not None: q = q.filter(ActivityStats.activity_category_id == activity_category_id) # noqa if city_ids is not None: q = q.filter(ActivityStats.city_id.in_(city_ids)) if restaurant_ids is not None: q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids)) if from_date is not None: q = q.filter(ActivityStats.date >= from_date) if to_date is not None: q = q.filter(ActivityStats.date <= to_date) if statuses is not None: q = q.filter(ActivityStats.status.in_(statuses)) if with_subsidy is not None: if with_subsidy: q = q.filter(ActivityStats.total_subsidy > 0) else: q = q.filter(ActivityStats.total_subsidy == 0) if offset is not None: q = q.offset(offset) q = q.limit(1000) return q
normal
{ "blob_id": "68d537cb8488ae4f2c8300e885be78540952dec0", "index": 450, "step-1": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\n<mask token>\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except 
ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), 
func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\n<mask token>\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = 
query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n<mask token>\n\n\ndef _query_activity_stats(q, city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return 
q\n", "step-2": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\n<mask token>\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = 
session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, 
ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id).group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n restaurant_id == rst_id)\n return query.all()\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, 
card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, func.sum(\n ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).order_by(ActivityStats.\n restaurant_id.desc())\n return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses, with_subsidy,\n offset, limit)\n\n\ndef _query_activity_stats(q, city_ids=None, 
restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n", "step-3": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\ndef get_success_pay_records(record_ids):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at).outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id).filter(\n SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS).filter(\n 
SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL\n ).filter(SubsidyPayRecord.id.in_(record_ids)).all()\n return result\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == 
ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n 
else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id).group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n restaurant_id == rst_id)\n return query.all()\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = 
get_today_end_time()\n query = query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, func.sum(\n ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).order_by(ActivityStats.\n restaurant_id.desc())\n return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses, with_subsidy,\n offset, limit)\n\n\ndef _query_activity_stats(q, city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = 
q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n", "step-4": "<mask token>\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at, SubsidyPayRecord.status\n ).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord.\n pay_record_id == SubsidyPayRecord.id).filter(SubsidyPayRecord.\n id > process_at).filter(SubsidyProcessRecord.status !=\n SubsidyProcessRecord.STATUS_FAIL).order_by(SubsidyPayRecord.id.\n asc()).limit(limit).all()\n return result\n\n\ndef get_success_pay_records(record_ids):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id, SubsidyPayRecord.\n restaurant_id, SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at).outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id).filter(\n SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS).filter(\n SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL\n ).filter(SubsidyPayRecord.id.in_(record_ids)).all()\n return result\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id, func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity)).group_by(ActivityStats.\n restaurant_id, 
ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.pay_record_id ==\n pay_record_id).filter(ActivityStats.status == ActivityStats.\n STATUS_PAY_SUCCESS).all()\n return results\n\n\ndef get_success_record_ids_by_restaurant(restaurant_id, activity_id=None,\n activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).filter(SubsidyPayRecord.status ==\n SubsidyPayRecord.STATUS_SUCCESS)\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n if activity_category_id is not None:\n query.filter(SubsidyPayRecord.activity_category_id ==\n activity_category_id)\n record_ids = query.all()\n return [r[0] for r in record_ids]\n\n\n<mask token>\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.\n total_subsidy), SubsidyPayRecord.created_at, func.max(\n SubsidyProcessRecord.id)).group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id, ActivityStats.activity_category_id\n ).outerjoin(SubsidyPayRecord, SubsidyPayRecord.id == ActivityStats.\n pay_record_id).outerjoin(SubsidyProcessRecord, SubsidyProcessRecord\n .pay_record_id == SubsidyPayRecord.id).filter(ActivityStats.\n restaurant_id == restaurant_id).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(SubsidyPayRecord.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == 
activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).filter(SubsidyPayRecord.\n restaurant_id == restaurant_id).order_by(SubsidyPayRecord.\n created_at.desc())\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=\n None, offset=None, limit=None):\n q = session.query(ActivityStats.pay_record_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, ActivityStats.\n status, func.min(ActivityStats.date), func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), func.sum(ActivityStats.total_subsidy)\n ).group_by(ActivityStats.pay_record_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n pay_record_id.in_(pay_record_ids)).filter(ActivityStats.status.in_(\n PAYLOG_STATUS_LIST)).order_by(ActivityStats.created_at.desc())\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n if offset is not None:\n q = q.offset(offset)\n return q\n\n\n@zeus_db_handler\ndef get_max_subsidy_process_record_ids(pay_record_ids):\n q = session.query(func.max(SubsidyProcessRecord.id)).group_by(\n SubsidyProcessRecord.pay_record_id).filter(SubsidyProcessRecord.\n pay_record_id.in_(pay_record_ids))\n return q\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n 
\"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id).group_by(ActivityStats.\n pay_record_id, ActivityStats.activity_id, ActivityStats.\n activity_category_id).filter(ActivityStats.restaurant_id ==\n restaurant_id).filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n return len(q.all())\n\n\n@zeus_db_handler\ndef query_process_records_by_ids(process_ids):\n query = session.query(SubsidyProcessRecord).filter(SubsidyProcessRecord\n .id.in_(process_ids))\n return query.all()\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).filter(SubsidyProcessRecord.\n pay_record_id.in_(record_ids)).filter(SubsidyProcessRecord.status ==\n status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(ActivityStats.activity_id, ActivityStats.\n activity_category_id).group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id).filter(ActivityStats.\n restaurant_id == rst_id)\n return query.all()\n\n\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n with walis_session() as session:\n query = session.query(NoticeRecord)\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n if status:\n query = query.filter(NoticeRecord.status == status)\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = 
query.filter(NoticeRecord.created_at > start_time).filter(\n NoticeRecord.created_at < end_time)\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n if not start_time:\n start_time = get_today_begin_time()\n if not end_time:\n end_time = get_today_end_time()\n query = session.query(func.count(NoticeRecord.record_id)).filter(\n NoticeRecord.created_at > start_time).filter(NoticeRecord.\n created_at < end_time)\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id, ActivityStats.\n activity_id, ActivityStats.activity_category_id, func.sum(\n ActivityStats.quantity), func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date), func.max(ActivityStats.date)).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id).order_by(ActivityStats.\n restaurant_id.desc())\n return _query_activity_stats(q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses, with_subsidy,\n offset, limit)\n\n\ndef _query_activity_stats(q, city_ids=None, restaurant_ids=None,\n activity_id=None, activity_category_id=None, from_date=None, to_date=\n None, statuses=None, with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id\n )\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n if 
from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n if offset is not None:\n q = q.offset(offset)\n q = q.limit(1000)\n return q\n", "step-5": "#!/usr/bin/env python2\n# coding=utf8\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom sqlalchemy import func\n\nfrom walis.model.walis import walis_session\nfrom walis.model.zeus import zeus_session, zeus_db_handler\nfrom walis.model.zeus.activity import (\n SubsidyProcessRecord,\n SubsidyPayRecord,\n ActivityStats,\n)\nfrom walis.model.walis.activity import PaymentNoticeRecord as NoticeRecord\nfrom walis.utils.time import get_today_begin_time, get_today_end_time\n\n\nMAX_LIST_SIZE = 1000\nDEFAULT_LIST_SIZE = 200\n\n\ndef get_new_pay_records(process_at, limit=200):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id,\n SubsidyPayRecord.restaurant_id,\n SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at,\n SubsidyPayRecord.status). \\\n outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \\\n filter(SubsidyPayRecord.id > process_at). \\\n filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \\\n order_by(SubsidyPayRecord.id.asc()).limit(limit).all()\n\n return result\n\n\ndef get_success_pay_records(record_ids):\n with zeus_session() as session:\n result = session.query(SubsidyPayRecord.id,\n SubsidyPayRecord.restaurant_id,\n SubsidyProcessRecord.card_id,\n SubsidyProcessRecord.processed_at,). \\\n outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \\\n filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS). 
\\\n filter(SubsidyProcessRecord.status != SubsidyProcessRecord.STATUS_FAIL). \\\n filter(SubsidyPayRecord.id.in_(record_ids)).all()\n\n return result\n\n\ndef get_activity_stats(pay_record_id):\n with zeus_session() as session:\n results = session.query(ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date),\n func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity), ).group_by(\n ActivityStats.restaurant_id, ActivityStats.activity_id,\n ActivityStats.activity_category_id). \\\n filter(ActivityStats.pay_record_id == pay_record_id). \\\n filter(ActivityStats.status == ActivityStats.STATUS_PAY_SUCCESS).all()\n\n return results\n\n\ndef get_success_record_ids_by_restaurant(\n restaurant_id, activity_id=None, activity_category_id=None):\n with zeus_session() as session:\n query = session.query(SubsidyPayRecord.id). \\\n filter(SubsidyPayRecord.restaurant_id == restaurant_id). \\\n filter(SubsidyPayRecord.status == SubsidyPayRecord.STATUS_SUCCESS)\n\n if activity_id is not None:\n query.filter(SubsidyPayRecord.activity_id == activity_id)\n\n if activity_category_id is not None:\n query.filter(\n SubsidyPayRecord.activity_category_id == activity_category_id)\n record_ids = query.all()\n\n return [r[0] for r in record_ids]\n\n\nPAYLOG_STATUS_LIST = {\n ActivityStats.STATUS_PAY_RECORD_GENERATED,\n ActivityStats.STATUS_PAY_SUCCESS,\n ActivityStats.STATUS_PAY_FAIL,\n}\n\n\n@zeus_db_handler\ndef query_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None, offset=None, limit=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(\n ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n ActivityStats.status,\n func.min(ActivityStats.date),\n func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity),\n func.sum(ActivityStats.total_subsidy),\n SubsidyPayRecord.created_at,\n 
func.max(SubsidyProcessRecord.id)). \\\n group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id). \\\n outerjoin(SubsidyPayRecord,\n SubsidyPayRecord.id == ActivityStats.pay_record_id). \\\n outerjoin(SubsidyProcessRecord,\n SubsidyProcessRecord.pay_record_id == SubsidyPayRecord.id). \\\n filter(ActivityStats.restaurant_id == restaurant_id).\\\n filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)).\\\n order_by(SubsidyPayRecord.created_at.desc())\n\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id)\n\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n\n if offset is not None:\n q = q.offset(offset)\n\n return q\n\n\n@zeus_db_handler\ndef query_pay_records(restaurant_id, offset=None, limit=None):\n q = session.query(SubsidyPayRecord).\\\n filter(SubsidyPayRecord.restaurant_id == restaurant_id).\\\n order_by(SubsidyPayRecord.created_at.desc())\n\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n\n if offset is not None:\n q = q.offset(offset)\n\n return q.all()\n\n\n@zeus_db_handler\ndef query_paylog(pay_record_ids, activity_id=None, activity_category_id=None,\n offset=None, limit=None):\n q = session.query(\n ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n ActivityStats.status,\n func.min(ActivityStats.date),\n func.max(ActivityStats.date),\n func.sum(ActivityStats.quantity),\n func.sum(ActivityStats.total_subsidy)).\\\n group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id). 
\\\n filter(ActivityStats.pay_record_id.in_(pay_record_ids)).\\\n filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST)).\\\n order_by(ActivityStats.created_at.desc())\n\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id)\n\n if limit is not None:\n q = q.limit(min(limit, MAX_LIST_SIZE))\n else:\n q = q.limit(DEFAULT_LIST_SIZE)\n\n if offset is not None:\n q = q.offset(offset)\n\n return q\n\n\n@zeus_db_handler\ndef get_max_subsidy_process_record_ids(pay_record_ids):\n q = session.query(func.max(SubsidyProcessRecord.id)).\\\n group_by(SubsidyProcessRecord.pay_record_id).\\\n filter(SubsidyProcessRecord.pay_record_id.in_(pay_record_ids))\n\n return q\n\n\n@zeus_db_handler\ndef count_paylog_by_rst(restaurant_id, activity_id=None,\n activity_category_id=None):\n \"\"\" Except ActivityStats.STATUS_PENDING (未审核状态)\n \"\"\"\n q = session.query(ActivityStats.id). \\\n group_by(ActivityStats.pay_record_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id). 
\\\n filter(ActivityStats.restaurant_id == restaurant_id).\\\n filter(ActivityStats.status.in_(PAYLOG_STATUS_LIST))\n\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id)\n\n return len(q.all())\n\n\n@zeus_db_handler\ndef query_process_records_by_ids(process_ids):\n query = session.query(SubsidyProcessRecord).\\\n filter(SubsidyProcessRecord.id.in_(process_ids))\n return query.all()\n\n\n@zeus_db_handler\ndef get_subsidy_record_process_time(record_ids, status):\n return session.query(\n SubsidyProcessRecord.pay_record_id,\n SubsidyProcessRecord.processed_at).\\\n filter(SubsidyProcessRecord.pay_record_id.in_(record_ids)).\\\n filter(SubsidyProcessRecord.status == status).all()\n\n\ndef get_pay_activities_by_restaurant(rst_id):\n with zeus_session() as session:\n query = session.query(\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,). \\\n group_by(ActivityStats.activity_id,\n ActivityStats.activity_category_id). 
\\\n filter(ActivityStats.restaurant_id == rst_id)\n\n return query.all()\n\n\n# javis model begins\ndef query_sms_send_info(start_time=None, end_time=None, phone=None,\n restaurant_id=None, card_num_tail=None, status=None):\n\n with walis_session() as session:\n query = session.query(NoticeRecord)\n\n if phone:\n query = query.filter(NoticeRecord.phone == phone)\n\n if restaurant_id:\n query = query.filter(NoticeRecord.restaurant_id == restaurant_id)\n\n if card_num_tail:\n query = query.filter(NoticeRecord.card_num_tail == card_num_tail)\n\n if status:\n query = query.filter(NoticeRecord.status == status)\n\n if not start_time:\n start_time = get_today_begin_time()\n\n if not end_time:\n end_time = get_today_end_time()\n\n query = query.filter(NoticeRecord.created_at > start_time).\\\n filter(NoticeRecord.created_at < end_time)\n\n return query.all()\n\n\ndef query_sms_send_count(start_time=None, end_time=None, status=None):\n with walis_session() as session:\n\n if not start_time:\n start_time = get_today_begin_time()\n\n if not end_time:\n end_time = get_today_end_time()\n\n query = session.query(func.count(NoticeRecord.record_id)).\\\n filter(NoticeRecord.created_at > start_time).\\\n filter(NoticeRecord.created_at < end_time)\n\n if status is not None:\n query = query.filter(NoticeRecord.status == status)\n\n return query.scalar()\n\n\n@zeus_db_handler\ndef query_auto_pay_activity_stats_result(\n city_ids=None, restaurant_ids=None, activity_id=None,\n activity_category_id=None, from_date=None, to_date=None, statuses=None,\n offset=None, limit=None, with_subsidy=None):\n q = session.query(ActivityStats.restaurant_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id,\n func.sum(ActivityStats.quantity),\n func.sum(ActivityStats.total_subsidy),\n func.min(ActivityStats.date),\n func.max(ActivityStats.date)).\\\n group_by(ActivityStats.restaurant_id,\n ActivityStats.activity_id,\n ActivityStats.activity_category_id).\\\n 
order_by(ActivityStats.restaurant_id.desc())\n\n return _query_activity_stats(\n q, city_ids, restaurant_ids, activity_id,\n activity_category_id, from_date, to_date, statuses,\n with_subsidy, offset, limit)\n\n\ndef _query_activity_stats(\n q, city_ids=None, restaurant_ids=None, activity_id=None,\n activity_category_id=None, from_date=None, to_date=None, statuses=None,\n with_subsidy=None, offset=None, limit=None):\n if activity_id is not None:\n q = q.filter(ActivityStats.activity_id == activity_id)\n\n if activity_category_id is not None:\n q = q.filter(ActivityStats.activity_category_id == activity_category_id) # noqa\n\n if city_ids is not None:\n q = q.filter(ActivityStats.city_id.in_(city_ids))\n\n if restaurant_ids is not None:\n q = q.filter(ActivityStats.restaurant_id.in_(restaurant_ids))\n\n if from_date is not None:\n q = q.filter(ActivityStats.date >= from_date)\n\n if to_date is not None:\n q = q.filter(ActivityStats.date <= to_date)\n\n if statuses is not None:\n q = q.filter(ActivityStats.status.in_(statuses))\n\n if with_subsidy is not None:\n if with_subsidy:\n q = q.filter(ActivityStats.total_subsidy > 0)\n else:\n q = q.filter(ActivityStats.total_subsidy == 0)\n\n if offset is not None:\n q = q.offset(offset)\n\n q = q.limit(1000)\n\n return q\n", "step-ids": [ 11, 13, 14, 16, 19 ] }
[ 11, 13, 14, 16, 19 ]
from itertools import count, islice from math import sqrt def is_prime(x): if x<2: return False for i in range(2, int(sqrt(x)) + 1): if x%i == 0: return False return True def primes(x): return islice((p for p in count() if is_prime(p)), x) print(list(primes(1000))[-10:]) print(sum(primes(1000))) print(any([True, True])) print(any([True, False])) print(any([False, False])) # is there a TRUE print(all([True, True])) # are all of them TRUE print(all([True, False])) print(all([False, False])) print("Is there a prime between 1328 and 1361:", any(is_prime(x) for x in range(1328, 1361))) monday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11] tuesday = [x*2-10 for x in monday] print(monday, tuesday) for item in zip(monday, tuesday): print(item, type(item)) for d1, d2 in zip(monday, tuesday): print(f"Hourly average is {(d1 + d2)/2}°C") wednesday = [x*2-20 for x in tuesday] for temps in zip(monday, tuesday, wednesday): print(f"min={min(temps):4.1f}\t max={max(temps):4.1f}\t avg={sum(temps)/len(temps):4.1f}") from itertools import chain temperatures = chain(monday, tuesday, wednesday) print(monday, tuesday, wednesday) # concatenation print(list(temperatures)) # lazy concatenation from md_lucas import lucas from time import perf_counter as tc start = tc() for x in (p for p in lucas() if is_prime(p)): print(x, "time:", tc()-start)
normal
{ "blob_id": "0f1bad350faaff6aab339944b4d24c4801fa8c64", "index": 4965, "step-1": "<mask token>\n\n\ndef is_prime(x):\n if x < 2:\n return False\n for i in range(2, int(sqrt(x)) + 1):\n if x % i == 0:\n return False\n return True\n\n\ndef primes(x):\n return islice((p for p in count() if is_prime(p)), x)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef is_prime(x):\n if x < 2:\n return False\n for i in range(2, int(sqrt(x)) + 1):\n if x % i == 0:\n return False\n return True\n\n\ndef primes(x):\n return islice((p for p in count() if is_prime(p)), x)\n\n\nprint(list(primes(1000))[-10:])\nprint(sum(primes(1000)))\nprint(any([True, True]))\nprint(any([True, False]))\nprint(any([False, False]))\nprint(all([True, True]))\nprint(all([True, False]))\nprint(all([False, False]))\nprint('Is there a prime between 1328 and 1361:', any(is_prime(x) for x in\n range(1328, 1361)))\n<mask token>\nprint(monday, tuesday)\nfor item in zip(monday, tuesday):\n print(item, type(item))\nfor d1, d2 in zip(monday, tuesday):\n print(f'Hourly average is {(d1 + d2) / 2}°C')\n<mask token>\nfor temps in zip(monday, tuesday, wednesday):\n print(\n f'min={min(temps):4.1f}\\t max={max(temps):4.1f}\\t avg={sum(temps) / len(temps):4.1f}'\n )\n<mask token>\nprint(monday, tuesday, wednesday)\nprint(list(temperatures))\n<mask token>\nfor x in (p for p in lucas() if is_prime(p)):\n print(x, 'time:', tc() - start)\n", "step-3": "<mask token>\n\n\ndef is_prime(x):\n if x < 2:\n return False\n for i in range(2, int(sqrt(x)) + 1):\n if x % i == 0:\n return False\n return True\n\n\ndef primes(x):\n return islice((p for p in count() if is_prime(p)), x)\n\n\nprint(list(primes(1000))[-10:])\nprint(sum(primes(1000)))\nprint(any([True, True]))\nprint(any([True, False]))\nprint(any([False, False]))\nprint(all([True, True]))\nprint(all([True, False]))\nprint(all([False, False]))\nprint('Is there a prime between 1328 and 1361:', any(is_prime(x) for x in\n range(1328, 1361)))\nmonday = [11, 12, 13, 14, 15, 16, 
17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11]\ntuesday = [(x * 2 - 10) for x in monday]\nprint(monday, tuesday)\nfor item in zip(monday, tuesday):\n print(item, type(item))\nfor d1, d2 in zip(monday, tuesday):\n print(f'Hourly average is {(d1 + d2) / 2}°C')\nwednesday = [(x * 2 - 20) for x in tuesday]\nfor temps in zip(monday, tuesday, wednesday):\n print(\n f'min={min(temps):4.1f}\\t max={max(temps):4.1f}\\t avg={sum(temps) / len(temps):4.1f}'\n )\n<mask token>\ntemperatures = chain(monday, tuesday, wednesday)\nprint(monday, tuesday, wednesday)\nprint(list(temperatures))\n<mask token>\nstart = tc()\nfor x in (p for p in lucas() if is_prime(p)):\n print(x, 'time:', tc() - start)\n", "step-4": "from itertools import count, islice\nfrom math import sqrt\n\n\ndef is_prime(x):\n if x < 2:\n return False\n for i in range(2, int(sqrt(x)) + 1):\n if x % i == 0:\n return False\n return True\n\n\ndef primes(x):\n return islice((p for p in count() if is_prime(p)), x)\n\n\nprint(list(primes(1000))[-10:])\nprint(sum(primes(1000)))\nprint(any([True, True]))\nprint(any([True, False]))\nprint(any([False, False]))\nprint(all([True, True]))\nprint(all([True, False]))\nprint(all([False, False]))\nprint('Is there a prime between 1328 and 1361:', any(is_prime(x) for x in\n range(1328, 1361)))\nmonday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11]\ntuesday = [(x * 2 - 10) for x in monday]\nprint(monday, tuesday)\nfor item in zip(monday, tuesday):\n print(item, type(item))\nfor d1, d2 in zip(monday, tuesday):\n print(f'Hourly average is {(d1 + d2) / 2}°C')\nwednesday = [(x * 2 - 20) for x in tuesday]\nfor temps in zip(monday, tuesday, wednesday):\n print(\n f'min={min(temps):4.1f}\\t max={max(temps):4.1f}\\t avg={sum(temps) / len(temps):4.1f}'\n )\nfrom itertools import chain\ntemperatures = chain(monday, tuesday, wednesday)\nprint(monday, tuesday, wednesday)\nprint(list(temperatures))\nfrom md_lucas import lucas\nfrom time import perf_counter as tc\nstart = 
tc()\nfor x in (p for p in lucas() if is_prime(p)):\n print(x, 'time:', tc() - start)\n", "step-5": "from itertools import count, islice\nfrom math import sqrt\n\ndef is_prime(x):\n if x<2:\n return False\n for i in range(2, int(sqrt(x)) + 1):\n if x%i == 0:\n return False\n return True\n\ndef primes(x):\n return islice((p for p in count() if is_prime(p)), x)\n\nprint(list(primes(1000))[-10:])\n\nprint(sum(primes(1000)))\n\nprint(any([True, True]))\nprint(any([True, False]))\nprint(any([False, False])) # is there a TRUE\nprint(all([True, True])) # are all of them TRUE\nprint(all([True, False]))\nprint(all([False, False]))\n\nprint(\"Is there a prime between 1328 and 1361:\", any(is_prime(x) for x in range(1328, 1361)))\n\nmonday = [11, 12, 13, 14, 15, 16, 17, 17, 17, 16, 16, 15, 14, 13, 12, 11, 11]\ntuesday = [x*2-10 for x in monday]\nprint(monday, tuesday)\nfor item in zip(monday, tuesday):\n print(item, type(item))\n\nfor d1, d2 in zip(monday, tuesday):\n print(f\"Hourly average is {(d1 + d2)/2}°C\")\n\nwednesday = [x*2-20 for x in tuesday]\n\nfor temps in zip(monday, tuesday, wednesday):\n print(f\"min={min(temps):4.1f}\\t max={max(temps):4.1f}\\t avg={sum(temps)/len(temps):4.1f}\")\n\nfrom itertools import chain\ntemperatures = chain(monday, tuesday, wednesday)\nprint(monday, tuesday, wednesday) # concatenation\nprint(list(temperatures)) # lazy concatenation\n\nfrom md_lucas import lucas\nfrom time import perf_counter as tc\nstart = tc()\nfor x in (p for p in lucas() if is_prime(p)):\n print(x, \"time:\", tc()-start)", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import requests from bs4 import BeautifulSoup class Book: def __init__(self, url): self.url = url self.title = "" self.category = "" self.upc="" self.price_including_tax="" self.price_excluding_tax="" self.number_available="" self.description="" self.review_rating="" self.image_url="" self.tax="" def scrap(self): book = requests.get(self.url) soup = BeautifulSoup(book.content, "html.parser") self.__fill_title(soup) self.__fill_category(soup) self.__fill_upc(soup) self.__fill_price_including_tax(soup) self.__fill_price_excluding_tax(soup) self.__fill_number_available(soup) self.__fill_description(soup) self.__fill_review_rating(soup) self.__fill_image_url(soup) self.__fill_tax(soup) def __fill_title(self,soup): title = soup.find("div", {"class": "col-sm-6 product_main"}).find("h1") self.title= title.text # return self.title def __fill_category(self,soup): category = soup.findAll("li") category2 = category[2].text self.category = category2.replace("\n", "") # return self.category def __fill_upc(self,soup): tds = soup.findAll("td") self.upc = tds[0].text def __fill_price_including_tax(self,soup): tds = soup.findAll("td") self.price_including_tax = tds[3].text def __fill_price_excluding_tax(self,soup): tds = soup.findAll("td") self.price_excluding_tax = tds[2].text def __fill_number_available(self,soup): tds = soup.findAll("td") self.number_available = tds[5].text def __fill_description(self,soup): div = soup.find("div", class_="sub-header") p = div.find_next_sibling() self.description = p.text # return self.description def __fill_review_rating(self,soup): p = soup.find("div", {"class": "col-sm-6 product_main"}).find( "p", class_="star-rating" ) rating = str(p["class"]) star = rating[15:-1] star_rating = eval(star) return star_rating def __fill_image_url(self,soup): image = soup.find("div", {"class": "item active"}).find("img") image_url = image["src"] image_clean_url = image_url.replace("../../", "http://books.toscrape.com/") self.image_url = image_clean_url def 
__fill_tax(self,soup): tds = soup.findAll("td") self.tax = tds[4].text def __str__(self): output = f"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} " return output # book = Book("http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html") # book.scrap("http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html") # print(book)
normal
{ "blob_id": "3dc83168264fbb4f9b0ab2980b845dffdc4417bb", "index": 7588, "step-1": "<mask token>\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n <mask token>\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n <mask token>\n <mask token>\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} 
\ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n", "step-2": "<mask token>\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n <mask token>\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n <mask token>\n\n def __fill_number_available(self, soup):\n tds = soup.findAll('td')\n self.number_available = tds[5].text\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image 
= soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n", "step-3": "<mask token>\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n <mask token>\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n\n def __fill_price_excluding_tax(self, soup):\n tds = soup.findAll('td')\n self.price_excluding_tax = tds[2].text\n\n def __fill_number_available(self, soup):\n tds = 
soup.findAll('td')\n self.number_available = tds[5].text\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n", "step-4": "import requests\nfrom bs4 import BeautifulSoup\n\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = ''\n self.category = ''\n self.upc = ''\n self.price_including_tax = ''\n self.price_excluding_tax = ''\n self.number_available = ''\n self.description = ''\n self.review_rating = ''\n self.image_url = ''\n self.tax = ''\n\n def scrap(self):\n book = requests.get(self.url)\n soup = BeautifulSoup(book.content, 'html.parser')\n self.__fill_title(soup)\n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n\n def __fill_title(self, 
soup):\n title = soup.find('div', {'class': 'col-sm-6 product_main'}).find('h1')\n self.title = title.text\n\n def __fill_category(self, soup):\n category = soup.findAll('li')\n category2 = category[2].text\n self.category = category2.replace('\\n', '')\n\n def __fill_upc(self, soup):\n tds = soup.findAll('td')\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self, soup):\n tds = soup.findAll('td')\n self.price_including_tax = tds[3].text\n\n def __fill_price_excluding_tax(self, soup):\n tds = soup.findAll('td')\n self.price_excluding_tax = tds[2].text\n\n def __fill_number_available(self, soup):\n tds = soup.findAll('td')\n self.number_available = tds[5].text\n\n def __fill_description(self, soup):\n div = soup.find('div', class_='sub-header')\n p = div.find_next_sibling()\n self.description = p.text\n\n def __fill_review_rating(self, soup):\n p = soup.find('div', {'class': 'col-sm-6 product_main'}).find('p',\n class_='star-rating')\n rating = str(p['class'])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n\n def __fill_image_url(self, soup):\n image = soup.find('div', {'class': 'item active'}).find('img')\n image_url = image['src']\n image_clean_url = image_url.replace('../../',\n 'http://books.toscrape.com/')\n self.image_url = image_clean_url\n\n def __fill_tax(self, soup):\n tds = soup.findAll('td')\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"\"\"url : {self.url} \ntitle : {self.title} \ncategory : {self.category} \nupc : {self.upc} \nprice_including_tax : {self.price_including_tax} \nprice_excluding_tax : {self.price_excluding_tax} \nnumber_available : {self.number_available} \ndescription : {self.description} \nreview_rating : {self.review_rating} \nimage_url : {self.image_url} \ntax : {self.tax} \"\"\"\n return output\n", "step-5": "import requests\nfrom bs4 import BeautifulSoup\n\nclass Book:\n\n def __init__(self, url):\n self.url = url\n self.title = \"\"\n self.category = \"\"\n self.upc=\"\"\n 
self.price_including_tax=\"\"\n self.price_excluding_tax=\"\"\n self.number_available=\"\"\n self.description=\"\"\n self.review_rating=\"\"\n self.image_url=\"\"\n self.tax=\"\"\n \n def scrap(self): \n book = requests.get(self.url) \n soup = BeautifulSoup(book.content, \"html.parser\")\n self.__fill_title(soup) \n self.__fill_category(soup)\n self.__fill_upc(soup)\n self.__fill_price_including_tax(soup)\n self.__fill_price_excluding_tax(soup)\n self.__fill_number_available(soup)\n self.__fill_description(soup)\n self.__fill_review_rating(soup)\n self.__fill_image_url(soup)\n self.__fill_tax(soup)\n \n def __fill_title(self,soup): \n title = soup.find(\"div\", {\"class\": \"col-sm-6 product_main\"}).find(\"h1\")\n self.title= title.text\n # return self.title\n \n \n def __fill_category(self,soup):\n category = soup.findAll(\"li\")\n category2 = category[2].text\n self.category = category2.replace(\"\\n\", \"\")\n # return self.category\n\n def __fill_upc(self,soup):\n tds = soup.findAll(\"td\")\n self.upc = tds[0].text\n\n def __fill_price_including_tax(self,soup):\n tds = soup.findAll(\"td\")\n self.price_including_tax = tds[3].text\n\n def __fill_price_excluding_tax(self,soup):\n tds = soup.findAll(\"td\")\n self.price_excluding_tax = tds[2].text\n\n def __fill_number_available(self,soup):\n tds = soup.findAll(\"td\")\n self.number_available = tds[5].text\n\n def __fill_description(self,soup):\n div = soup.find(\"div\", class_=\"sub-header\")\n p = div.find_next_sibling()\n self.description = p.text\n # return self.description\n\n def __fill_review_rating(self,soup):\n p = soup.find(\"div\", {\"class\": \"col-sm-6 product_main\"}).find(\n \"p\", class_=\"star-rating\"\n )\n rating = str(p[\"class\"])\n star = rating[15:-1]\n star_rating = eval(star)\n return star_rating\n \n\n def __fill_image_url(self,soup):\n image = soup.find(\"div\", {\"class\": \"item active\"}).find(\"img\")\n image_url = image[\"src\"]\n image_clean_url = image_url.replace(\"../../\", 
\"http://books.toscrape.com/\")\n self.image_url = image_clean_url\n\n def __fill_tax(self,soup):\n tds = soup.findAll(\"td\")\n self.tax = tds[4].text\n\n def __str__(self):\n output = f\"url : {self.url} \\ntitle : {self.title} \\ncategory : {self.category} \\nupc : {self.upc} \\nprice_including_tax : {self.price_including_tax} \\nprice_excluding_tax : {self.price_excluding_tax} \\nnumber_available : {self.number_available} \\ndescription : {self.description} \\nreview_rating : {self.review_rating} \\nimage_url : {self.image_url} \\ntax : {self.tax} \"\n return output\n \n \n\n \n# book = Book(\"http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html\")\n# book.scrap(\"http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html\")\n# print(book)\n", "step-ids": [ 11, 12, 13, 15, 16 ] }
[ 11, 12, 13, 15, 16 ]
from django.contrib.auth.decorators import login_required from django.contrib.auth import login, authenticate from django.shortcuts import render, redirect from mysite.core.forms import SignUpForm,UserProfileForm from django.views.generic import UpdateView from .models import Profile from django.contrib.auth.mixins import LoginRequiredMixin @login_required def home(request): return render(request, 'home.html') def signup(request): if request.method == 'POST': form = SignUpForm(request.POST) if form.is_valid(): user = form.save() user.refresh_from_db() # load the profile instance created by the signal user.profile.birth_date = form.cleaned_data.get('birth_date') user.save() raw_password = form.cleaned_data.get('password1') # user = authenticate(username=user.username, password=raw_password) # login(request, user) return redirect('login') else: form = SignUpForm() return render(request, 'signup.html', {'form': form}) class EditUserProfileView(LoginRequiredMixin,UpdateView): model = Profile form_class = UserProfileForm template_name = "profile.html"
normal
{ "blob_id": "21d261dec6668a24030f37b7dcb87c0132e63528", "index": 1365, "step-1": "<mask token>\n\n\nclass EditUserProfileView(LoginRequiredMixin, UpdateView):\n model = Profile\n form_class = UserProfileForm\n template_name = 'profile.html'\n", "step-2": "<mask token>\n\n\n@login_required\ndef home(request):\n return render(request, 'home.html')\n\n\n<mask token>\n\n\nclass EditUserProfileView(LoginRequiredMixin, UpdateView):\n model = Profile\n form_class = UserProfileForm\n template_name = 'profile.html'\n", "step-3": "<mask token>\n\n\n@login_required\ndef home(request):\n return render(request, 'home.html')\n\n\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db()\n user.profile.birth_date = form.cleaned_data.get('birth_date')\n user.save()\n raw_password = form.cleaned_data.get('password1')\n return redirect('login')\n else:\n form = SignUpForm()\n return render(request, 'signup.html', {'form': form})\n\n\nclass EditUserProfileView(LoginRequiredMixin, UpdateView):\n model = Profile\n form_class = UserProfileForm\n template_name = 'profile.html'\n", "step-4": "from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import login, authenticate\nfrom django.shortcuts import render, redirect\nfrom mysite.core.forms import SignUpForm, UserProfileForm\nfrom django.views.generic import UpdateView\nfrom .models import Profile\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\n\n@login_required\ndef home(request):\n return render(request, 'home.html')\n\n\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db()\n user.profile.birth_date = form.cleaned_data.get('birth_date')\n user.save()\n raw_password = form.cleaned_data.get('password1')\n return redirect('login')\n else:\n form = SignUpForm()\n return render(request, 'signup.html', 
{'form': form})\n\n\nclass EditUserProfileView(LoginRequiredMixin, UpdateView):\n model = Profile\n form_class = UserProfileForm\n template_name = 'profile.html'\n", "step-5": "from django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import login, authenticate\nfrom django.shortcuts import render, redirect\n\nfrom mysite.core.forms import SignUpForm,UserProfileForm\nfrom django.views.generic import UpdateView\nfrom .models import Profile\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\n@login_required\ndef home(request):\n return render(request, 'home.html')\n\n\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db() # load the profile instance created by the signal\n user.profile.birth_date = form.cleaned_data.get('birth_date')\n user.save()\n raw_password = form.cleaned_data.get('password1')\n # user = authenticate(username=user.username, password=raw_password)\n # login(request, user)\n return redirect('login')\n else:\n form = SignUpForm()\n return render(request, 'signup.html', {'form': form})\n\n\nclass EditUserProfileView(LoginRequiredMixin,UpdateView):\n model = Profile\n\n form_class = UserProfileForm\n template_name = \"profile.html\"\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
from accessor import * from order import Order from copy import deepcopy import pandas as pd import numpy as np import util class Broker: def __init__(self, equity): self.execute = Execute(equity) # Execute def make_order(self, unit, limit_price, stop_loss, stop_profit): order_queue.append(Order(unit, limit_price, stop_loss, stop_profit)) def check_order(self, ohlc, date, commission): """ check the order and set the information to order by different condition """ op = ohlc[0] for o in order_queue: if position() != 0 and position() + o.units != 0 and len(order_queue) == 1: o.is_parents = False if o.limit_price: trading_price = o.limit_price else: trading_price = op setattr(o, 'trading_price', trading_price) setattr(o, 'trading_date', date) if o.is_long: if 1 > o.units > 0: size = int((self.execute.equity * o.units) / trading_price) setattr(o, 'units', size) if o.stop_loss: stop_loss_price = o.trading_price * (1 - o.stop_loss) setattr(o, 'stop_loss_prices', stop_loss_price) if o.stop_profit: stop_profit_price = o.trading_price * (1 + o.stop_profit) setattr(o, 'stop_profit_prices', stop_profit_price) if not o.is_parents: add_position_long_order.append(o) elif o.is_short: if -1 < o.units < 0: size = int((self.execute.equity * o.units) / trading_price) setattr(o, 'units', size) if o.stop_loss: stop_loss_price = o.trading_price * (1 + o.stop_loss) setattr(o, 'stop_loss_prices', stop_loss_price) if o.stop_profit: stop_profit_price = o.trading_price * (1 - o.stop_profit) setattr(o, 'stop_profit_prices', stop_profit_price) if not o.is_parents: add_position_short_order.append(o) order_execute.append(o) self.work(ohlc, date=date, commission=commission) order_queue.clear() self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission) def check_if_sl_or_sp(self, ohlc, date, commission): for t in order_execute: origin_o = deepcopy(t).is_parents if util.touch_stop_loss(order=t, price=ohlc[3], date=date) : t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices, 
trading_date=date, _is_fill=False, _is_parent=False, stop_loss=None) elif util.touch_stop_profit(order=t, price=ohlc[3], date=date): t.replace(_unit=-t.units, _trading_price=t.stop_profit_prices, trading_date=date, _is_fill=False, _is_parent=False, stop_loss=None) if not origin_o: order_execute.remove(t) self.work(ohlc, date=date, commission=commission) def work(self, price, date, commission): self.execute.trading(price, date, commission) def liquidation(self, pos, price, date, commission): """ clean the last position """ o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=None, is_fill=False) setattr(o, 'trading_price', price[0]) setattr(o, 'trading_date', date) order_execute.append(o) self.work(price=price, date=date, commission=commission) def get_log(self): log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits': buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date, 'SellPrice': sell_price, 'SellUnits': sell_unit, 'CashReceiving': amnt_receiving} log = pd.DataFrame(log_dict) for i in list(log_dict.values()): i.clear() return log class Execute: def __init__(self, equity): self.__equity = equity def trading(self, price, date, commission): c = price[3] for t in order_execute: if not t.is_filled: position_list.append(t.units) if t.is_short and add_position_long_order and t.is_parents: self.split_add_pos_order(t, add_position_long_order, commission) elif t.is_long and add_position_short_order and t.is_parents: self.split_add_pos_order(t, add_position_short_order, commission) else: self.fill(t, commission) # if self._touch_stop_loss(order=t, price=c): # origin_o = deepcopy(t).is_parents # t.replace(units=-t.units, trading_prices=t.stop_loss_price, trading_date=date, is_filled=False, # is_parent=False, stop_loss=None) # if not origin_o: # order_execute.remove(t) if position() == 0 and t in order_execute: del order_execute[: order_execute.index(t) + 1] def fill(self, t, commission): adj_price = util.adjust_price(trade=t, 
commission=commission) if t.is_long: assert self.__equity >= adj_price * t.units, 'Your money is empty' buy_price.append(t.trading_price) buy_date.append(t.trading_date) buy_unit.append(t.units) amnt_paying.append(adj_price * t.units) self.__equity -= t.units * adj_price setattr(t, 'is_filled', True) elif t.is_short: sell_price.append(t.trading_price) sell_date.append(t.trading_date) sell_unit.append(t.units) amnt_receiving.append(abs(t.units) * adj_price) self.__equity += abs(t.units) * adj_price setattr(t, 'is_filled', True) def split_add_pos_order(self, trade_order, add_position_order: list, commission): """ split the order which include overweight order into a list of single order and fill them e.g. a sell order [with 6 units has an parent order and an overweight order] becomes [an parent order with -4 units , an order with -2 units] """ temp_order_list = [] origin_trader_order_sign = np.sign(trade_order.units) if trade_order.is_short: parents_unit = trade_order.units + sum(abs(_o.units) for _o in add_position_order) else: parents_unit = trade_order.units - sum(abs(_o.units) for _o in add_position_order) trade_order.units = parents_unit if trade_order.units != 0: temp_order_list.append(trade_order) for _t in add_position_order: if np.sign(_t.units) == origin_trader_order_sign: temp_order_list.append(_t) else: ct = deepcopy(_t) ct.units = -_t.units ct.trading_date = trade_order.trading_date ct.trading_prices = trade_order.trading_price temp_order_list.append(ct) for temp_o in temp_order_list: self.fill(temp_o, commission) add_position_order.clear() @property def equity(self): return self.__equity def position(): return sum(size for size in position_list)
normal
{ "blob_id": "ca0aedcfb997299240870649823fb872e0d9f99a", "index": 6023, "step-1": "<mask token>\n\n\nclass Broker:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n 
sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\nclass Broker:\n\n def __init__(self, equity):\n self.execute = Execute(equity)\n <mask token>\n\n def check_order(self, ohlc, date, commission):\n \"\"\"\n check the order and set the information to order by different condition\n \"\"\"\n op = ohlc[0]\n for o in order_queue:\n if position() != 0 and position() + o.units != 0 and len(\n order_queue) == 1:\n o.is_parents = False\n if o.limit_price:\n trading_price = o.limit_price\n else:\n trading_price = op\n setattr(o, 'trading_price', trading_price)\n setattr(o, 'trading_date', date)\n if o.is_long:\n if 1 > o.units > 0:\n size = int(self.execute.equity * o.units / 
trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_long_order.append(o)\n elif o.is_short:\n if -1 < o.units < 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_short_order.append(o)\n order_execute.append(o)\n self.work(ohlc, date=date, commission=commission)\n order_queue.clear()\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\n <mask token>\n <mask token>\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n 
self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. 
a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\nclass Broker:\n\n def __init__(self, equity):\n self.execute = Execute(equity)\n <mask token>\n\n def check_order(self, ohlc, date, commission):\n \"\"\"\n check the order and set the information to order by different condition\n \"\"\"\n op = ohlc[0]\n for o in order_queue:\n if position() != 0 and position() + o.units != 0 and len(\n order_queue) == 1:\n o.is_parents = False\n if o.limit_price:\n trading_price = o.limit_price\n else:\n trading_price = op\n setattr(o, 'trading_price', trading_price)\n setattr(o, 'trading_date', date)\n if o.is_long:\n if 1 > o.units > 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n 
add_position_long_order.append(o)\n elif o.is_short:\n if -1 < o.units < 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_short_order.append(o)\n order_execute.append(o)\n self.work(ohlc, date=date, commission=commission)\n order_queue.clear()\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\n\n def check_if_sl_or_sp(self, ohlc, date, commission):\n for t in order_execute:\n origin_o = deepcopy(t).is_parents\n if util.touch_stop_loss(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices,\n trading_date=date, _is_fill=False, _is_parent=False,\n stop_loss=None)\n elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.\n stop_profit_prices, trading_date=date, _is_fill=False,\n _is_parent=False, stop_loss=None)\n if not origin_o:\n order_execute.remove(t)\n self.work(ohlc, date=date, commission=commission)\n <mask token>\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def 
__init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. 
a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\n<mask token>\n", "step-4": "from accessor import *\nfrom order import Order\nfrom copy import deepcopy\nimport pandas as pd\nimport numpy as np\nimport util\n\n\nclass Broker:\n\n def __init__(self, equity):\n self.execute = Execute(equity)\n\n def make_order(self, unit, limit_price, stop_loss, stop_profit):\n order_queue.append(Order(unit, limit_price, stop_loss, stop_profit))\n\n def check_order(self, ohlc, date, commission):\n \"\"\"\n check the order and set the information to order by different condition\n \"\"\"\n op = ohlc[0]\n for o in order_queue:\n if position() != 0 and position() + o.units != 0 and len(\n order_queue) == 1:\n o.is_parents = False\n if o.limit_price:\n trading_price = o.limit_price\n else:\n trading_price = op\n setattr(o, 'trading_price', trading_price)\n setattr(o, 'trading_date', date)\n if o.is_long:\n if 1 > o.units > 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 - 
o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_long_order.append(o)\n elif o.is_short:\n if -1 < o.units < 0:\n size = int(self.execute.equity * o.units / trading_price)\n setattr(o, 'units', size)\n if o.stop_loss:\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\n setattr(o, 'stop_loss_prices', stop_loss_price)\n if o.stop_profit:\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\n setattr(o, 'stop_profit_prices', stop_profit_price)\n if not o.is_parents:\n add_position_short_order.append(o)\n order_execute.append(o)\n self.work(ohlc, date=date, commission=commission)\n order_queue.clear()\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\n\n def check_if_sl_or_sp(self, ohlc, date, commission):\n for t in order_execute:\n origin_o = deepcopy(t).is_parents\n if util.touch_stop_loss(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices,\n trading_date=date, _is_fill=False, _is_parent=False,\n stop_loss=None)\n elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):\n t.replace(_unit=-t.units, _trading_price=t.\n stop_profit_prices, trading_date=date, _is_fill=False,\n _is_parent=False, stop_loss=None)\n if not origin_o:\n order_execute.remove(t)\n self.work(ohlc, date=date, commission=commission)\n\n def work(self, price, date, commission):\n self.execute.trading(price, date, commission)\n\n def liquidation(self, pos, price, date, commission):\n \"\"\"\n clean the last position\n \"\"\"\n o = Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=\n None, is_fill=False)\n setattr(o, 'trading_price', price[0])\n setattr(o, 'trading_date', date)\n order_execute.append(o)\n self.work(price=price, date=date, commission=commission)\n\n def get_log(self):\n log_dict = {'BuyDate': buy_date, 
'BuyPrice': buy_price, 'BuyUnits':\n buy_unit, 'CashPaying': amnt_paying, 'SellDate': sell_date,\n 'SellPrice': sell_price, 'SellUnits': sell_unit,\n 'CashReceiving': amnt_receiving}\n log = pd.DataFrame(log_dict)\n for i in list(log_dict.values()):\n i.clear()\n return log\n\n\nclass Execute:\n\n def __init__(self, equity):\n self.__equity = equity\n\n def trading(self, price, date, commission):\n c = price[3]\n for t in order_execute:\n if not t.is_filled:\n position_list.append(t.units)\n if t.is_short and add_position_long_order and t.is_parents:\n self.split_add_pos_order(t, add_position_long_order,\n commission)\n elif t.is_long and add_position_short_order and t.is_parents:\n self.split_add_pos_order(t, add_position_short_order,\n commission)\n else:\n self.fill(t, commission)\n if position() == 0 and t in order_execute:\n del order_execute[:order_execute.index(t) + 1]\n\n def fill(self, t, commission):\n adj_price = util.adjust_price(trade=t, commission=commission)\n if t.is_long:\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\n buy_price.append(t.trading_price)\n buy_date.append(t.trading_date)\n buy_unit.append(t.units)\n amnt_paying.append(adj_price * t.units)\n self.__equity -= t.units * adj_price\n setattr(t, 'is_filled', True)\n elif t.is_short:\n sell_price.append(t.trading_price)\n sell_date.append(t.trading_date)\n sell_unit.append(t.units)\n amnt_receiving.append(abs(t.units) * adj_price)\n self.__equity += abs(t.units) * adj_price\n setattr(t, 'is_filled', True)\n\n def split_add_pos_order(self, trade_order, add_position_order: list,\n commission):\n \"\"\"\n split the order which include overweight order into a list of single order and fill them\n e.g. 
a sell order [with 6 units has an parent order and an overweight order] becomes\n [an parent order with -4 units , an order with -2 units]\n \"\"\"\n temp_order_list = []\n origin_trader_order_sign = np.sign(trade_order.units)\n if trade_order.is_short:\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in\n add_position_order)\n else:\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in\n add_position_order)\n trade_order.units = parents_unit\n if trade_order.units != 0:\n temp_order_list.append(trade_order)\n for _t in add_position_order:\n if np.sign(_t.units) == origin_trader_order_sign:\n temp_order_list.append(_t)\n else:\n ct = deepcopy(_t)\n ct.units = -_t.units\n ct.trading_date = trade_order.trading_date\n ct.trading_prices = trade_order.trading_price\n temp_order_list.append(ct)\n for temp_o in temp_order_list:\n self.fill(temp_o, commission)\n add_position_order.clear()\n\n @property\n def equity(self):\n return self.__equity\n\n\ndef position():\n return sum(size for size in position_list)\n", "step-5": "from accessor import *\r\nfrom order import Order\r\nfrom copy import deepcopy\r\nimport pandas as pd\r\nimport numpy as np\r\nimport util\r\n\r\n\r\nclass Broker:\r\n def __init__(self, equity):\r\n\r\n self.execute = Execute(equity) # Execute\r\n\r\n def make_order(self, unit, limit_price, stop_loss, stop_profit):\r\n\r\n order_queue.append(Order(unit, limit_price, stop_loss, stop_profit))\r\n\r\n def check_order(self, ohlc, date, commission):\r\n \"\"\"\r\n check the order and set the information to order by different condition\r\n \"\"\"\r\n\r\n op = ohlc[0]\r\n\r\n for o in order_queue:\r\n if position() != 0 and position() + o.units != 0 and len(order_queue) == 1:\r\n o.is_parents = False\r\n\r\n if o.limit_price:\r\n trading_price = o.limit_price\r\n\r\n else:\r\n trading_price = op\r\n\r\n setattr(o, 'trading_price', trading_price)\r\n setattr(o, 'trading_date', date)\r\n\r\n if o.is_long:\r\n if 1 > o.units > 0:\r\n\r\n 
size = int((self.execute.equity * o.units) / trading_price)\r\n setattr(o, 'units', size)\r\n\r\n if o.stop_loss:\r\n stop_loss_price = o.trading_price * (1 - o.stop_loss)\r\n setattr(o, 'stop_loss_prices', stop_loss_price)\r\n\r\n if o.stop_profit:\r\n stop_profit_price = o.trading_price * (1 + o.stop_profit)\r\n setattr(o, 'stop_profit_prices', stop_profit_price)\r\n\r\n if not o.is_parents:\r\n add_position_long_order.append(o)\r\n\r\n elif o.is_short:\r\n\r\n if -1 < o.units < 0:\r\n size = int((self.execute.equity * o.units) / trading_price)\r\n\r\n setattr(o, 'units', size)\r\n\r\n if o.stop_loss:\r\n stop_loss_price = o.trading_price * (1 + o.stop_loss)\r\n setattr(o, 'stop_loss_prices', stop_loss_price)\r\n\r\n if o.stop_profit:\r\n stop_profit_price = o.trading_price * (1 - o.stop_profit)\r\n setattr(o, 'stop_profit_prices', stop_profit_price)\r\n\r\n if not o.is_parents:\r\n add_position_short_order.append(o)\r\n\r\n order_execute.append(o)\r\n self.work(ohlc, date=date, commission=commission)\r\n\r\n order_queue.clear()\r\n\r\n self.check_if_sl_or_sp(ohlc=ohlc, date=date, commission=commission)\r\n\r\n def check_if_sl_or_sp(self, ohlc, date, commission):\r\n for t in order_execute:\r\n origin_o = deepcopy(t).is_parents\r\n if util.touch_stop_loss(order=t, price=ohlc[3], date=date) :\r\n\r\n t.replace(_unit=-t.units, _trading_price=t.stop_loss_prices, trading_date=date, _is_fill=False,\r\n _is_parent=False, stop_loss=None)\r\n\r\n elif util.touch_stop_profit(order=t, price=ohlc[3], date=date):\r\n t.replace(_unit=-t.units, _trading_price=t.stop_profit_prices, trading_date=date, _is_fill=False,\r\n _is_parent=False, stop_loss=None)\r\n\r\n if not origin_o:\r\n order_execute.remove(t)\r\n\r\n self.work(ohlc, date=date, commission=commission)\r\n\r\n def work(self, price, date, commission):\r\n\r\n self.execute.trading(price, date, commission)\r\n\r\n def liquidation(self, pos, price, date, commission):\r\n \"\"\"\r\n clean the last position\r\n \"\"\"\r\n o 
= Order(-1 * pos, limit_price=None, stop_loss=None, stop_profit=None, is_fill=False)\r\n setattr(o, 'trading_price', price[0])\r\n setattr(o, 'trading_date', date)\r\n order_execute.append(o)\r\n\r\n self.work(price=price, date=date, commission=commission)\r\n\r\n def get_log(self):\r\n log_dict = {'BuyDate': buy_date, 'BuyPrice': buy_price, 'BuyUnits': buy_unit, 'CashPaying': amnt_paying,\r\n 'SellDate': sell_date, 'SellPrice': sell_price, 'SellUnits': sell_unit,\r\n 'CashReceiving': amnt_receiving}\r\n\r\n log = pd.DataFrame(log_dict)\r\n\r\n for i in list(log_dict.values()):\r\n i.clear()\r\n\r\n return log\r\n\r\n\r\nclass Execute:\r\n def __init__(self, equity):\r\n self.__equity = equity\r\n\r\n def trading(self, price, date, commission):\r\n\r\n c = price[3]\r\n\r\n for t in order_execute:\r\n if not t.is_filled:\r\n position_list.append(t.units)\r\n\r\n if t.is_short and add_position_long_order and t.is_parents:\r\n self.split_add_pos_order(t, add_position_long_order, commission)\r\n elif t.is_long and add_position_short_order and t.is_parents:\r\n self.split_add_pos_order(t, add_position_short_order, commission)\r\n\r\n else:\r\n self.fill(t, commission)\r\n\r\n # if self._touch_stop_loss(order=t, price=c):\r\n # origin_o = deepcopy(t).is_parents\r\n # t.replace(units=-t.units, trading_prices=t.stop_loss_price, trading_date=date, is_filled=False,\r\n # is_parent=False, stop_loss=None)\r\n # if not origin_o:\r\n # order_execute.remove(t)\r\n\r\n if position() == 0 and t in order_execute: del order_execute[: order_execute.index(t) + 1]\r\n\r\n def fill(self, t, commission):\r\n adj_price = util.adjust_price(trade=t, commission=commission)\r\n\r\n if t.is_long:\r\n assert self.__equity >= adj_price * t.units, 'Your money is empty'\r\n\r\n buy_price.append(t.trading_price)\r\n buy_date.append(t.trading_date)\r\n buy_unit.append(t.units)\r\n amnt_paying.append(adj_price * t.units)\r\n\r\n self.__equity -= t.units * adj_price\r\n setattr(t, 'is_filled', 
True)\r\n\r\n elif t.is_short:\r\n\r\n sell_price.append(t.trading_price)\r\n sell_date.append(t.trading_date)\r\n sell_unit.append(t.units)\r\n amnt_receiving.append(abs(t.units) * adj_price)\r\n\r\n self.__equity += abs(t.units) * adj_price\r\n setattr(t, 'is_filled', True)\r\n\r\n\r\n def split_add_pos_order(self, trade_order, add_position_order: list, commission):\r\n \"\"\"\r\n split the order which include overweight order into a list of single order and fill them\r\n e.g. a sell order [with 6 units has an parent order and an overweight order] becomes\r\n [an parent order with -4 units , an order with -2 units]\r\n \"\"\"\r\n temp_order_list = []\r\n origin_trader_order_sign = np.sign(trade_order.units)\r\n if trade_order.is_short:\r\n parents_unit = trade_order.units + sum(abs(_o.units) for _o in add_position_order)\r\n else:\r\n parents_unit = trade_order.units - sum(abs(_o.units) for _o in add_position_order)\r\n trade_order.units = parents_unit\r\n if trade_order.units != 0:\r\n temp_order_list.append(trade_order)\r\n for _t in add_position_order:\r\n if np.sign(_t.units) == origin_trader_order_sign:\r\n temp_order_list.append(_t)\r\n\r\n else:\r\n ct = deepcopy(_t)\r\n\r\n ct.units = -_t.units\r\n ct.trading_date = trade_order.trading_date\r\n ct.trading_prices = trade_order.trading_price\r\n\r\n temp_order_list.append(ct)\r\n for temp_o in temp_order_list:\r\n self.fill(temp_o, commission)\r\n\r\n add_position_order.clear()\r\n\r\n @property\r\n def equity(self):\r\n return self.__equity\r\n\r\n\r\ndef position():\r\n return sum(size for size in position_list)\r\n", "step-ids": [ 9, 11, 12, 16, 17 ] }
[ 9, 11, 12, 16, 17 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class CfCoreConfig(AppConfig): <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> class CfCoreConfig(AppConfig): name = 'cf_core' <|reserved_special_token_1|> from django.apps import AppConfig class CfCoreConfig(AppConfig): name = 'cf_core'
flexible
{ "blob_id": "01847c9e601eae6775cd4324483740c30e344557", "index": 382, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\nclass CfCoreConfig(AppConfig):\n <mask token>\n", "step-3": "<mask token>\n\n\nclass CfCoreConfig(AppConfig):\n name = 'cf_core'\n", "step-4": "from django.apps import AppConfig\n\n\nclass CfCoreConfig(AppConfig):\n name = 'cf_core'\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> class Solution: <|reserved_special_token_0|> <|reserved_special_token_1|> class Solution: def evalRPN(self, tokens: List[str]) ->int: def operation(op1, op2, op): if op == '+': return op1 + op2 if op == '-': return op1 - op2 if op == '*': return op1 * op2 if op == '/': return int(op1 / op2) stack = [] for char in tokens: if char in ['+', '-', '*', '/']: op2 = stack.pop() op1 = stack.pop() res = operation(op1, op2, char) stack.append(int(res)) else: stack.append(int(char)) return stack.pop() <|reserved_special_token_1|> class Solution: def evalRPN(self, tokens: List[str]) -> int: def operation(op1,op2,op): if op == "+": return op1 + op2 if op == "-": return op1 - op2 if op == "*": return op1 * op2 if op == "/": return int(op1/op2) stack = [] for char in tokens: if char in ["+", "-", "*", "/"]: op2 = stack.pop() op1 = stack.pop() res = operation(op1,op2,char) stack.append(int(res)) else: stack.append(int(char)) return stack.pop()
flexible
{ "blob_id": "6b597f1570c022d17e4476e2ab8817e724a166a7", "index": 1096, "step-1": "<mask token>\n", "step-2": "class Solution:\n <mask token>\n", "step-3": "class Solution:\n\n def evalRPN(self, tokens: List[str]) ->int:\n\n def operation(op1, op2, op):\n if op == '+':\n return op1 + op2\n if op == '-':\n return op1 - op2\n if op == '*':\n return op1 * op2\n if op == '/':\n return int(op1 / op2)\n stack = []\n for char in tokens:\n if char in ['+', '-', '*', '/']:\n op2 = stack.pop()\n op1 = stack.pop()\n res = operation(op1, op2, char)\n stack.append(int(res))\n else:\n stack.append(int(char))\n return stack.pop()\n", "step-4": "class Solution:\r\n def evalRPN(self, tokens: List[str]) -> int:\r\n def operation(op1,op2,op):\r\n if op == \"+\":\r\n return op1 + op2\r\n if op == \"-\":\r\n return op1 - op2\r\n if op == \"*\":\r\n return op1 * op2\r\n if op == \"/\":\r\n return int(op1/op2)\r\n \r\n stack = []\r\n for char in tokens:\r\n if char in [\"+\", \"-\", \"*\", \"/\"]:\r\n op2 = stack.pop()\r\n op1 = stack.pop()\r\n res = operation(op1,op2,char)\r\n stack.append(int(res))\r\n else:\r\n stack.append(int(char))\r\n return stack.pop()", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
from numpy import array import xspec as xs import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import Grid from spectralTools.step import Step class xspecView(object): def __init__(self): #xs.Plot.device="/xs" xs.Plot.xAxis='keV' self.swift = [] self.nai=[] self.bgo=[] def LoadSwiftPHAs(self,phaFiles): ''' Load The Swift PHAs in time order ''' for pha in phaFiles: s = xs.Spectrum(pha) s.ignore("**-15. 150.-**") cnts = sum(s.values) self.swift.append(cnts) def LoadNaiPHAs(self,phaFiles): ''' Load The GBM NaI PHAs in time order ''' for pha in phaFiles: s = xs.Spectrum(pha) s.ignore("**-8. 1999..-**") cnts = sum(s.values) self.nai.append(cnts) def LoadBGOPHAs(self,phaFiles): ''' Load The GBM BGO PHAs in time order ''' for pha in phaFiles: s = xs.Spectrum(pha) s.ignore("**-250. 10000.-**") cnts = sum(s.values) self.bgo.append(cnts) def SetTimeBins(self,starts,stops): self.tBins = array(zip(starts,stops)) def PlotLC(self): fig = plt.figure(1) grid = Grid(fig,111,nrows_ncols = (3,1), axes_pad=0.,direction='column') Step(grid[0],self.tBins,self.swift,'r',1.) Step(grid[1],self.tBins,self.nai,'b',1.) Step(grid[2],self.tBins,self.bgo,'g',1.)
normal
{ "blob_id": "ba34bae7849ad97f939c1a7cb91461269cd58b64", "index": 8994, "step-1": "<mask token>\n\n\nclass xspecView(object):\n <mask token>\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n <mask token>\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n", "step-2": "<mask token>\n\n\nclass xspecView(object):\n <mask token>\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n\n def LoadBGOPHAs(self, phaFiles):\n \"\"\"\n Load The GBM BGO PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-250. 
10000.-**')\n cnts = sum(s.values)\n self.bgo.append(cnts)\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n", "step-3": "<mask token>\n\n\nclass xspecView(object):\n\n def __init__(self):\n xs.Plot.xAxis = 'keV'\n self.swift = []\n self.nai = []\n self.bgo = []\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n\n def LoadBGOPHAs(self, phaFiles):\n \"\"\"\n Load The GBM BGO PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-250. 
10000.-**')\n cnts = sum(s.values)\n self.bgo.append(cnts)\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n", "step-4": "from numpy import array\nimport xspec as xs\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import Grid\nfrom spectralTools.step import Step\n\n\nclass xspecView(object):\n\n def __init__(self):\n xs.Plot.xAxis = 'keV'\n self.swift = []\n self.nai = []\n self.bgo = []\n\n def LoadSwiftPHAs(self, phaFiles):\n \"\"\"\n Load The Swift PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-15. 150.-**')\n cnts = sum(s.values)\n self.swift.append(cnts)\n\n def LoadNaiPHAs(self, phaFiles):\n \"\"\"\n Load The GBM NaI PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-8. 1999..-**')\n cnts = sum(s.values)\n self.nai.append(cnts)\n\n def LoadBGOPHAs(self, phaFiles):\n \"\"\"\n Load The GBM BGO PHAs in time order\n\n \"\"\"\n for pha in phaFiles:\n s = xs.Spectrum(pha)\n s.ignore('**-250. 
10000.-**')\n cnts = sum(s.values)\n self.bgo.append(cnts)\n\n def SetTimeBins(self, starts, stops):\n self.tBins = array(zip(starts, stops))\n\n def PlotLC(self):\n fig = plt.figure(1)\n grid = Grid(fig, 111, nrows_ncols=(3, 1), axes_pad=0.0, direction=\n 'column')\n Step(grid[0], self.tBins, self.swift, 'r', 1.0)\n Step(grid[1], self.tBins, self.nai, 'b', 1.0)\n Step(grid[2], self.tBins, self.bgo, 'g', 1.0)\n", "step-5": "from numpy import array\nimport xspec as xs \nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import Grid\nfrom spectralTools.step import Step\n\n\n\nclass xspecView(object):\n\n\n def __init__(self):\n\n #xs.Plot.device=\"/xs\"\n xs.Plot.xAxis='keV'\n\n self.swift = []\n self.nai=[]\n self.bgo=[]\n\n def LoadSwiftPHAs(self,phaFiles):\n '''\n Load The Swift PHAs in time order\n\n '''\n for pha in phaFiles:\n\n s = xs.Spectrum(pha)\n s.ignore(\"**-15. 150.-**\")\n\n cnts = sum(s.values)\n\n\n self.swift.append(cnts)\n\n\n def LoadNaiPHAs(self,phaFiles):\n '''\n Load The GBM NaI PHAs in time order\n\n '''\n for pha in phaFiles:\n\n s = xs.Spectrum(pha)\n s.ignore(\"**-8. 1999..-**\")\n cnts = sum(s.values)\n\n self.nai.append(cnts)\n\n\n def LoadBGOPHAs(self,phaFiles):\n '''\n Load The GBM BGO PHAs in time order\n\n '''\n for pha in phaFiles:\n\n s = xs.Spectrum(pha)\n s.ignore(\"**-250. 10000.-**\")\n cnts = sum(s.values)\n\n self.bgo.append(cnts)\n \n\n\n def SetTimeBins(self,starts,stops):\n\n self.tBins = array(zip(starts,stops))\n\n \n\n def PlotLC(self):\n\n fig = plt.figure(1)\n\n grid = Grid(fig,111,nrows_ncols = (3,1), axes_pad=0.,direction='column')\n \n Step(grid[0],self.tBins,self.swift,'r',1.)\n\n Step(grid[1],self.tBins,self.nai,'b',1.)\n\n Step(grid[2],self.tBins,self.bgo,'g',1.)\n \n\n \n \n", "step-ids": [ 5, 6, 7, 8, 9 ] }
[ 5, 6, 7, 8, 9 ]
<|reserved_special_token_0|> def get_slots_query(scene: GameScene, entity: int): """Return a query that resolves to entity's equipment slots and their equipped items.""" def query(): paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity) equipment = paper_doll.get_equipment() return [(k, scene.cm.get_one(Entity, v)) for k, v in equipment.items()] return query def row_builder(entity): part_name = entity[0] equipped = entity[1].name if entity[1] else '' return [part_name, equipped] <|reserved_special_token_1|> <|reserved_special_token_0|> def handle_show_equip_screen(scene: GameScene): brains = [b for b in scene.cm.get(Brain) if b.intention is Intention. SHOW_EQUIP_SCREEN] for brain in brains: entity = brain.entity menu_actions = OrderedDict() equip_action = MenuAction('e', 'equip', lambda slot: set_intention( scene, entity, slot, Intention.EQUIP_SLOT)) menu_actions[tcod.event.K_e] = equip_action equipment_scene = ListMenuScene('Equipment', get_slots_query(scene, entity), row_builder=row_builder, default_action=equip_action, menu_actions=menu_actions, id_extractor=lambda e: e[1], parent_scene=scene) scene.controller.push_scene(equipment_scene) retract_intention(scene, entity) def get_slots_query(scene: GameScene, entity: int): """Return a query that resolves to entity's equipment slots and their equipped items.""" def query(): paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity) equipment = paper_doll.get_equipment() return [(k, scene.cm.get_one(Entity, v)) for k, v in equipment.items()] return query def row_builder(entity): part_name = entity[0] equipped = entity[1].name if entity[1] else '' return [part_name, equipped] <|reserved_special_token_1|> <|reserved_special_token_0|> def run(scene: GameScene): handle_show_equip_screen(scene) def handle_show_equip_screen(scene: GameScene): brains = [b for b in scene.cm.get(Brain) if b.intention is Intention. 
SHOW_EQUIP_SCREEN] for brain in brains: entity = brain.entity menu_actions = OrderedDict() equip_action = MenuAction('e', 'equip', lambda slot: set_intention( scene, entity, slot, Intention.EQUIP_SLOT)) menu_actions[tcod.event.K_e] = equip_action equipment_scene = ListMenuScene('Equipment', get_slots_query(scene, entity), row_builder=row_builder, default_action=equip_action, menu_actions=menu_actions, id_extractor=lambda e: e[1], parent_scene=scene) scene.controller.push_scene(equipment_scene) retract_intention(scene, entity) def get_slots_query(scene: GameScene, entity: int): """Return a query that resolves to entity's equipment slots and their equipped items.""" def query(): paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity) equipment = paper_doll.get_equipment() return [(k, scene.cm.get_one(Entity, v)) for k, v in equipment.items()] return query def row_builder(entity): part_name = entity[0] equipped = entity[1].name if entity[1] else '' return [part_name, equipped] <|reserved_special_token_1|> from collections import OrderedDict import tcod.event from components import Entity, PaperDoll, Brain from components.enums import Intention from engine import GameScene from scenes.list_menu_scene import MenuAction, ListMenuScene from systems.utilities import set_intention, retract_intention def run(scene: GameScene): handle_show_equip_screen(scene) def handle_show_equip_screen(scene: GameScene): brains = [b for b in scene.cm.get(Brain) if b.intention is Intention. 
SHOW_EQUIP_SCREEN] for brain in brains: entity = brain.entity menu_actions = OrderedDict() equip_action = MenuAction('e', 'equip', lambda slot: set_intention( scene, entity, slot, Intention.EQUIP_SLOT)) menu_actions[tcod.event.K_e] = equip_action equipment_scene = ListMenuScene('Equipment', get_slots_query(scene, entity), row_builder=row_builder, default_action=equip_action, menu_actions=menu_actions, id_extractor=lambda e: e[1], parent_scene=scene) scene.controller.push_scene(equipment_scene) retract_intention(scene, entity) def get_slots_query(scene: GameScene, entity: int): """Return a query that resolves to entity's equipment slots and their equipped items.""" def query(): paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity) equipment = paper_doll.get_equipment() return [(k, scene.cm.get_one(Entity, v)) for k, v in equipment.items()] return query def row_builder(entity): part_name = entity[0] equipped = entity[1].name if entity[1] else '' return [part_name, equipped] <|reserved_special_token_1|> from collections import OrderedDict import tcod.event from components import Entity, PaperDoll, Brain from components.enums import Intention from engine import GameScene from scenes.list_menu_scene import MenuAction, ListMenuScene from systems.utilities import set_intention, retract_intention def run(scene: GameScene): handle_show_equip_screen(scene) def handle_show_equip_screen(scene: GameScene): brains = [b for b in scene.cm.get(Brain) if b.intention is Intention.SHOW_EQUIP_SCREEN] for brain in brains: entity = brain.entity menu_actions = OrderedDict() equip_action = MenuAction('e', 'equip', lambda slot: set_intention(scene, entity, slot, Intention.EQUIP_SLOT)) menu_actions[tcod.event.K_e] = equip_action equipment_scene = ListMenuScene( "Equipment", get_slots_query(scene, entity), row_builder=row_builder, default_action=equip_action, menu_actions=menu_actions, id_extractor=lambda e: e[1], parent_scene=scene ) scene.controller.push_scene(equipment_scene) 
retract_intention(scene, entity) def get_slots_query(scene: GameScene, entity: int): """Return a query that resolves to entity's equipment slots and their equipped items.""" def query(): paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity) equipment = paper_doll.get_equipment() return [ (k, scene.cm.get_one(Entity, v)) for k, v in equipment.items() ] return query def row_builder(entity): part_name = entity[0] equipped = entity[1].name if entity[1] else '' return [part_name, equipped]
flexible
{ "blob_id": "f1547e0893ce9c4661b546e49f3fc998745390d9", "index": 4397, "step-1": "<mask token>\n\n\ndef get_slots_query(scene: GameScene, entity: int):\n \"\"\"Return a query that resolves to entity's equipment slots and their equipped items.\"\"\"\n\n def query():\n paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity)\n equipment = paper_doll.get_equipment()\n return [(k, scene.cm.get_one(Entity, v)) for k, v in equipment.items()]\n return query\n\n\ndef row_builder(entity):\n part_name = entity[0]\n equipped = entity[1].name if entity[1] else ''\n return [part_name, equipped]\n", "step-2": "<mask token>\n\n\ndef handle_show_equip_screen(scene: GameScene):\n brains = [b for b in scene.cm.get(Brain) if b.intention is Intention.\n SHOW_EQUIP_SCREEN]\n for brain in brains:\n entity = brain.entity\n menu_actions = OrderedDict()\n equip_action = MenuAction('e', 'equip', lambda slot: set_intention(\n scene, entity, slot, Intention.EQUIP_SLOT))\n menu_actions[tcod.event.K_e] = equip_action\n equipment_scene = ListMenuScene('Equipment', get_slots_query(scene,\n entity), row_builder=row_builder, default_action=equip_action,\n menu_actions=menu_actions, id_extractor=lambda e: e[1],\n parent_scene=scene)\n scene.controller.push_scene(equipment_scene)\n retract_intention(scene, entity)\n\n\ndef get_slots_query(scene: GameScene, entity: int):\n \"\"\"Return a query that resolves to entity's equipment slots and their equipped items.\"\"\"\n\n def query():\n paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity)\n equipment = paper_doll.get_equipment()\n return [(k, scene.cm.get_one(Entity, v)) for k, v in equipment.items()]\n return query\n\n\ndef row_builder(entity):\n part_name = entity[0]\n equipped = entity[1].name if entity[1] else ''\n return [part_name, equipped]\n", "step-3": "<mask token>\n\n\ndef run(scene: GameScene):\n handle_show_equip_screen(scene)\n\n\ndef handle_show_equip_screen(scene: GameScene):\n brains = [b for b in scene.cm.get(Brain) if 
b.intention is Intention.\n SHOW_EQUIP_SCREEN]\n for brain in brains:\n entity = brain.entity\n menu_actions = OrderedDict()\n equip_action = MenuAction('e', 'equip', lambda slot: set_intention(\n scene, entity, slot, Intention.EQUIP_SLOT))\n menu_actions[tcod.event.K_e] = equip_action\n equipment_scene = ListMenuScene('Equipment', get_slots_query(scene,\n entity), row_builder=row_builder, default_action=equip_action,\n menu_actions=menu_actions, id_extractor=lambda e: e[1],\n parent_scene=scene)\n scene.controller.push_scene(equipment_scene)\n retract_intention(scene, entity)\n\n\ndef get_slots_query(scene: GameScene, entity: int):\n \"\"\"Return a query that resolves to entity's equipment slots and their equipped items.\"\"\"\n\n def query():\n paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity)\n equipment = paper_doll.get_equipment()\n return [(k, scene.cm.get_one(Entity, v)) for k, v in equipment.items()]\n return query\n\n\ndef row_builder(entity):\n part_name = entity[0]\n equipped = entity[1].name if entity[1] else ''\n return [part_name, equipped]\n", "step-4": "from collections import OrderedDict\nimport tcod.event\nfrom components import Entity, PaperDoll, Brain\nfrom components.enums import Intention\nfrom engine import GameScene\nfrom scenes.list_menu_scene import MenuAction, ListMenuScene\nfrom systems.utilities import set_intention, retract_intention\n\n\ndef run(scene: GameScene):\n handle_show_equip_screen(scene)\n\n\ndef handle_show_equip_screen(scene: GameScene):\n brains = [b for b in scene.cm.get(Brain) if b.intention is Intention.\n SHOW_EQUIP_SCREEN]\n for brain in brains:\n entity = brain.entity\n menu_actions = OrderedDict()\n equip_action = MenuAction('e', 'equip', lambda slot: set_intention(\n scene, entity, slot, Intention.EQUIP_SLOT))\n menu_actions[tcod.event.K_e] = equip_action\n equipment_scene = ListMenuScene('Equipment', get_slots_query(scene,\n entity), row_builder=row_builder, default_action=equip_action,\n 
menu_actions=menu_actions, id_extractor=lambda e: e[1],\n parent_scene=scene)\n scene.controller.push_scene(equipment_scene)\n retract_intention(scene, entity)\n\n\ndef get_slots_query(scene: GameScene, entity: int):\n \"\"\"Return a query that resolves to entity's equipment slots and their equipped items.\"\"\"\n\n def query():\n paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity)\n equipment = paper_doll.get_equipment()\n return [(k, scene.cm.get_one(Entity, v)) for k, v in equipment.items()]\n return query\n\n\ndef row_builder(entity):\n part_name = entity[0]\n equipped = entity[1].name if entity[1] else ''\n return [part_name, equipped]\n", "step-5": "\n\nfrom collections import OrderedDict\n\nimport tcod.event\n\nfrom components import Entity, PaperDoll, Brain\nfrom components.enums import Intention\nfrom engine import GameScene\nfrom scenes.list_menu_scene import MenuAction, ListMenuScene\nfrom systems.utilities import set_intention, retract_intention\n\n\ndef run(scene: GameScene):\n handle_show_equip_screen(scene)\n\n\ndef handle_show_equip_screen(scene: GameScene):\n brains = [b for b in scene.cm.get(Brain) if b.intention is Intention.SHOW_EQUIP_SCREEN]\n for brain in brains:\n entity = brain.entity\n menu_actions = OrderedDict()\n equip_action = MenuAction('e', 'equip', lambda slot: set_intention(scene, entity, slot, Intention.EQUIP_SLOT))\n menu_actions[tcod.event.K_e] = equip_action\n equipment_scene = ListMenuScene(\n \"Equipment\",\n get_slots_query(scene, entity),\n row_builder=row_builder,\n default_action=equip_action,\n menu_actions=menu_actions,\n id_extractor=lambda e: e[1],\n parent_scene=scene\n )\n scene.controller.push_scene(equipment_scene)\n retract_intention(scene, entity)\n\n\ndef get_slots_query(scene: GameScene, entity: int):\n \"\"\"Return a query that resolves to entity's equipment slots and their equipped items.\"\"\"\n\n def query():\n\n paper_doll: PaperDoll = scene.cm.get_one(PaperDoll, entity)\n equipment = 
paper_doll.get_equipment()\n\n return [\n (k, scene.cm.get_one(Entity, v))\n for k, v in equipment.items()\n ]\n return query\n\n\ndef row_builder(entity):\n part_name = entity[0]\n equipped = entity[1].name if entity[1] else ''\n return [part_name, equipped]\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
""" This file contains the general data storage classes used throughout Logician. """ import csv import json import os from collections import OrderedDict VALID_CHANNEL_COUNTS = [4] class Acquisition: """ The acqusition object contains data from all of the acquired channels. Parameters ---------- data : array or bytes or str Array of form [[1, 0, 0, ...], [0, 0, 1, ...], ...] or bytes of data. If data is bytes, channel_count must be provided. samplerate : int The acquisition rate in Samples / sec. """ def __init__(self, data, sample_rate=1, channel_count=None): if isinstance(data, list): if len(data) not in VALID_CHANNEL_COUNTS: raise ValueError('data must have length %s' % str(VALID_CHANNEL_COUNTS)) l = len(data[0]) for channel in data: if len(channel) != l: raise ValueError('All channels must be have same length.') self.data = data elif isinstance(data, bytes): if channel_count not in VALID_CHANNEL_COUNTS: raise ValueError('Invalid number of channels.') # Convert byte string to list of 1's and 0's. If there are 4 # channels each byte should have 2 4 channel samples in it. The MSB # is the 4th channel of the least recent sample. 
sep_channel_data = [f(c) for c in data for f in (lambda x: ord(x) >> 4, lambda x: ord(x) & 0x0F)] unpacked_data = [[int(i) for i in list(bin(d)[2:].zfill(4))] for d in sep_channel_data] self.data = list(zip(*unpacked_data)) self.data.reverse() elif isinstance(data, str): self.load_csv_file(data) return else: raise TypeError('Invalid data type') self.sample_rate = sample_rate @property def dt(self): return 1.0 / self.sample_rate @property def acquisition_length(self): return len(self.data[0]) @property def channel_count(self): return len(self.data) def csv_string(self): out_string = '#sample_rate=%d' % self.sample_rate for row in zip(*self.data): out_string += str(row)[1:-1].replace(' ', '') out_string += '\n' return out_string def load_csv_file(self, fname): with open(fname) as f: reader = csv.reader(f) header = next(reader) sample_rate = int(header[0].split('=')[-1]) data = [[int(d) for d in row] for row in reader if len(row) != 1] self.data = list(zip(*data)) self.sample_rate = sample_rate def __len__(self): return len(self.data) def __getitem__(self, key): return self.data[key] def __iter__(self): return iter(self.data) class AnalyzerCommand: """ Simple class to hold analyzer commands and create appropriate command bytes to be sent to the firmware. 
""" sample_counts = OrderedDict((('200K', 200000), ('100K', 100000), ('50K', 50000), ('10K', 10000), ('2K', 2000))) sample_rates = OrderedDict((('1 MS/s', 1000000), ('500 KS/s', 500000), ('200 KS/s', 200000), ('100 KS/s', 10000))) def __init__(self, sample_rate=1e6, sample_count=64000, trigger_type=0, trigger_channel=0): sp = int(1.0 / sample_rate / 1e-6) self.sample_count = sample_count self.sample_rate = sample_rate sample_count /= 1000 self.command_bytes = \ [0x01, # Command (sp & 0x00FF), (sp >> 8), # Sample Period (us) (sample_count & 0x00FF), (sample_count >> 8), trigger_type, trigger_channel] self.command_bytes = (''.join([chr(x) for x in self.command_bytes]) + ' '*(64 - len(self.command_bytes))) class ThemeManager: """ A class to manage and load themes for the signal display. """ def __init__(self, theme_dir): self.theme_dir = theme_dir self.refresh() def refresh(self): self.themes = [] for fname in os.listdir(self.theme_dir): if fname.endswith('.json'): try: j = json.loads( open(os.path.join(self.theme_dir, fname)).read()) self.themes.append(j) except: continue def theme_names(self): """ Returns the names for each theme. """ return [theme.get('name', 'Error') for theme in self.themes] def theme_named(self, name): """ Returns the theme named name. Paramters --------- name : str The name of the theme to return. Returns ------- Returns the theme as a dict, or an empty dict if theme could not be found. """ for theme in self.themes: if theme.get('name', 'Error') == name: return theme
normal
{ "blob_id": "ec44e12624fbee3148cfa4f886e86ba437e920ec", "index": 4158, "step-1": "<mask token>\n\n\nclass Acquisition:\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def csv_string(self):\n out_string = '#sample_rate=%d' % self.sample_rate\n for row in zip(*self.data):\n out_string += str(row)[1:-1].replace(' ', '')\n out_string += '\\n'\n return out_string\n <mask token>\n <mask token>\n\n def __getitem__(self, key):\n return self.data[key]\n <mask token>\n\n\nclass AnalyzerCommand:\n \"\"\"\n Simple class to hold analyzer commands and create appropriate command bytes\n to be sent to the firmware.\n \"\"\"\n sample_counts = OrderedDict((('200K', 200000), ('100K', 100000), ('50K',\n 50000), ('10K', 10000), ('2K', 2000)))\n sample_rates = OrderedDict((('1 MS/s', 1000000), ('500 KS/s', 500000),\n ('200 KS/s', 200000), ('100 KS/s', 10000)))\n\n def __init__(self, sample_rate=1000000.0, sample_count=64000,\n trigger_type=0, trigger_channel=0):\n sp = int(1.0 / sample_rate / 1e-06)\n self.sample_count = sample_count\n self.sample_rate = sample_rate\n sample_count /= 1000\n self.command_bytes = [1, sp & 255, sp >> 8, sample_count & 255, \n sample_count >> 8, trigger_type, trigger_channel]\n self.command_bytes = ''.join([chr(x) for x in self.command_bytes]\n ) + ' ' * (64 - len(self.command_bytes))\n\n\nclass ThemeManager:\n \"\"\"\n A class to manage and load themes for the signal display.\n \"\"\"\n\n def __init__(self, theme_dir):\n self.theme_dir = theme_dir\n self.refresh()\n\n def refresh(self):\n self.themes = []\n for fname in os.listdir(self.theme_dir):\n if fname.endswith('.json'):\n try:\n j = json.loads(open(os.path.join(self.theme_dir, fname)\n ).read())\n self.themes.append(j)\n except:\n continue\n\n def theme_names(self):\n \"\"\"\n Returns the names for each theme.\n \"\"\"\n return [theme.get('name', 'Error') for theme in self.themes]\n\n def theme_named(self, name):\n \"\"\"\n Returns the theme named name.\n\n 
Paramters\n ---------\n name : str\n The name of the theme to return.\n\n Returns\n -------\n Returns the theme as a dict, or an empty dict if theme could not be\n found.\n \"\"\"\n for theme in self.themes:\n if theme.get('name', 'Error') == name:\n return theme\n", "step-2": "<mask token>\n\n\nclass Acquisition:\n <mask token>\n <mask token>\n\n @property\n def dt(self):\n return 1.0 / self.sample_rate\n <mask token>\n\n @property\n def channel_count(self):\n return len(self.data)\n\n def csv_string(self):\n out_string = '#sample_rate=%d' % self.sample_rate\n for row in zip(*self.data):\n out_string += str(row)[1:-1].replace(' ', '')\n out_string += '\\n'\n return out_string\n <mask token>\n <mask token>\n\n def __getitem__(self, key):\n return self.data[key]\n <mask token>\n\n\nclass AnalyzerCommand:\n \"\"\"\n Simple class to hold analyzer commands and create appropriate command bytes\n to be sent to the firmware.\n \"\"\"\n sample_counts = OrderedDict((('200K', 200000), ('100K', 100000), ('50K',\n 50000), ('10K', 10000), ('2K', 2000)))\n sample_rates = OrderedDict((('1 MS/s', 1000000), ('500 KS/s', 500000),\n ('200 KS/s', 200000), ('100 KS/s', 10000)))\n\n def __init__(self, sample_rate=1000000.0, sample_count=64000,\n trigger_type=0, trigger_channel=0):\n sp = int(1.0 / sample_rate / 1e-06)\n self.sample_count = sample_count\n self.sample_rate = sample_rate\n sample_count /= 1000\n self.command_bytes = [1, sp & 255, sp >> 8, sample_count & 255, \n sample_count >> 8, trigger_type, trigger_channel]\n self.command_bytes = ''.join([chr(x) for x in self.command_bytes]\n ) + ' ' * (64 - len(self.command_bytes))\n\n\nclass ThemeManager:\n \"\"\"\n A class to manage and load themes for the signal display.\n \"\"\"\n\n def __init__(self, theme_dir):\n self.theme_dir = theme_dir\n self.refresh()\n\n def refresh(self):\n self.themes = []\n for fname in os.listdir(self.theme_dir):\n if fname.endswith('.json'):\n try:\n j = json.loads(open(os.path.join(self.theme_dir, 
fname)\n ).read())\n self.themes.append(j)\n except:\n continue\n\n def theme_names(self):\n \"\"\"\n Returns the names for each theme.\n \"\"\"\n return [theme.get('name', 'Error') for theme in self.themes]\n\n def theme_named(self, name):\n \"\"\"\n Returns the theme named name.\n\n Paramters\n ---------\n name : str\n The name of the theme to return.\n\n Returns\n -------\n Returns the theme as a dict, or an empty dict if theme could not be\n found.\n \"\"\"\n for theme in self.themes:\n if theme.get('name', 'Error') == name:\n return theme\n", "step-3": "<mask token>\n\n\nclass Acquisition:\n <mask token>\n <mask token>\n\n @property\n def dt(self):\n return 1.0 / self.sample_rate\n\n @property\n def acquisition_length(self):\n return len(self.data[0])\n\n @property\n def channel_count(self):\n return len(self.data)\n\n def csv_string(self):\n out_string = '#sample_rate=%d' % self.sample_rate\n for row in zip(*self.data):\n out_string += str(row)[1:-1].replace(' ', '')\n out_string += '\\n'\n return out_string\n\n def load_csv_file(self, fname):\n with open(fname) as f:\n reader = csv.reader(f)\n header = next(reader)\n sample_rate = int(header[0].split('=')[-1])\n data = [[int(d) for d in row] for row in reader if len(row) != 1]\n self.data = list(zip(*data))\n self.sample_rate = sample_rate\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __iter__(self):\n return iter(self.data)\n\n\nclass AnalyzerCommand:\n \"\"\"\n Simple class to hold analyzer commands and create appropriate command bytes\n to be sent to the firmware.\n \"\"\"\n sample_counts = OrderedDict((('200K', 200000), ('100K', 100000), ('50K',\n 50000), ('10K', 10000), ('2K', 2000)))\n sample_rates = OrderedDict((('1 MS/s', 1000000), ('500 KS/s', 500000),\n ('200 KS/s', 200000), ('100 KS/s', 10000)))\n\n def __init__(self, sample_rate=1000000.0, sample_count=64000,\n trigger_type=0, trigger_channel=0):\n sp = int(1.0 / sample_rate / 
1e-06)\n self.sample_count = sample_count\n self.sample_rate = sample_rate\n sample_count /= 1000\n self.command_bytes = [1, sp & 255, sp >> 8, sample_count & 255, \n sample_count >> 8, trigger_type, trigger_channel]\n self.command_bytes = ''.join([chr(x) for x in self.command_bytes]\n ) + ' ' * (64 - len(self.command_bytes))\n\n\nclass ThemeManager:\n \"\"\"\n A class to manage and load themes for the signal display.\n \"\"\"\n\n def __init__(self, theme_dir):\n self.theme_dir = theme_dir\n self.refresh()\n\n def refresh(self):\n self.themes = []\n for fname in os.listdir(self.theme_dir):\n if fname.endswith('.json'):\n try:\n j = json.loads(open(os.path.join(self.theme_dir, fname)\n ).read())\n self.themes.append(j)\n except:\n continue\n\n def theme_names(self):\n \"\"\"\n Returns the names for each theme.\n \"\"\"\n return [theme.get('name', 'Error') for theme in self.themes]\n\n def theme_named(self, name):\n \"\"\"\n Returns the theme named name.\n\n Paramters\n ---------\n name : str\n The name of the theme to return.\n\n Returns\n -------\n Returns the theme as a dict, or an empty dict if theme could not be\n found.\n \"\"\"\n for theme in self.themes:\n if theme.get('name', 'Error') == name:\n return theme\n", "step-4": "<mask token>\n\n\nclass Acquisition:\n <mask token>\n\n def __init__(self, data, sample_rate=1, channel_count=None):\n if isinstance(data, list):\n if len(data) not in VALID_CHANNEL_COUNTS:\n raise ValueError('data must have length %s' % str(\n VALID_CHANNEL_COUNTS))\n l = len(data[0])\n for channel in data:\n if len(channel) != l:\n raise ValueError('All channels must be have same length.')\n self.data = data\n elif isinstance(data, bytes):\n if channel_count not in VALID_CHANNEL_COUNTS:\n raise ValueError('Invalid number of channels.')\n sep_channel_data = [f(c) for c in data for f in (lambda x: ord(\n x) >> 4, lambda x: ord(x) & 15)]\n unpacked_data = [[int(i) for i in list(bin(d)[2:].zfill(4))] for\n d in sep_channel_data]\n self.data 
= list(zip(*unpacked_data))\n self.data.reverse()\n elif isinstance(data, str):\n self.load_csv_file(data)\n return\n else:\n raise TypeError('Invalid data type')\n self.sample_rate = sample_rate\n\n @property\n def dt(self):\n return 1.0 / self.sample_rate\n\n @property\n def acquisition_length(self):\n return len(self.data[0])\n\n @property\n def channel_count(self):\n return len(self.data)\n\n def csv_string(self):\n out_string = '#sample_rate=%d' % self.sample_rate\n for row in zip(*self.data):\n out_string += str(row)[1:-1].replace(' ', '')\n out_string += '\\n'\n return out_string\n\n def load_csv_file(self, fname):\n with open(fname) as f:\n reader = csv.reader(f)\n header = next(reader)\n sample_rate = int(header[0].split('=')[-1])\n data = [[int(d) for d in row] for row in reader if len(row) != 1]\n self.data = list(zip(*data))\n self.sample_rate = sample_rate\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __iter__(self):\n return iter(self.data)\n\n\nclass AnalyzerCommand:\n \"\"\"\n Simple class to hold analyzer commands and create appropriate command bytes\n to be sent to the firmware.\n \"\"\"\n sample_counts = OrderedDict((('200K', 200000), ('100K', 100000), ('50K',\n 50000), ('10K', 10000), ('2K', 2000)))\n sample_rates = OrderedDict((('1 MS/s', 1000000), ('500 KS/s', 500000),\n ('200 KS/s', 200000), ('100 KS/s', 10000)))\n\n def __init__(self, sample_rate=1000000.0, sample_count=64000,\n trigger_type=0, trigger_channel=0):\n sp = int(1.0 / sample_rate / 1e-06)\n self.sample_count = sample_count\n self.sample_rate = sample_rate\n sample_count /= 1000\n self.command_bytes = [1, sp & 255, sp >> 8, sample_count & 255, \n sample_count >> 8, trigger_type, trigger_channel]\n self.command_bytes = ''.join([chr(x) for x in self.command_bytes]\n ) + ' ' * (64 - len(self.command_bytes))\n\n\nclass ThemeManager:\n \"\"\"\n A class to manage and load themes for the signal display.\n \"\"\"\n\n def 
__init__(self, theme_dir):\n self.theme_dir = theme_dir\n self.refresh()\n\n def refresh(self):\n self.themes = []\n for fname in os.listdir(self.theme_dir):\n if fname.endswith('.json'):\n try:\n j = json.loads(open(os.path.join(self.theme_dir, fname)\n ).read())\n self.themes.append(j)\n except:\n continue\n\n def theme_names(self):\n \"\"\"\n Returns the names for each theme.\n \"\"\"\n return [theme.get('name', 'Error') for theme in self.themes]\n\n def theme_named(self, name):\n \"\"\"\n Returns the theme named name.\n\n Paramters\n ---------\n name : str\n The name of the theme to return.\n\n Returns\n -------\n Returns the theme as a dict, or an empty dict if theme could not be\n found.\n \"\"\"\n for theme in self.themes:\n if theme.get('name', 'Error') == name:\n return theme\n", "step-5": "\"\"\"\nThis file contains the general data storage classes used throughout Logician.\n\"\"\"\nimport csv\nimport json\nimport os\nfrom collections import OrderedDict\n\nVALID_CHANNEL_COUNTS = [4]\n\n\nclass Acquisition:\n \"\"\"\n The acqusition object contains data from all of the acquired channels.\n\n Parameters\n ----------\n data : array or bytes or str\n Array of form [[1, 0, 0, ...], [0, 0, 1, ...], ...]\n or bytes of data.\n If data is bytes, channel_count must be provided.\n\n samplerate : int\n The acquisition rate in Samples / sec.\n \"\"\"\n def __init__(self, data, sample_rate=1, channel_count=None):\n if isinstance(data, list):\n if len(data) not in VALID_CHANNEL_COUNTS:\n raise ValueError('data must have length %s'\n % str(VALID_CHANNEL_COUNTS))\n l = len(data[0])\n for channel in data:\n if len(channel) != l:\n raise ValueError('All channels must be have same length.')\n self.data = data\n elif isinstance(data, bytes):\n if channel_count not in VALID_CHANNEL_COUNTS:\n raise ValueError('Invalid number of channels.')\n # Convert byte string to list of 1's and 0's. If there are 4\n # channels each byte should have 2 4 channel samples in it. 
The MSB\n # is the 4th channel of the least recent sample.\n sep_channel_data = [f(c) for c in data\n for f in (lambda x: ord(x) >> 4,\n lambda x: ord(x) & 0x0F)]\n unpacked_data = [[int(i) for i in list(bin(d)[2:].zfill(4))]\n for d in sep_channel_data]\n self.data = list(zip(*unpacked_data))\n self.data.reverse()\n elif isinstance(data, str):\n self.load_csv_file(data)\n return\n else:\n raise TypeError('Invalid data type')\n self.sample_rate = sample_rate\n\n @property\n def dt(self):\n return 1.0 / self.sample_rate\n\n @property\n def acquisition_length(self):\n return len(self.data[0])\n\n @property\n def channel_count(self):\n return len(self.data)\n\n def csv_string(self):\n out_string = '#sample_rate=%d' % self.sample_rate\n for row in zip(*self.data):\n out_string += str(row)[1:-1].replace(' ', '')\n out_string += '\\n'\n return out_string\n\n def load_csv_file(self, fname):\n with open(fname) as f:\n reader = csv.reader(f)\n header = next(reader)\n sample_rate = int(header[0].split('=')[-1])\n data = [[int(d) for d in row] for row in reader\n if len(row) != 1]\n self.data = list(zip(*data))\n self.sample_rate = sample_rate\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __iter__(self):\n return iter(self.data)\n\n\nclass AnalyzerCommand:\n \"\"\"\n Simple class to hold analyzer commands and create appropriate command bytes\n to be sent to the firmware.\n \"\"\"\n sample_counts = OrderedDict((('200K', 200000),\n ('100K', 100000),\n ('50K', 50000),\n ('10K', 10000),\n ('2K', 2000)))\n sample_rates = OrderedDict((('1 MS/s', 1000000),\n ('500 KS/s', 500000),\n ('200 KS/s', 200000),\n ('100 KS/s', 10000)))\n\n def __init__(self, sample_rate=1e6, sample_count=64000,\n trigger_type=0, trigger_channel=0):\n sp = int(1.0 / sample_rate / 1e-6)\n self.sample_count = sample_count\n self.sample_rate = sample_rate\n sample_count /= 1000\n self.command_bytes = \\\n [0x01, # Command\n (sp & 0x00FF), (sp >> 
8), # Sample Period (us)\n (sample_count & 0x00FF), (sample_count >> 8),\n trigger_type, trigger_channel]\n self.command_bytes = (''.join([chr(x) for x in self.command_bytes]) +\n ' '*(64 - len(self.command_bytes)))\n\n\nclass ThemeManager:\n \"\"\"\n A class to manage and load themes for the signal display.\n \"\"\"\n def __init__(self, theme_dir):\n self.theme_dir = theme_dir\n self.refresh()\n\n def refresh(self):\n self.themes = []\n for fname in os.listdir(self.theme_dir):\n if fname.endswith('.json'):\n try:\n j = json.loads(\n open(os.path.join(self.theme_dir, fname)).read())\n self.themes.append(j)\n except:\n continue\n\n def theme_names(self):\n \"\"\"\n Returns the names for each theme.\n \"\"\"\n return [theme.get('name', 'Error') for theme in self.themes]\n\n def theme_named(self, name):\n \"\"\"\n Returns the theme named name.\n\n Paramters\n ---------\n name : str\n The name of the theme to return.\n\n Returns\n -------\n Returns the theme as a dict, or an empty dict if theme could not be\n found.\n \"\"\"\n for theme in self.themes:\n if theme.get('name', 'Error') == name:\n return theme\n", "step-ids": [ 13, 15, 19, 20, 24 ] }
[ 13, 15, 19, 20, 24 ]
source = open("input.txt", "r") total = 0 def calculateWeight( weight ): fuel = calculateFuel(weight) if fuel > 0: sum = fuel + calculateWeight(fuel) return sum else: return max(0, fuel) def calculateFuel ( weight ): return weight // 3 -2 for line in source.readlines(): total += calculateWeight(int(line)) print(total)
normal
{ "blob_id": "bea1a5bc9c92d095a2f187a4c06d18d0a939f233", "index": 3376, "step-1": "<mask token>\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef calculateWeight(weight):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef calculateWeight(weight):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\nfor line in source.readlines():\n total += calculateWeight(int(line))\nprint(total)\n", "step-4": "source = open('input.txt', 'r')\ntotal = 0\n\n\ndef calculateWeight(weight):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel(weight):\n return weight // 3 - 2\n\n\nfor line in source.readlines():\n total += calculateWeight(int(line))\nprint(total)\n", "step-5": "source = open(\"input.txt\", \"r\")\ntotal = 0\n\ndef calculateWeight( weight ):\n fuel = calculateFuel(weight)\n if fuel > 0:\n sum = fuel + calculateWeight(fuel)\n return sum\n else:\n return max(0, fuel)\n\n\ndef calculateFuel ( weight ):\n return weight // 3 -2\n\nfor line in source.readlines():\n total += calculateWeight(int(line))\n\nprint(total)", "step-ids": [ 1, 2, 3, 4, 5 ] }
[ 1, 2, 3, 4, 5 ]
import matplotlib.pyplot as plt Ci_MSB = [32,16,8,4,2,1] Ci_LSB = [16,8,4,2,1] CB = 1 CP_B = 0 CP_LSB = (32-1)*(CB+CP_B-1)+10 print(CP_LSB) CP_MSB = 0 Csum_LSB = sum(Ci_LSB)+CP_LSB Csum_MSB = sum(Ci_MSB)+CP_MSB Cx = Csum_LSB*Csum_MSB+(CB+CP_B)*Csum_LSB+(CB+CP_B)*Csum_MSB Wi_MSB = [Ci_MSB[i]*(CB+CP_B+Csum_LSB)/Cx for i in range (6)] Wi_LSB = [Ci_LSB[i]*(CB+CP_B)/Cx for i in range (5)] print(Wi_MSB) print(Wi_LSB) def AtoD(vin): code = [0 for i in range(12)] code[0] = 1 if vin > 0 else 0 for i in range(6): vin = vin - Wi_MSB[i] * (code[i]-0.5)*2 code[i+1] = 1 if vin > 0 else 0 for i in range(5): vin = vin - Wi_LSB[i] * (code[i+6]-0.5)*2 code[i + 7] = 1 if vin > 0 else 0 dec_num = 0 for b in code: dec_num = dec_num * 2 + b return dec_num print(AtoD(0.50)) def DtoA_ideal(code): v = -1.0 for i in range(12): v += 2**(11-i)*code[i]/2048 return v print(DtoA_ideal([1,1,1,1,1,1,1,1,1,1,1,1])) n=1000000 x = [-1+i/n for i in range(2*n+1)] y = [AtoD(v) for v in x] # print(y[int(n/6):int(n/6)+100]) bin_num = [i for i in range(4096)] bin_size = [0 for i in range(4096)] left = x[0] for i in range(2*n): if y[i+1]!=y[i]: bin_size[y[i]] = x[i+1] - left left = x[i+1] # print(bin_size) DNL = [data*2047 -1 for data in bin_size] plt.plot(bin_num[1:4094],DNL[1:4094]) # plt.xlim(1000,1005) plt.show() # y = [DtoA_ideal(AtoD(v)) for v in x] # plt.plot(x,y) # plt.xlim(-0.01,0) # plt.ylim(-0.01,0) # plt.show() # def Vout(index): # V = 0.0 # for i in range(6): # V = V + Wi_MSB[i] * int(format(index,'b').zfill(11)[i])*1 # for i in range(5): # V = V + Wi_LSB[i] * int(format(index,'b').zfill(11)[i+6])*1 # return V # print(Vout(2047)) # # x = [i for i in range(2048)] # y = [Vout(i) for i in range(2048)] # DNL = [0]+[y[i+1]-y[i]-Vout(2047)/2047 for i in range(2047)] # DNL = [data*2048 for data in DNL] # INL = [y[i] -i*Vout(2047)/2047 for i in range (2048)] # INL = [data*2048 for data in INL] # # plt.plot(x,DNL) # plt.show()
normal
{ "blob_id": "b5ac3695a224d531f5baa53a07d3c894d44e8c4c", "index": 395, "step-1": "<mask token>\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\n<mask token>\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\n<mask token>\n", "step-2": "<mask token>\nprint(CP_LSB)\n<mask token>\nprint(Wi_MSB)\nprint(Wi_LSB)\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\nprint(AtoD(0.5))\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\nprint(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\n<mask token>\nfor i in range(2 * n):\n if y[i + 1] != y[i]:\n bin_size[y[i]] = x[i + 1] - left\n left = x[i + 1]\n<mask token>\nplt.plot(bin_num[1:4094], DNL[1:4094])\nplt.show()\n", "step-3": "<mask token>\nCi_MSB = [32, 16, 8, 4, 2, 1]\nCi_LSB = [16, 8, 4, 2, 1]\nCB = 1\nCP_B = 0\nCP_LSB = (32 - 1) * (CB + CP_B - 1) + 10\nprint(CP_LSB)\nCP_MSB = 0\nCsum_LSB = sum(Ci_LSB) + CP_LSB\nCsum_MSB = sum(Ci_MSB) + CP_MSB\nCx = Csum_LSB * Csum_MSB + (CB + CP_B) * Csum_LSB + (CB + CP_B) * Csum_MSB\nWi_MSB = [(Ci_MSB[i] * (CB + CP_B + Csum_LSB) / Cx) for i in range(6)]\nWi_LSB = [(Ci_LSB[i] * (CB + CP_B) / Cx) for i in range(5)]\nprint(Wi_MSB)\nprint(Wi_LSB)\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 
else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\nprint(AtoD(0.5))\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\nprint(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\nn = 1000000\nx = [(-1 + i / n) for i in range(2 * n + 1)]\ny = [AtoD(v) for v in x]\nbin_num = [i for i in range(4096)]\nbin_size = [(0) for i in range(4096)]\nleft = x[0]\nfor i in range(2 * n):\n if y[i + 1] != y[i]:\n bin_size[y[i]] = x[i + 1] - left\n left = x[i + 1]\nDNL = [(data * 2047 - 1) for data in bin_size]\nplt.plot(bin_num[1:4094], DNL[1:4094])\nplt.show()\n", "step-4": "import matplotlib.pyplot as plt\nCi_MSB = [32, 16, 8, 4, 2, 1]\nCi_LSB = [16, 8, 4, 2, 1]\nCB = 1\nCP_B = 0\nCP_LSB = (32 - 1) * (CB + CP_B - 1) + 10\nprint(CP_LSB)\nCP_MSB = 0\nCsum_LSB = sum(Ci_LSB) + CP_LSB\nCsum_MSB = sum(Ci_MSB) + CP_MSB\nCx = Csum_LSB * Csum_MSB + (CB + CP_B) * Csum_LSB + (CB + CP_B) * Csum_MSB\nWi_MSB = [(Ci_MSB[i] * (CB + CP_B + Csum_LSB) / Cx) for i in range(6)]\nWi_LSB = [(Ci_LSB[i] * (CB + CP_B) / Cx) for i in range(5)]\nprint(Wi_MSB)\nprint(Wi_LSB)\n\n\ndef AtoD(vin):\n code = [(0) for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i] - 0.5) * 2\n code[i + 1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i + 6] - 0.5) * 2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\n\n\nprint(AtoD(0.5))\n\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2 ** (11 - i) * code[i] / 2048\n return v\n\n\nprint(DtoA_ideal([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))\nn = 1000000\nx = [(-1 + i / n) for i in range(2 * n + 1)]\ny = [AtoD(v) for v 
in x]\nbin_num = [i for i in range(4096)]\nbin_size = [(0) for i in range(4096)]\nleft = x[0]\nfor i in range(2 * n):\n if y[i + 1] != y[i]:\n bin_size[y[i]] = x[i + 1] - left\n left = x[i + 1]\nDNL = [(data * 2047 - 1) for data in bin_size]\nplt.plot(bin_num[1:4094], DNL[1:4094])\nplt.show()\n", "step-5": "import matplotlib.pyplot as plt\n\nCi_MSB = [32,16,8,4,2,1]\nCi_LSB = [16,8,4,2,1]\nCB = 1\nCP_B = 0\nCP_LSB = (32-1)*(CB+CP_B-1)+10\nprint(CP_LSB)\nCP_MSB = 0\nCsum_LSB = sum(Ci_LSB)+CP_LSB\nCsum_MSB = sum(Ci_MSB)+CP_MSB\nCx = Csum_LSB*Csum_MSB+(CB+CP_B)*Csum_LSB+(CB+CP_B)*Csum_MSB\nWi_MSB = [Ci_MSB[i]*(CB+CP_B+Csum_LSB)/Cx for i in range (6)]\nWi_LSB = [Ci_LSB[i]*(CB+CP_B)/Cx for i in range (5)]\nprint(Wi_MSB)\nprint(Wi_LSB)\n\ndef AtoD(vin):\n code = [0 for i in range(12)]\n code[0] = 1 if vin > 0 else 0\n for i in range(6):\n vin = vin - Wi_MSB[i] * (code[i]-0.5)*2\n code[i+1] = 1 if vin > 0 else 0\n for i in range(5):\n vin = vin - Wi_LSB[i] * (code[i+6]-0.5)*2\n code[i + 7] = 1 if vin > 0 else 0\n dec_num = 0\n for b in code:\n dec_num = dec_num * 2 + b\n return dec_num\nprint(AtoD(0.50))\n\ndef DtoA_ideal(code):\n v = -1.0\n for i in range(12):\n v += 2**(11-i)*code[i]/2048\n return v\nprint(DtoA_ideal([1,1,1,1,1,1,1,1,1,1,1,1]))\n\n\nn=1000000\nx = [-1+i/n for i in range(2*n+1)]\ny = [AtoD(v) for v in x]\n# print(y[int(n/6):int(n/6)+100])\n\nbin_num = [i for i in range(4096)]\nbin_size = [0 for i in range(4096)]\n\nleft = x[0]\nfor i in range(2*n):\n if y[i+1]!=y[i]:\n bin_size[y[i]] = x[i+1] - left\n left = x[i+1]\n# print(bin_size)\nDNL = [data*2047 -1 for data in bin_size]\nplt.plot(bin_num[1:4094],DNL[1:4094])\n# plt.xlim(1000,1005)\n\nplt.show()\n\n\n\n\n\n# y = [DtoA_ideal(AtoD(v)) for v in x]\n# plt.plot(x,y)\n# plt.xlim(-0.01,0)\n# plt.ylim(-0.01,0)\n# plt.show()\n# def Vout(index):\n# V = 0.0\n# for i in range(6):\n# V = V + Wi_MSB[i] * int(format(index,'b').zfill(11)[i])*1\n# for i in range(5):\n# V = V + Wi_LSB[i] * 
int(format(index,'b').zfill(11)[i+6])*1\n# return V\n# print(Vout(2047))\n#\n# x = [i for i in range(2048)]\n# y = [Vout(i) for i in range(2048)]\n# DNL = [0]+[y[i+1]-y[i]-Vout(2047)/2047 for i in range(2047)]\n# DNL = [data*2048 for data in DNL]\n# INL = [y[i] -i*Vout(2047)/2047 for i in range (2048)]\n# INL = [data*2048 for data in INL]\n#\n# plt.plot(x,DNL)\n# plt.show()\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
import numpy as np import dl_style_transfer.workspace.data_helpers import os here = os.path.dirname(os.path.abspath(__file__)) sents = list(open(os.path.join(here, 'yelp_sentences.txt'))) + list(open(os.path.join(here, 'shake_sentences.txt'))) thresh = 5 col = dict() word_to_ind = dict() ind_to_word = dict() def __line_into_col__(line): tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(" ") for wor in tokens: if col.get(wor) is None: col[wor] = 1 else: col[wor] = col[wor] + 1 for l in sents: __line_into_col__(l) lis = list(col.items()) lis.sort(key=lambda count: count[1], reverse=True) for i, word in enumerate(lis): word_to_ind[word[0]] = i ind_to_word[i] = word[0] voc_len = len(word_to_ind) shape = (len(sents), voc_len) def get_small_bag(): bag = [] for sent in sents: sbag =[] for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(" "): sbag.append(word_to_ind[wor]) bag.append(sbag) return bag def get_bag(): bag = np.zeros(shape) for j,sent in enumerate(sents): for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(" "): bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1 return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1) def string_to_vec(string): tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(" ") vec = np.zeros(voc_len) for wor in tokens: vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1 return vec def get_ryans_strange_input(): vec = [] for l in sents: vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l)) return np.array([word_to_ind[i] for l in vec for i in l.split(" ")]) def vocab_length(): return voc_len
normal
{ "blob_id": "2317a2fff493588ad6cc3a4ac2b600fbf1c5583c", "index": 8594, "step-1": "<mask token>\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\n<mask token>\n\n\ndef vocab_length():\n return voc_len\n", "step-2": "<mask token>\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\n<mask token>\n\n\ndef get_small_bag():\n bag = []\n for sent in sents:\n sbag = []\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n sbag.append(word_to_ind[wor])\n bag.append(sbag)\n return bag\n\n\n<mask token>\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])\n\n\ndef vocab_length():\n return voc_len\n", "step-3": "<mask token>\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\n<mask token>\n\n\ndef get_small_bag():\n bag = []\n for sent in sents:\n sbag = []\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n sbag.append(word_to_ind[wor])\n bag.append(sbag)\n return bag\n\n\ndef get_bag():\n bag = np.zeros(shape)\n for j, sent in enumerate(sents):\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1\n return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)\n\n\ndef string_to_vec(string):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(\n ' ')\n vec = 
np.zeros(voc_len)\n for wor in tokens:\n vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1\n return vec\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(' ')])\n\n\ndef vocab_length():\n return voc_len\n", "step-4": "<mask token>\nhere = os.path.dirname(os.path.abspath(__file__))\nsents = list(open(os.path.join(here, 'yelp_sentences.txt'))) + list(open(os\n .path.join(here, 'shake_sentences.txt')))\nthresh = 5\ncol = dict()\nword_to_ind = dict()\nind_to_word = dict()\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(' '\n )\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\nfor l in sents:\n __line_into_col__(l)\nlis = list(col.items())\nlis.sort(key=lambda count: count[1], reverse=True)\nfor i, word in enumerate(lis):\n word_to_ind[word[0]] = i\n ind_to_word[i] = word[0]\nvoc_len = len(word_to_ind)\nshape = len(sents), voc_len\n\n\ndef get_small_bag():\n bag = []\n for sent in sents:\n sbag = []\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n sbag.append(word_to_ind[wor])\n bag.append(sbag)\n return bag\n\n\ndef get_bag():\n bag = np.zeros(shape)\n for j, sent in enumerate(sents):\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent\n ).split(' '):\n bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1\n return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)\n\n\ndef string_to_vec(string):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(\n ' ')\n vec = np.zeros(voc_len)\n for wor in tokens:\n vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1\n return vec\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l 
in vec for i in l.split(' ')])\n\n\ndef vocab_length():\n return voc_len\n", "step-5": "import numpy as np\nimport dl_style_transfer.workspace.data_helpers\nimport os\n\n\nhere = os.path.dirname(os.path.abspath(__file__))\n\nsents = list(open(os.path.join(here, 'yelp_sentences.txt'))) + list(open(os.path.join(here, 'shake_sentences.txt')))\n\nthresh = 5\n\ncol = dict()\nword_to_ind = dict()\nind_to_word = dict()\n\n\ndef __line_into_col__(line):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(line).split(\" \")\n for wor in tokens:\n if col.get(wor) is None:\n col[wor] = 1\n else:\n col[wor] = col[wor] + 1\n\n\nfor l in sents:\n __line_into_col__(l)\n\nlis = list(col.items())\nlis.sort(key=lambda count: count[1], reverse=True)\nfor i, word in enumerate(lis):\n word_to_ind[word[0]] = i\n ind_to_word[i] = word[0]\n\nvoc_len = len(word_to_ind)\n\nshape = (len(sents), voc_len)\n\ndef get_small_bag():\n\tbag = []\n\tfor sent in sents:\n\t\tsbag =[]\n\t\tfor wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(\" \"):\n\t\t\tsbag.append(word_to_ind[wor])\n\t\tbag.append(sbag)\n\treturn bag\n\t\t\t\n\t\t\t\n\ndef get_bag():\n bag = np.zeros(shape)\n for j,sent in enumerate(sents):\n for wor in dl_style_transfer.workspace.data_helpers.clean_str(sent).split(\" \"):\n bag[j, word_to_ind[wor]] = bag[j, word_to_ind[wor]] + 1\n return np.log(1 + bag) / np.max(np.log(1 + bag), axis=1)\n\n\ndef string_to_vec(string):\n tokens = dl_style_transfer.workspace.data_helpers.clean_str(string).split(\" \")\n vec = np.zeros(voc_len)\n for wor in tokens:\n vec[word_to_ind[wor]] = vec[word_to_ind[wor]] + 1\n return vec\n\n\ndef get_ryans_strange_input():\n vec = []\n for l in sents:\n vec.append(dl_style_transfer.workspace.data_helpers.clean_str(l))\n return np.array([word_to_ind[i] for l in vec for i in l.split(\" \")])\n\n\ndef vocab_length():\n return voc_len\n", "step-ids": [ 2, 4, 6, 8, 10 ] }
[ 2, 4, 6, 8, 10 ]
<|reserved_special_token_0|> def calc_frames(new_time): old_time = datetime(new_time.year - 1, 11, 30, 23) days = (new_time - old_time).days sec = (new_time - old_time).seconds hours = days * 24 + sec / 3600 return int(hours) def read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn): ff = open(filname, 'r') line1 = ff.readline() line2 = ff.readline() line3 = ff.readline() line4 = ff.readline() plat = [] plon = [] line = ff.readline() while line: if line.strip().split(' ')[0] == 'TRACK_ID': num = int(ff.readline().strip().split(' ')[-1]) for nl in range(0, num, 1): data = list(map(float, ff.readline().strip().split(' '))) if str(int(data[0])) == fixtime and data[1] <= flonr and data[1 ] >= flonl and data[2] <= flatn and data[2] >= flats: plat.append(data[2]) plon.append(data[1]) line = ff.readline() ff.close() print('%s total feature point in %s : %d' % (filname, fixtime, len(plat))) return plat, plon <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def calc_frames(new_time): old_time = datetime(new_time.year - 1, 11, 30, 23) days = (new_time - old_time).days sec = (new_time - old_time).seconds hours = days * 24 + sec / 3600 return int(hours) def read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn): ff = open(filname, 'r') line1 = ff.readline() line2 = ff.readline() line3 = ff.readline() line4 = ff.readline() plat = [] plon = [] line = ff.readline() while line: if line.strip().split(' ')[0] == 'TRACK_ID': num = int(ff.readline().strip().split(' ')[-1]) for nl in range(0, num, 1): data = list(map(float, ff.readline().strip().split(' '))) if str(int(data[0])) == fixtime and data[1] <= flonr and data[1 ] >= flonl and data[2] <= flatn and data[2] >= flats: plat.append(data[2]) plon.append(data[1]) line = ff.readline() ff.close() print('%s total feature point in %s : %d' % (filname, fixtime, len(plat))) return plat, plon <|reserved_special_token_0|> del ds gc.collect() <|reserved_special_token_0|> 
plt.rcParams.update(params) for nt in range(len(dtime)): fig = plt.figure(figsize=(12, 12), dpi=100) ax = fig.subplots(nrow, ncol, subplot_kw=dict(projection=ccrs. PlateCarree())) for nl in range(len(lev)): var = f[varname].sel(time=dtime[nt], level=lev[nl], longitude=ilon, latitude=ilat) var.data = var.data / 9.8 path2 = '%sERA5_VOR%d_1hr_%d_DET/' % (path, lev[nl], dtime[nt].year) plat, plon = read_point_fixtime(path2 + 'fft_trs_pos', dtime[nt]. strftime('%Y%m%d%H'), lonl, lonr, lats, latn) fvor = xr.open_dataset('%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc' % (path2, lev[nl], dtime[nt].year, nfilt)) var1 = fvor['var'].sel(time=calc_frames(dtime[nt]), level=1, lon= ilon, lat=ilat, method='nearest').load() var1.values = var1.values * 100000.0 axe = ax[nl] axe.add_feature(cfeat.COASTLINE.with_scale('110m'), edgecolor= 'black', linewidth=0.8, zorder=1) axe.set_title('%s %dhPa (%d)' % (dtime[nt].strftime( '%Y-%m-%d-%H:00'), lev[nl], len(plat)), fontsize=title_font) shad = axe.contourf(ilon, ilat, var1, cnlevels, transform=ccrs. PlateCarree(), cmap=fcolors, extend='both', norm=norm) cont = axe.contour(ilon, ilat, var, np.arange(1000, 15000, cnlvl2[ nl]), transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5) pint = axe.scatter(plon, plat, 10.0 ** 2, color='k', marker='o', transform=ccrs.PlateCarree()) topo = axe.contour(ilon, ilat, phis, [1500, 3000], transform=ccrs. 
PlateCarree(), colors='black', linewidths=1.2) axe.set_yticks(np.arange(lats, latn, lat_sp), crs=ccrs.PlateCarree()) axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol='')) axe.set_xticks(np.arange(lonl, lonr, lon_sp), crs=ccrs.PlateCarree()) axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol='')) position = fig.add_axes([0.85, bmlo + 0.1, 0.015, 0.7]) cb = plt.colorbar(shad, cax=position, orientation='vertical') cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font) plt.tight_layout(rect=(0, bmlo, 1, 1)) plt.savefig(figdir + 'filt_vor_%s.png' % dtime[nt].strftime('%Y%m%d%H'), bbox_inches='tight', pad_inches=0.01) if create_gif == True: figname = figdir + 'filt_vor_*.png' fn_stream = subprocess.check_output('ls ' + figname, shell=True).decode( 'utf-8') fn_list = fn_stream.split() print(fn_list[0]) print('filenumber : ' + str(len(fn_list))) gif_name = figname.rsplit('_', 1)[0] + '.gif' frames = [] for itm in fn_list: frame = Image.open(itm) frames.append(frame) frames[0].save(gif_name, save_all=True, append_images=frames[1:], duration=1000, loop=0, disposal=1) subprocess.run('rm -f %s' % figname, shell=True) <|reserved_special_token_1|> <|reserved_special_token_0|> def calc_frames(new_time): old_time = datetime(new_time.year - 1, 11, 30, 23) days = (new_time - old_time).days sec = (new_time - old_time).seconds hours = days * 24 + sec / 3600 return int(hours) def read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn): ff = open(filname, 'r') line1 = ff.readline() line2 = ff.readline() line3 = ff.readline() line4 = ff.readline() plat = [] plon = [] line = ff.readline() while line: if line.strip().split(' ')[0] == 'TRACK_ID': num = int(ff.readline().strip().split(' ')[-1]) for nl in range(0, num, 1): data = list(map(float, ff.readline().strip().split(' '))) if str(int(data[0])) == fixtime and data[1] <= flonr and data[1 ] >= flonl and data[2] <= flatn and data[2] >= flats: plat.append(data[2]) plon.append(data[1]) line = 
ff.readline() ff.close() print('%s total feature point in %s : %d' % (filname, fixtime, len(plat))) return plat, plon lonl = 0 lonr = 150 lats = 15 latn = 70 lat_sp = 20 lon_sp = 30 nrow = 3 ncol = 1 bmlo = 0.1 title_font = 18 label_font = 14 dtime = pd.date_range(start='1995-01-01 00', periods=60, freq='6H', closed=None ) create_gif = True nfilt = 'T63' lev = [850, 500, 250] cnlvl = [[-8, 1]] cnlvl2 = [30, 50, 100] varname = 'z' path = '/home/users/qd201969/ERA5-1HR-lev/' datapath = '/gws/nopw/j04/ncas_generic/users/renql/' figdir = '/home/users/qd201969/uor_track/fig/' f = xr.open_dataset('%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc' % (datapath, varname, varname, dtime[0].year)) lat = f['latitude'].data lon = f['longitude'].data ilon = lon[(lon >= lonl) & (lon <= lonr)] ilat = lat[(lat >= lats) & (lat <= latn)] ds = xr.open_dataset('/home/users/qd201969/gtopo30_0.9x1.25.nc') phis = ds['PHIS'].sel(lon=ilon, lat=ilat, method='nearest').load() phis = phis / 9.8 del ds gc.collect() nl = 0 fcolors = cmaps.BlueDarkRed18 cnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0] + cnlvl[nl][1] * (fcolors.N - 1), cnlvl[nl][1]) norm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N, extend= 'both') params = {'legend.fontsize': label_font, 'axes.labelsize': label_font, 'axes.titlesize': label_font, 'xtick.labelsize': label_font, 'ytick.labelsize': label_font} plt.rcParams.update(params) for nt in range(len(dtime)): fig = plt.figure(figsize=(12, 12), dpi=100) ax = fig.subplots(nrow, ncol, subplot_kw=dict(projection=ccrs. PlateCarree())) for nl in range(len(lev)): var = f[varname].sel(time=dtime[nt], level=lev[nl], longitude=ilon, latitude=ilat) var.data = var.data / 9.8 path2 = '%sERA5_VOR%d_1hr_%d_DET/' % (path, lev[nl], dtime[nt].year) plat, plon = read_point_fixtime(path2 + 'fft_trs_pos', dtime[nt]. 
strftime('%Y%m%d%H'), lonl, lonr, lats, latn) fvor = xr.open_dataset('%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc' % (path2, lev[nl], dtime[nt].year, nfilt)) var1 = fvor['var'].sel(time=calc_frames(dtime[nt]), level=1, lon= ilon, lat=ilat, method='nearest').load() var1.values = var1.values * 100000.0 axe = ax[nl] axe.add_feature(cfeat.COASTLINE.with_scale('110m'), edgecolor= 'black', linewidth=0.8, zorder=1) axe.set_title('%s %dhPa (%d)' % (dtime[nt].strftime( '%Y-%m-%d-%H:00'), lev[nl], len(plat)), fontsize=title_font) shad = axe.contourf(ilon, ilat, var1, cnlevels, transform=ccrs. PlateCarree(), cmap=fcolors, extend='both', norm=norm) cont = axe.contour(ilon, ilat, var, np.arange(1000, 15000, cnlvl2[ nl]), transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5) pint = axe.scatter(plon, plat, 10.0 ** 2, color='k', marker='o', transform=ccrs.PlateCarree()) topo = axe.contour(ilon, ilat, phis, [1500, 3000], transform=ccrs. PlateCarree(), colors='black', linewidths=1.2) axe.set_yticks(np.arange(lats, latn, lat_sp), crs=ccrs.PlateCarree()) axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol='')) axe.set_xticks(np.arange(lonl, lonr, lon_sp), crs=ccrs.PlateCarree()) axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol='')) position = fig.add_axes([0.85, bmlo + 0.1, 0.015, 0.7]) cb = plt.colorbar(shad, cax=position, orientation='vertical') cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font) plt.tight_layout(rect=(0, bmlo, 1, 1)) plt.savefig(figdir + 'filt_vor_%s.png' % dtime[nt].strftime('%Y%m%d%H'), bbox_inches='tight', pad_inches=0.01) if create_gif == True: figname = figdir + 'filt_vor_*.png' fn_stream = subprocess.check_output('ls ' + figname, shell=True).decode( 'utf-8') fn_list = fn_stream.split() print(fn_list[0]) print('filenumber : ' + str(len(fn_list))) gif_name = figname.rsplit('_', 1)[0] + '.gif' frames = [] for itm in fn_list: frame = Image.open(itm) frames.append(frame) frames[0].save(gif_name, save_all=True, 
append_images=frames[1:], duration=1000, loop=0, disposal=1) subprocess.run('rm -f %s' % figname, shell=True) <|reserved_special_token_1|> <|reserved_special_token_0|> import sys import subprocess import xarray as xr import numpy as np import pandas as pd from datetime import datetime import gc import matplotlib import matplotlib.pyplot as plt from matplotlib import colors import cartopy.crs as ccrs import cartopy.feature as cfeat from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter import cmaps from PIL import Image, ImageDraw, ImageSequence def calc_frames(new_time): old_time = datetime(new_time.year - 1, 11, 30, 23) days = (new_time - old_time).days sec = (new_time - old_time).seconds hours = days * 24 + sec / 3600 return int(hours) def read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn): ff = open(filname, 'r') line1 = ff.readline() line2 = ff.readline() line3 = ff.readline() line4 = ff.readline() plat = [] plon = [] line = ff.readline() while line: if line.strip().split(' ')[0] == 'TRACK_ID': num = int(ff.readline().strip().split(' ')[-1]) for nl in range(0, num, 1): data = list(map(float, ff.readline().strip().split(' '))) if str(int(data[0])) == fixtime and data[1] <= flonr and data[1 ] >= flonl and data[2] <= flatn and data[2] >= flats: plat.append(data[2]) plon.append(data[1]) line = ff.readline() ff.close() print('%s total feature point in %s : %d' % (filname, fixtime, len(plat))) return plat, plon lonl = 0 lonr = 150 lats = 15 latn = 70 lat_sp = 20 lon_sp = 30 nrow = 3 ncol = 1 bmlo = 0.1 title_font = 18 label_font = 14 dtime = pd.date_range(start='1995-01-01 00', periods=60, freq='6H', closed=None ) create_gif = True nfilt = 'T63' lev = [850, 500, 250] cnlvl = [[-8, 1]] cnlvl2 = [30, 50, 100] varname = 'z' path = '/home/users/qd201969/ERA5-1HR-lev/' datapath = '/gws/nopw/j04/ncas_generic/users/renql/' figdir = '/home/users/qd201969/uor_track/fig/' f = xr.open_dataset('%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc' % (datapath, 
varname, varname, dtime[0].year)) lat = f['latitude'].data lon = f['longitude'].data ilon = lon[(lon >= lonl) & (lon <= lonr)] ilat = lat[(lat >= lats) & (lat <= latn)] ds = xr.open_dataset('/home/users/qd201969/gtopo30_0.9x1.25.nc') phis = ds['PHIS'].sel(lon=ilon, lat=ilat, method='nearest').load() phis = phis / 9.8 del ds gc.collect() nl = 0 fcolors = cmaps.BlueDarkRed18 cnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0] + cnlvl[nl][1] * (fcolors.N - 1), cnlvl[nl][1]) norm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N, extend= 'both') params = {'legend.fontsize': label_font, 'axes.labelsize': label_font, 'axes.titlesize': label_font, 'xtick.labelsize': label_font, 'ytick.labelsize': label_font} plt.rcParams.update(params) for nt in range(len(dtime)): fig = plt.figure(figsize=(12, 12), dpi=100) ax = fig.subplots(nrow, ncol, subplot_kw=dict(projection=ccrs. PlateCarree())) for nl in range(len(lev)): var = f[varname].sel(time=dtime[nt], level=lev[nl], longitude=ilon, latitude=ilat) var.data = var.data / 9.8 path2 = '%sERA5_VOR%d_1hr_%d_DET/' % (path, lev[nl], dtime[nt].year) plat, plon = read_point_fixtime(path2 + 'fft_trs_pos', dtime[nt]. strftime('%Y%m%d%H'), lonl, lonr, lats, latn) fvor = xr.open_dataset('%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc' % (path2, lev[nl], dtime[nt].year, nfilt)) var1 = fvor['var'].sel(time=calc_frames(dtime[nt]), level=1, lon= ilon, lat=ilat, method='nearest').load() var1.values = var1.values * 100000.0 axe = ax[nl] axe.add_feature(cfeat.COASTLINE.with_scale('110m'), edgecolor= 'black', linewidth=0.8, zorder=1) axe.set_title('%s %dhPa (%d)' % (dtime[nt].strftime( '%Y-%m-%d-%H:00'), lev[nl], len(plat)), fontsize=title_font) shad = axe.contourf(ilon, ilat, var1, cnlevels, transform=ccrs. 
PlateCarree(), cmap=fcolors, extend='both', norm=norm) cont = axe.contour(ilon, ilat, var, np.arange(1000, 15000, cnlvl2[ nl]), transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5) pint = axe.scatter(plon, plat, 10.0 ** 2, color='k', marker='o', transform=ccrs.PlateCarree()) topo = axe.contour(ilon, ilat, phis, [1500, 3000], transform=ccrs. PlateCarree(), colors='black', linewidths=1.2) axe.set_yticks(np.arange(lats, latn, lat_sp), crs=ccrs.PlateCarree()) axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol='')) axe.set_xticks(np.arange(lonl, lonr, lon_sp), crs=ccrs.PlateCarree()) axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol='')) position = fig.add_axes([0.85, bmlo + 0.1, 0.015, 0.7]) cb = plt.colorbar(shad, cax=position, orientation='vertical') cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font) plt.tight_layout(rect=(0, bmlo, 1, 1)) plt.savefig(figdir + 'filt_vor_%s.png' % dtime[nt].strftime('%Y%m%d%H'), bbox_inches='tight', pad_inches=0.01) if create_gif == True: figname = figdir + 'filt_vor_*.png' fn_stream = subprocess.check_output('ls ' + figname, shell=True).decode( 'utf-8') fn_list = fn_stream.split() print(fn_list[0]) print('filenumber : ' + str(len(fn_list))) gif_name = figname.rsplit('_', 1)[0] + '.gif' frames = [] for itm in fn_list: frame = Image.open(itm) frames.append(frame) frames[0].save(gif_name, save_all=True, append_images=frames[1:], duration=1000, loop=0, disposal=1) subprocess.run('rm -f %s' % figname, shell=True) <|reserved_special_token_1|> #!/usr/bin/env python ''' fix a time and then draw the instant geopotential (contour) from /gws/nopw/j04/ncas_generic/users/renql/ERA5_subdaily/ERA5_NH_z_1989.nc, spatial filtered relative vorticity (shaded) from ~/ERA5-1HR-lev/ERA5_VOR850_1hr_1995_DET/ERA5_VOR850_1hr_1995_DET_T63filt.nc and identified feature points from ~/ERA5-1HR-lev/ERA5_VOR850_1hr_1995_DET/fft_trs_pos Loop through the height (850, 500, 250) 20211116 ''' import sys import subprocess 
import xarray as xr import numpy as np import pandas as pd from datetime import datetime import gc #garbage collector import matplotlib import matplotlib.pyplot as plt from matplotlib import colors import cartopy.crs as ccrs import cartopy.feature as cfeat from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter import cmaps from PIL import Image, ImageDraw, ImageSequence def calc_frames(new_time): old_time = datetime(new_time.year-1, 11, 30, 23) days = (new_time - old_time).days sec = (new_time - old_time).seconds hours = days * 24 + sec/3600 return int(hours) def read_point_fixtime(filname,fixtime,flonl,flonr,flats,flatn): ff = open(filname,"r") line1 = ff.readline() line2 = ff.readline() line3 = ff.readline() line4 = ff.readline() plat = [] plon = [] line = ff.readline() while line: if line.strip().split(" ")[0] == "TRACK_ID": num = int(ff.readline().strip().split(" ")[-1]) for nl in range(0,num,1): data = list(map(float,ff.readline().strip().split(" "))) if str(int(data[0])) == fixtime and \ data[1]<=flonr and data[1] >= flonl and data[2]<=flatn and data[2]>=flats : plat.append(data[2]) plon.append(data[1]) line = ff.readline() ff.close() print("%s total feature point in %s : %d"%(filname,fixtime,len(plat))) return plat, plon lonl=0 #0 # lonr=150#360# lats=15 #0 # latn=70 #90 # lat_sp = 20 lon_sp = 30 nrow = 3 ncol = 1 bmlo = 0.1 title_font=18 label_font=14 dtime = pd.date_range(start='1995-01-01 00',periods=60, freq='6H',closed=None) #dtime = pd.date_range(start='1995-01-01 00',end='1995-01-15 00', freq='6H',closed=None) create_gif = True #False# nfilt="T63" lev = [850,500,250] cnlvl =[[-8 ,1 ]] cnlvl2 = [30,50,100] varname = 'z' path = '/home/users/qd201969/ERA5-1HR-lev/' datapath = "/gws/nopw/j04/ncas_generic/users/renql/"#t/ERA5_NH_t_1989.nc figdir = "/home/users/qd201969/uor_track/fig/" f = xr.open_dataset("%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc"%(datapath,varname,varname,dtime[0].year)) lat = f['latitude'].data lon = f['longitude'].data ilon = 
lon[(lon>=lonl) & (lon<=lonr)] ilat = lat[(lat>=lats) & (lat<=latn)] ds = xr.open_dataset("/home/users/qd201969/gtopo30_0.9x1.25.nc") phis = ds['PHIS'].sel(lon=ilon,lat=ilat,method="nearest").load() phis = phis/9.8 # transfer from m2/s2 to m del ds gc.collect() nl = 0 fcolors = cmaps.BlueDarkRed18 cnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0]+cnlvl[nl][1]*(fcolors.N-1), cnlvl[nl][1]) norm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N,extend='both') params = {'legend.fontsize': label_font, 'axes.labelsize': label_font, 'axes.titlesize':label_font, 'xtick.labelsize':label_font, 'ytick.labelsize':label_font} plt.rcParams.update(params) for nt in range(len(dtime)): fig = plt.figure(figsize=(12,12),dpi=100) ax = fig.subplots(nrow,ncol, subplot_kw=dict(projection=ccrs.PlateCarree())) #sharex=True, sharey=True for nl in range(len(lev)): var = f[varname].sel(time=dtime[nt],level=lev[nl],longitude=ilon,latitude=ilat) var.data = var.data/9.8 path2 = "%sERA5_VOR%d_1hr_%d_DET/"%(path,lev[nl],dtime[nt].year) plat, plon = read_point_fixtime(path2+"fft_trs_pos",dtime[nt].strftime('%Y%m%d%H'),lonl,lonr,lats,latn) fvor = xr.open_dataset("%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc"%(path2,lev[nl],dtime[nt].year,nfilt)) var1 = fvor['var'].sel(time=calc_frames(dtime[nt]),level = 1,lon=ilon,lat=ilat,method="nearest").load() #fvor = xr.open_dataset("%sERA5_VOR_1h_dec_jan/ERA5_VOR%d_1hr_dec-jan%d_DET.nc"%(datapath,lev[nl],dtime[nt].year)) #var1 = fvor['var138'].sel(time=dtime[nt],lev=float(lev[nl]*100),lat=ilat,lon=ilon,method="nearest").load() var1.values = var1.values*1e5 axe = ax[nl] axe.add_feature(cfeat.COASTLINE.with_scale('110m'),edgecolor='black', linewidth=0.8, zorder=1) axe.set_title("%s %dhPa (%d)"%(dtime[nt].strftime('%Y-%m-%d-%H:00'), lev[nl], len(plat)),fontsize=title_font) shad = axe.contourf(ilon, ilat, var1, cnlevels, transform=ccrs.PlateCarree(),cmap=fcolors,extend='both',norm=norm) cont = axe.contour(ilon, ilat, var, np.arange(1000,15000,cnlvl2[nl]), 
transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5) #pint = axe.plot(plon,plat,color='darkviolet', marker='o', markersize=12, transform=ccrs.PlateCarree()) pint = axe.scatter(plon,plat,10.0**2,color='k', marker='o', transform=ccrs.PlateCarree()) topo = axe.contour(ilon, ilat, phis, [1500,3000], transform=ccrs.PlateCarree(),colors='black',linewidths=1.2) axe.set_yticks(np.arange(lats,latn,lat_sp), crs=ccrs.PlateCarree()) axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol='')) axe.set_xticks(np.arange(lonl,lonr,lon_sp), crs=ccrs.PlateCarree()) axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol='')) position = fig.add_axes([0.85, bmlo+0.1, 0.015, 0.7]) #left, bottom, width, height cb = plt.colorbar(shad, cax=position ,orientation='vertical')#, shrink=.9) cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font) #, weight='bold' plt.tight_layout(rect=(0,bmlo,1,1)) plt.savefig(figdir+"filt_vor_%s.png"%(dtime[nt].strftime('%Y%m%d%H')), bbox_inches='tight',pad_inches=0.01) if create_gif == True: figname = figdir+"filt_vor_*.png" fn_stream = subprocess.check_output("ls "+figname, shell=True).decode('utf-8') fn_list = fn_stream.split() print(fn_list[0]) print('filenumber : '+str(len(fn_list))) gif_name = figname.rsplit("_",1)[0]+".gif" frames = [] for itm in fn_list: frame = Image.open(itm) frames.append(frame) frames[0].save(gif_name, save_all=True, append_images=frames[1:],\ duration = 1000, loop=0, disposal=1) subprocess.run('rm -f %s'%(figname),shell=True)
flexible
{ "blob_id": "09a468e11651eb60e0805c151bda270e0ebecca9", "index": 4853, "step-1": "<mask token>\n\n\ndef calc_frames(new_time):\n old_time = datetime(new_time.year - 1, 11, 30, 23)\n days = (new_time - old_time).days\n sec = (new_time - old_time).seconds\n hours = days * 24 + sec / 3600\n return int(hours)\n\n\ndef read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn):\n ff = open(filname, 'r')\n line1 = ff.readline()\n line2 = ff.readline()\n line3 = ff.readline()\n line4 = ff.readline()\n plat = []\n plon = []\n line = ff.readline()\n while line:\n if line.strip().split(' ')[0] == 'TRACK_ID':\n num = int(ff.readline().strip().split(' ')[-1])\n for nl in range(0, num, 1):\n data = list(map(float, ff.readline().strip().split(' ')))\n if str(int(data[0])) == fixtime and data[1] <= flonr and data[1\n ] >= flonl and data[2] <= flatn and data[2] >= flats:\n plat.append(data[2])\n plon.append(data[1])\n line = ff.readline()\n ff.close()\n print('%s total feature point in %s : %d' % (filname, fixtime, len(plat)))\n return plat, plon\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef calc_frames(new_time):\n old_time = datetime(new_time.year - 1, 11, 30, 23)\n days = (new_time - old_time).days\n sec = (new_time - old_time).seconds\n hours = days * 24 + sec / 3600\n return int(hours)\n\n\ndef read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn):\n ff = open(filname, 'r')\n line1 = ff.readline()\n line2 = ff.readline()\n line3 = ff.readline()\n line4 = ff.readline()\n plat = []\n plon = []\n line = ff.readline()\n while line:\n if line.strip().split(' ')[0] == 'TRACK_ID':\n num = int(ff.readline().strip().split(' ')[-1])\n for nl in range(0, num, 1):\n data = list(map(float, ff.readline().strip().split(' ')))\n if str(int(data[0])) == fixtime and data[1] <= flonr and data[1\n ] >= flonl and data[2] <= flatn and data[2] >= flats:\n plat.append(data[2])\n plon.append(data[1])\n line = ff.readline()\n ff.close()\n print('%s total feature point in %s 
: %d' % (filname, fixtime, len(plat)))\n return plat, plon\n\n\n<mask token>\ndel ds\ngc.collect()\n<mask token>\nplt.rcParams.update(params)\nfor nt in range(len(dtime)):\n fig = plt.figure(figsize=(12, 12), dpi=100)\n ax = fig.subplots(nrow, ncol, subplot_kw=dict(projection=ccrs.\n PlateCarree()))\n for nl in range(len(lev)):\n var = f[varname].sel(time=dtime[nt], level=lev[nl], longitude=ilon,\n latitude=ilat)\n var.data = var.data / 9.8\n path2 = '%sERA5_VOR%d_1hr_%d_DET/' % (path, lev[nl], dtime[nt].year)\n plat, plon = read_point_fixtime(path2 + 'fft_trs_pos', dtime[nt].\n strftime('%Y%m%d%H'), lonl, lonr, lats, latn)\n fvor = xr.open_dataset('%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc' % (path2,\n lev[nl], dtime[nt].year, nfilt))\n var1 = fvor['var'].sel(time=calc_frames(dtime[nt]), level=1, lon=\n ilon, lat=ilat, method='nearest').load()\n var1.values = var1.values * 100000.0\n axe = ax[nl]\n axe.add_feature(cfeat.COASTLINE.with_scale('110m'), edgecolor=\n 'black', linewidth=0.8, zorder=1)\n axe.set_title('%s %dhPa (%d)' % (dtime[nt].strftime(\n '%Y-%m-%d-%H:00'), lev[nl], len(plat)), fontsize=title_font)\n shad = axe.contourf(ilon, ilat, var1, cnlevels, transform=ccrs.\n PlateCarree(), cmap=fcolors, extend='both', norm=norm)\n cont = axe.contour(ilon, ilat, var, np.arange(1000, 15000, cnlvl2[\n nl]), transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5)\n pint = axe.scatter(plon, plat, 10.0 ** 2, color='k', marker='o',\n transform=ccrs.PlateCarree())\n topo = axe.contour(ilon, ilat, phis, [1500, 3000], transform=ccrs.\n PlateCarree(), colors='black', linewidths=1.2)\n axe.set_yticks(np.arange(lats, latn, lat_sp), crs=ccrs.PlateCarree())\n axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\n axe.set_xticks(np.arange(lonl, lonr, lon_sp), crs=ccrs.PlateCarree())\n axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n position = fig.add_axes([0.85, bmlo + 0.1, 0.015, 0.7])\n cb = plt.colorbar(shad, cax=position, 
orientation='vertical')\n cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font)\n plt.tight_layout(rect=(0, bmlo, 1, 1))\n plt.savefig(figdir + 'filt_vor_%s.png' % dtime[nt].strftime('%Y%m%d%H'),\n bbox_inches='tight', pad_inches=0.01)\nif create_gif == True:\n figname = figdir + 'filt_vor_*.png'\n fn_stream = subprocess.check_output('ls ' + figname, shell=True).decode(\n 'utf-8')\n fn_list = fn_stream.split()\n print(fn_list[0])\n print('filenumber : ' + str(len(fn_list)))\n gif_name = figname.rsplit('_', 1)[0] + '.gif'\n frames = []\n for itm in fn_list:\n frame = Image.open(itm)\n frames.append(frame)\n frames[0].save(gif_name, save_all=True, append_images=frames[1:],\n duration=1000, loop=0, disposal=1)\n subprocess.run('rm -f %s' % figname, shell=True)\n", "step-3": "<mask token>\n\n\ndef calc_frames(new_time):\n old_time = datetime(new_time.year - 1, 11, 30, 23)\n days = (new_time - old_time).days\n sec = (new_time - old_time).seconds\n hours = days * 24 + sec / 3600\n return int(hours)\n\n\ndef read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn):\n ff = open(filname, 'r')\n line1 = ff.readline()\n line2 = ff.readline()\n line3 = ff.readline()\n line4 = ff.readline()\n plat = []\n plon = []\n line = ff.readline()\n while line:\n if line.strip().split(' ')[0] == 'TRACK_ID':\n num = int(ff.readline().strip().split(' ')[-1])\n for nl in range(0, num, 1):\n data = list(map(float, ff.readline().strip().split(' ')))\n if str(int(data[0])) == fixtime and data[1] <= flonr and data[1\n ] >= flonl and data[2] <= flatn and data[2] >= flats:\n plat.append(data[2])\n plon.append(data[1])\n line = ff.readline()\n ff.close()\n print('%s total feature point in %s : %d' % (filname, fixtime, len(plat)))\n return plat, plon\n\n\nlonl = 0\nlonr = 150\nlats = 15\nlatn = 70\nlat_sp = 20\nlon_sp = 30\nnrow = 3\nncol = 1\nbmlo = 0.1\ntitle_font = 18\nlabel_font = 14\ndtime = pd.date_range(start='1995-01-01 00', periods=60, freq='6H', closed=None\n 
)\ncreate_gif = True\nnfilt = 'T63'\nlev = [850, 500, 250]\ncnlvl = [[-8, 1]]\ncnlvl2 = [30, 50, 100]\nvarname = 'z'\npath = '/home/users/qd201969/ERA5-1HR-lev/'\ndatapath = '/gws/nopw/j04/ncas_generic/users/renql/'\nfigdir = '/home/users/qd201969/uor_track/fig/'\nf = xr.open_dataset('%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc' % (datapath,\n varname, varname, dtime[0].year))\nlat = f['latitude'].data\nlon = f['longitude'].data\nilon = lon[(lon >= lonl) & (lon <= lonr)]\nilat = lat[(lat >= lats) & (lat <= latn)]\nds = xr.open_dataset('/home/users/qd201969/gtopo30_0.9x1.25.nc')\nphis = ds['PHIS'].sel(lon=ilon, lat=ilat, method='nearest').load()\nphis = phis / 9.8\ndel ds\ngc.collect()\nnl = 0\nfcolors = cmaps.BlueDarkRed18\ncnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0] + cnlvl[nl][1] * (fcolors.N -\n 1), cnlvl[nl][1])\nnorm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N, extend=\n 'both')\nparams = {'legend.fontsize': label_font, 'axes.labelsize': label_font,\n 'axes.titlesize': label_font, 'xtick.labelsize': label_font,\n 'ytick.labelsize': label_font}\nplt.rcParams.update(params)\nfor nt in range(len(dtime)):\n fig = plt.figure(figsize=(12, 12), dpi=100)\n ax = fig.subplots(nrow, ncol, subplot_kw=dict(projection=ccrs.\n PlateCarree()))\n for nl in range(len(lev)):\n var = f[varname].sel(time=dtime[nt], level=lev[nl], longitude=ilon,\n latitude=ilat)\n var.data = var.data / 9.8\n path2 = '%sERA5_VOR%d_1hr_%d_DET/' % (path, lev[nl], dtime[nt].year)\n plat, plon = read_point_fixtime(path2 + 'fft_trs_pos', dtime[nt].\n strftime('%Y%m%d%H'), lonl, lonr, lats, latn)\n fvor = xr.open_dataset('%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc' % (path2,\n lev[nl], dtime[nt].year, nfilt))\n var1 = fvor['var'].sel(time=calc_frames(dtime[nt]), level=1, lon=\n ilon, lat=ilat, method='nearest').load()\n var1.values = var1.values * 100000.0\n axe = ax[nl]\n axe.add_feature(cfeat.COASTLINE.with_scale('110m'), edgecolor=\n 'black', linewidth=0.8, zorder=1)\n axe.set_title('%s %dhPa 
(%d)' % (dtime[nt].strftime(\n '%Y-%m-%d-%H:00'), lev[nl], len(plat)), fontsize=title_font)\n shad = axe.contourf(ilon, ilat, var1, cnlevels, transform=ccrs.\n PlateCarree(), cmap=fcolors, extend='both', norm=norm)\n cont = axe.contour(ilon, ilat, var, np.arange(1000, 15000, cnlvl2[\n nl]), transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5)\n pint = axe.scatter(plon, plat, 10.0 ** 2, color='k', marker='o',\n transform=ccrs.PlateCarree())\n topo = axe.contour(ilon, ilat, phis, [1500, 3000], transform=ccrs.\n PlateCarree(), colors='black', linewidths=1.2)\n axe.set_yticks(np.arange(lats, latn, lat_sp), crs=ccrs.PlateCarree())\n axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\n axe.set_xticks(np.arange(lonl, lonr, lon_sp), crs=ccrs.PlateCarree())\n axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n position = fig.add_axes([0.85, bmlo + 0.1, 0.015, 0.7])\n cb = plt.colorbar(shad, cax=position, orientation='vertical')\n cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font)\n plt.tight_layout(rect=(0, bmlo, 1, 1))\n plt.savefig(figdir + 'filt_vor_%s.png' % dtime[nt].strftime('%Y%m%d%H'),\n bbox_inches='tight', pad_inches=0.01)\nif create_gif == True:\n figname = figdir + 'filt_vor_*.png'\n fn_stream = subprocess.check_output('ls ' + figname, shell=True).decode(\n 'utf-8')\n fn_list = fn_stream.split()\n print(fn_list[0])\n print('filenumber : ' + str(len(fn_list)))\n gif_name = figname.rsplit('_', 1)[0] + '.gif'\n frames = []\n for itm in fn_list:\n frame = Image.open(itm)\n frames.append(frame)\n frames[0].save(gif_name, save_all=True, append_images=frames[1:],\n duration=1000, loop=0, disposal=1)\n subprocess.run('rm -f %s' % figname, shell=True)\n", "step-4": "<mask token>\nimport sys\nimport subprocess\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport gc\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport cartopy.crs as 
ccrs\nimport cartopy.feature as cfeat\nfrom cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\nimport cmaps\nfrom PIL import Image, ImageDraw, ImageSequence\n\n\ndef calc_frames(new_time):\n old_time = datetime(new_time.year - 1, 11, 30, 23)\n days = (new_time - old_time).days\n sec = (new_time - old_time).seconds\n hours = days * 24 + sec / 3600\n return int(hours)\n\n\ndef read_point_fixtime(filname, fixtime, flonl, flonr, flats, flatn):\n ff = open(filname, 'r')\n line1 = ff.readline()\n line2 = ff.readline()\n line3 = ff.readline()\n line4 = ff.readline()\n plat = []\n plon = []\n line = ff.readline()\n while line:\n if line.strip().split(' ')[0] == 'TRACK_ID':\n num = int(ff.readline().strip().split(' ')[-1])\n for nl in range(0, num, 1):\n data = list(map(float, ff.readline().strip().split(' ')))\n if str(int(data[0])) == fixtime and data[1] <= flonr and data[1\n ] >= flonl and data[2] <= flatn and data[2] >= flats:\n plat.append(data[2])\n plon.append(data[1])\n line = ff.readline()\n ff.close()\n print('%s total feature point in %s : %d' % (filname, fixtime, len(plat)))\n return plat, plon\n\n\nlonl = 0\nlonr = 150\nlats = 15\nlatn = 70\nlat_sp = 20\nlon_sp = 30\nnrow = 3\nncol = 1\nbmlo = 0.1\ntitle_font = 18\nlabel_font = 14\ndtime = pd.date_range(start='1995-01-01 00', periods=60, freq='6H', closed=None\n )\ncreate_gif = True\nnfilt = 'T63'\nlev = [850, 500, 250]\ncnlvl = [[-8, 1]]\ncnlvl2 = [30, 50, 100]\nvarname = 'z'\npath = '/home/users/qd201969/ERA5-1HR-lev/'\ndatapath = '/gws/nopw/j04/ncas_generic/users/renql/'\nfigdir = '/home/users/qd201969/uor_track/fig/'\nf = xr.open_dataset('%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc' % (datapath,\n varname, varname, dtime[0].year))\nlat = f['latitude'].data\nlon = f['longitude'].data\nilon = lon[(lon >= lonl) & (lon <= lonr)]\nilat = lat[(lat >= lats) & (lat <= latn)]\nds = xr.open_dataset('/home/users/qd201969/gtopo30_0.9x1.25.nc')\nphis = ds['PHIS'].sel(lon=ilon, lat=ilat, 
method='nearest').load()\nphis = phis / 9.8\ndel ds\ngc.collect()\nnl = 0\nfcolors = cmaps.BlueDarkRed18\ncnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0] + cnlvl[nl][1] * (fcolors.N -\n 1), cnlvl[nl][1])\nnorm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N, extend=\n 'both')\nparams = {'legend.fontsize': label_font, 'axes.labelsize': label_font,\n 'axes.titlesize': label_font, 'xtick.labelsize': label_font,\n 'ytick.labelsize': label_font}\nplt.rcParams.update(params)\nfor nt in range(len(dtime)):\n fig = plt.figure(figsize=(12, 12), dpi=100)\n ax = fig.subplots(nrow, ncol, subplot_kw=dict(projection=ccrs.\n PlateCarree()))\n for nl in range(len(lev)):\n var = f[varname].sel(time=dtime[nt], level=lev[nl], longitude=ilon,\n latitude=ilat)\n var.data = var.data / 9.8\n path2 = '%sERA5_VOR%d_1hr_%d_DET/' % (path, lev[nl], dtime[nt].year)\n plat, plon = read_point_fixtime(path2 + 'fft_trs_pos', dtime[nt].\n strftime('%Y%m%d%H'), lonl, lonr, lats, latn)\n fvor = xr.open_dataset('%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc' % (path2,\n lev[nl], dtime[nt].year, nfilt))\n var1 = fvor['var'].sel(time=calc_frames(dtime[nt]), level=1, lon=\n ilon, lat=ilat, method='nearest').load()\n var1.values = var1.values * 100000.0\n axe = ax[nl]\n axe.add_feature(cfeat.COASTLINE.with_scale('110m'), edgecolor=\n 'black', linewidth=0.8, zorder=1)\n axe.set_title('%s %dhPa (%d)' % (dtime[nt].strftime(\n '%Y-%m-%d-%H:00'), lev[nl], len(plat)), fontsize=title_font)\n shad = axe.contourf(ilon, ilat, var1, cnlevels, transform=ccrs.\n PlateCarree(), cmap=fcolors, extend='both', norm=norm)\n cont = axe.contour(ilon, ilat, var, np.arange(1000, 15000, cnlvl2[\n nl]), transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5)\n pint = axe.scatter(plon, plat, 10.0 ** 2, color='k', marker='o',\n transform=ccrs.PlateCarree())\n topo = axe.contour(ilon, ilat, phis, [1500, 3000], transform=ccrs.\n PlateCarree(), colors='black', linewidths=1.2)\n axe.set_yticks(np.arange(lats, latn, lat_sp), 
crs=ccrs.PlateCarree())\n axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\n axe.set_xticks(np.arange(lonl, lonr, lon_sp), crs=ccrs.PlateCarree())\n axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n position = fig.add_axes([0.85, bmlo + 0.1, 0.015, 0.7])\n cb = plt.colorbar(shad, cax=position, orientation='vertical')\n cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font)\n plt.tight_layout(rect=(0, bmlo, 1, 1))\n plt.savefig(figdir + 'filt_vor_%s.png' % dtime[nt].strftime('%Y%m%d%H'),\n bbox_inches='tight', pad_inches=0.01)\nif create_gif == True:\n figname = figdir + 'filt_vor_*.png'\n fn_stream = subprocess.check_output('ls ' + figname, shell=True).decode(\n 'utf-8')\n fn_list = fn_stream.split()\n print(fn_list[0])\n print('filenumber : ' + str(len(fn_list)))\n gif_name = figname.rsplit('_', 1)[0] + '.gif'\n frames = []\n for itm in fn_list:\n frame = Image.open(itm)\n frames.append(frame)\n frames[0].save(gif_name, save_all=True, append_images=frames[1:],\n duration=1000, loop=0, disposal=1)\n subprocess.run('rm -f %s' % figname, shell=True)\n", "step-5": "#!/usr/bin/env python\n'''\nfix a time and then draw the instant geopotential (contour) from \n/gws/nopw/j04/ncas_generic/users/renql/ERA5_subdaily/ERA5_NH_z_1989.nc,\n\nspatial filtered relative vorticity (shaded) from \n~/ERA5-1HR-lev/ERA5_VOR850_1hr_1995_DET/ERA5_VOR850_1hr_1995_DET_T63filt.nc\n\nand identified feature points from \n~/ERA5-1HR-lev/ERA5_VOR850_1hr_1995_DET/fft_trs_pos\n\nLoop through the height (850, 500, 250)\n\n20211116\n'''\nimport sys\nimport subprocess\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nfrom datetime import datetime\nimport gc #garbage collector\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import colors\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeat\nfrom cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\nimport cmaps\nfrom PIL import Image, ImageDraw, 
ImageSequence\n\ndef calc_frames(new_time):\n old_time = datetime(new_time.year-1, 11, 30, 23)\n days = (new_time - old_time).days\n sec = (new_time - old_time).seconds\n hours = days * 24 + sec/3600\n return int(hours)\n\ndef read_point_fixtime(filname,fixtime,flonl,flonr,flats,flatn):\n ff = open(filname,\"r\") \n line1 = ff.readline()\n line2 = ff.readline()\n line3 = ff.readline()\n line4 = ff.readline()\n \n plat = []\n plon = []\n line = ff.readline()\n while line:\n if line.strip().split(\" \")[0] == \"TRACK_ID\":\n num = int(ff.readline().strip().split(\" \")[-1])\n for nl in range(0,num,1):\n data = list(map(float,ff.readline().strip().split(\" \")))\n if str(int(data[0])) == fixtime and \\\n data[1]<=flonr and data[1] >= flonl and data[2]<=flatn and data[2]>=flats :\n plat.append(data[2])\n plon.append(data[1])\n line = ff.readline()\n ff.close()\n print(\"%s total feature point in %s : %d\"%(filname,fixtime,len(plat)))\n return plat, plon \n\nlonl=0 #0 #\nlonr=150#360#\nlats=15 #0 #\nlatn=70 #90 #\nlat_sp = 20\nlon_sp = 30\n\nnrow = 3\nncol = 1\nbmlo = 0.1\ntitle_font=18\nlabel_font=14\n\ndtime = pd.date_range(start='1995-01-01 00',periods=60, freq='6H',closed=None)\n#dtime = pd.date_range(start='1995-01-01 00',end='1995-01-15 00', freq='6H',closed=None)\ncreate_gif = True #False#\nnfilt=\"T63\"\nlev = [850,500,250]\ncnlvl =[[-8 ,1 ]]\ncnlvl2 = [30,50,100]\nvarname = 'z'\npath = '/home/users/qd201969/ERA5-1HR-lev/'\ndatapath = \"/gws/nopw/j04/ncas_generic/users/renql/\"#t/ERA5_NH_t_1989.nc\nfigdir = \"/home/users/qd201969/uor_track/fig/\"\n\nf = xr.open_dataset(\"%sERA5_subdaily/%s/ERA5_NH_%s_%d.nc\"%(datapath,varname,varname,dtime[0].year))\nlat = f['latitude'].data\nlon = f['longitude'].data\nilon = lon[(lon>=lonl) & (lon<=lonr)]\nilat = lat[(lat>=lats) & (lat<=latn)]\nds = xr.open_dataset(\"/home/users/qd201969/gtopo30_0.9x1.25.nc\")\nphis = ds['PHIS'].sel(lon=ilon,lat=ilat,method=\"nearest\").load()\nphis = phis/9.8 # transfer from m2/s2 to m\ndel 
ds\ngc.collect()\n\nnl = 0\nfcolors = cmaps.BlueDarkRed18\ncnlevels = np.arange(cnlvl[nl][0], cnlvl[nl][0]+cnlvl[nl][1]*(fcolors.N-1), cnlvl[nl][1])\nnorm = colors.BoundaryNorm(boundaries=cnlevels, ncolors=fcolors.N,extend='both')\n\nparams = {'legend.fontsize': label_font,\n 'axes.labelsize': label_font,\n 'axes.titlesize':label_font,\n 'xtick.labelsize':label_font,\n 'ytick.labelsize':label_font}\nplt.rcParams.update(params)\n\nfor nt in range(len(dtime)):\n fig = plt.figure(figsize=(12,12),dpi=100)\n ax = fig.subplots(nrow,ncol, subplot_kw=dict(projection=ccrs.PlateCarree())) #sharex=True, sharey=True\n for nl in range(len(lev)):\n var = f[varname].sel(time=dtime[nt],level=lev[nl],longitude=ilon,latitude=ilat)\n var.data = var.data/9.8\n\n path2 = \"%sERA5_VOR%d_1hr_%d_DET/\"%(path,lev[nl],dtime[nt].year)\n plat, plon = read_point_fixtime(path2+\"fft_trs_pos\",dtime[nt].strftime('%Y%m%d%H'),lonl,lonr,lats,latn)\n \n fvor = xr.open_dataset(\"%sERA5_VOR%d_1hr_%d_DET_%sfilt.nc\"%(path2,lev[nl],dtime[nt].year,nfilt))\n var1 = fvor['var'].sel(time=calc_frames(dtime[nt]),level = 1,lon=ilon,lat=ilat,method=\"nearest\").load()\n #fvor = xr.open_dataset(\"%sERA5_VOR_1h_dec_jan/ERA5_VOR%d_1hr_dec-jan%d_DET.nc\"%(datapath,lev[nl],dtime[nt].year))\n #var1 = fvor['var138'].sel(time=dtime[nt],lev=float(lev[nl]*100),lat=ilat,lon=ilon,method=\"nearest\").load()\n var1.values = var1.values*1e5\n\n axe = ax[nl]\n axe.add_feature(cfeat.COASTLINE.with_scale('110m'),edgecolor='black', linewidth=0.8, zorder=1) \n axe.set_title(\"%s %dhPa (%d)\"%(dtime[nt].strftime('%Y-%m-%d-%H:00'), lev[nl], len(plat)),fontsize=title_font)\n\n shad = axe.contourf(ilon, ilat, var1, cnlevels,\n transform=ccrs.PlateCarree(),cmap=fcolors,extend='both',norm=norm)\n \n cont = axe.contour(ilon, ilat, var, np.arange(1000,15000,cnlvl2[nl]), \n transform=ccrs.PlateCarree(), colors='gray', linewidths=1.5)\n \n #pint = axe.plot(plon,plat,color='darkviolet', marker='o', markersize=12, 
transform=ccrs.PlateCarree())\n pint = axe.scatter(plon,plat,10.0**2,color='k', marker='o', transform=ccrs.PlateCarree())\n\n topo = axe.contour(ilon, ilat, phis, [1500,3000],\n transform=ccrs.PlateCarree(),colors='black',linewidths=1.2)\n\n axe.set_yticks(np.arange(lats,latn,lat_sp), crs=ccrs.PlateCarree())\n axe.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))\n axe.set_xticks(np.arange(lonl,lonr,lon_sp), crs=ccrs.PlateCarree())\n axe.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))\n\n position = fig.add_axes([0.85, bmlo+0.1, 0.015, 0.7]) #left, bottom, width, height\n cb = plt.colorbar(shad, cax=position ,orientation='vertical')#, shrink=.9)\n cb.set_label(label='T5~63 Relative Vort (1e5)', size=label_font) #, weight='bold'\n\n plt.tight_layout(rect=(0,bmlo,1,1))\n plt.savefig(figdir+\"filt_vor_%s.png\"%(dtime[nt].strftime('%Y%m%d%H')), bbox_inches='tight',pad_inches=0.01)\n\nif create_gif == True:\n figname = figdir+\"filt_vor_*.png\"\n fn_stream = subprocess.check_output(\"ls \"+figname, shell=True).decode('utf-8')\n fn_list = fn_stream.split()\n print(fn_list[0])\n print('filenumber : '+str(len(fn_list)))\n gif_name = figname.rsplit(\"_\",1)[0]+\".gif\" \n\n frames = []\n for itm in fn_list:\n frame = Image.open(itm)\n frames.append(frame)\n\n frames[0].save(gif_name, save_all=True, append_images=frames[1:],\\\n duration = 1000, loop=0, disposal=1)\n subprocess.run('rm -f %s'%(figname),shell=True)\n\n", "step-ids": [ 2, 3, 4, 5, 6 ] }
[ 2, 3, 4, 5, 6 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> os.chdir('c:\\users\\patty\\desktop') if not os.path.isdir('DailyBibleVerse'): os.makedirs('DailyBibleVerse') <|reserved_special_token_0|> res.raise_for_status() <|reserved_special_token_0|> while os.path.exists( 'c:\\users\\patty\\desktop\\DailyBibleVerse\\verse%s.jpg' % num): num += 1 <|reserved_special_token_0|> for chunk in res.iter_content(100000): File.write(chunk) File.close() <|reserved_special_token_1|> <|reserved_special_token_0|> os.chdir('c:\\users\\patty\\desktop') if not os.path.isdir('DailyBibleVerse'): os.makedirs('DailyBibleVerse') res = requests.get('http://www.verseoftheday.com/') res.raise_for_status() soup = bs4.BeautifulSoup(res.text, 'html.parser') BibleElem = soup.select('#tv-image-wrapper img') BibleUrl = BibleElem[0].get('src') num = 1 while os.path.exists( 'c:\\users\\patty\\desktop\\DailyBibleVerse\\verse%s.jpg' % num): num += 1 File = open('c:\\users\\patty\\desktop\\DailyBibleVerse\\' + 'verse' + str( num) + '.jpg', 'wb') res = requests.get(BibleUrl) for chunk in res.iter_content(100000): File.write(chunk) File.close() <|reserved_special_token_1|> import requests, os, bs4 os.chdir('c:\\users\\patty\\desktop') if not os.path.isdir('DailyBibleVerse'): os.makedirs('DailyBibleVerse') res = requests.get('http://www.verseoftheday.com/') res.raise_for_status() soup = bs4.BeautifulSoup(res.text, 'html.parser') BibleElem = soup.select('#tv-image-wrapper img') BibleUrl = BibleElem[0].get('src') num = 1 while os.path.exists( 'c:\\users\\patty\\desktop\\DailyBibleVerse\\verse%s.jpg' % num): num += 1 File = open('c:\\users\\patty\\desktop\\DailyBibleVerse\\' + 'verse' + str( num) + '.jpg', 'wb') res = requests.get(BibleUrl) for chunk in res.iter_content(100000): File.write(chunk) File.close() <|reserved_special_token_1|> # Downloads images from http://www.verseoftheday.com/ and saves it into a DailyBibleVerse folder import requests, os, bs4 
os.chdir('c:\\users\\patty\\desktop') #modify location where you want to create the folder if not os.path.isdir('DailyBibleVerse'): os.makedirs('DailyBibleVerse') res = requests.get('http://www.verseoftheday.com/') #Downloading the page into the 'res' variable res.raise_for_status() soup= bs4.BeautifulSoup(res.text, "html.parser") #BibleCaption= soup.select('#featured .bilingual-left') #If you want verse use this #text= BibleCaption[0].getText() BibleElem = soup.select('#tv-image-wrapper img') #searching the image BibleUrl= BibleElem[0].get('src') num=1 while os.path.exists('c:\\users\\patty\\desktop\\DailyBibleVerse\\verse%s.jpg' %num): num += 1 File = open('c:\\users\\patty\\desktop\\DailyBibleVerse\\' +'verse'+ str(num)+ '.jpg', 'wb') #this is the name res = requests.get(BibleUrl) for chunk in res.iter_content(100000): File.write(chunk) File.close()
flexible
{ "blob_id": "a8fb8ac3c102e460d44e533b1e6b3f8780b1145d", "index": 4609, "step-1": "<mask token>\n", "step-2": "<mask token>\nos.chdir('c:\\\\users\\\\patty\\\\desktop')\nif not os.path.isdir('DailyBibleVerse'):\n os.makedirs('DailyBibleVerse')\n<mask token>\nres.raise_for_status()\n<mask token>\nwhile os.path.exists(\n 'c:\\\\users\\\\patty\\\\desktop\\\\DailyBibleVerse\\\\verse%s.jpg' % num):\n num += 1\n<mask token>\nfor chunk in res.iter_content(100000):\n File.write(chunk)\nFile.close()\n", "step-3": "<mask token>\nos.chdir('c:\\\\users\\\\patty\\\\desktop')\nif not os.path.isdir('DailyBibleVerse'):\n os.makedirs('DailyBibleVerse')\nres = requests.get('http://www.verseoftheday.com/')\nres.raise_for_status()\nsoup = bs4.BeautifulSoup(res.text, 'html.parser')\nBibleElem = soup.select('#tv-image-wrapper img')\nBibleUrl = BibleElem[0].get('src')\nnum = 1\nwhile os.path.exists(\n 'c:\\\\users\\\\patty\\\\desktop\\\\DailyBibleVerse\\\\verse%s.jpg' % num):\n num += 1\nFile = open('c:\\\\users\\\\patty\\\\desktop\\\\DailyBibleVerse\\\\' + 'verse' + str(\n num) + '.jpg', 'wb')\nres = requests.get(BibleUrl)\nfor chunk in res.iter_content(100000):\n File.write(chunk)\nFile.close()\n", "step-4": "import requests, os, bs4\nos.chdir('c:\\\\users\\\\patty\\\\desktop')\nif not os.path.isdir('DailyBibleVerse'):\n os.makedirs('DailyBibleVerse')\nres = requests.get('http://www.verseoftheday.com/')\nres.raise_for_status()\nsoup = bs4.BeautifulSoup(res.text, 'html.parser')\nBibleElem = soup.select('#tv-image-wrapper img')\nBibleUrl = BibleElem[0].get('src')\nnum = 1\nwhile os.path.exists(\n 'c:\\\\users\\\\patty\\\\desktop\\\\DailyBibleVerse\\\\verse%s.jpg' % num):\n num += 1\nFile = open('c:\\\\users\\\\patty\\\\desktop\\\\DailyBibleVerse\\\\' + 'verse' + str(\n num) + '.jpg', 'wb')\nres = requests.get(BibleUrl)\nfor chunk in res.iter_content(100000):\n File.write(chunk)\nFile.close()\n", "step-5": "# Downloads images from http://www.verseoftheday.com/ and saves it into a 
DailyBibleVerse folder \r\n\r\nimport requests, os, bs4\r\n\r\nos.chdir('c:\\\\users\\\\patty\\\\desktop') #modify location where you want to create the folder\r\nif not os.path.isdir('DailyBibleVerse'):\r\n os.makedirs('DailyBibleVerse')\r\n\r\nres = requests.get('http://www.verseoftheday.com/') #Downloading the page into the 'res' variable\r\nres.raise_for_status()\r\n\r\nsoup= bs4.BeautifulSoup(res.text, \"html.parser\")\r\n\r\n#BibleCaption= soup.select('#featured .bilingual-left') #If you want verse use this\r\n#text= BibleCaption[0].getText()\r\n\r\nBibleElem = soup.select('#tv-image-wrapper img') #searching the image\r\nBibleUrl= BibleElem[0].get('src')\r\n\r\nnum=1\r\nwhile os.path.exists('c:\\\\users\\\\patty\\\\desktop\\\\DailyBibleVerse\\\\verse%s.jpg' %num):\r\n num += 1\r\n \r\nFile = open('c:\\\\users\\\\patty\\\\desktop\\\\DailyBibleVerse\\\\' +'verse'+ str(num)+ '.jpg', 'wb') #this is the name\r\n\r\nres = requests.get(BibleUrl)\r\n\r\nfor chunk in res.iter_content(100000):\r\n File.write(chunk)\r\nFile.close()\r\n\r\n\r\n\r\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr1/local/bin/python import os, sys, re, shutil, random from tempfile import * # program location prog_dir = '/home/jpei/test_promals3d_package/bar/promals_package/bin/' # program names promals_web = prog_dir + "progress_for_web.py" csv_cutoff_g = 5 alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' def run_promals(): csv_cutoff = csv_cutoff_g # check and parse the command line cmd_line = sys.argv if len(cmd_line) <= 1: promals_help() sys.exit(1) elif not os.path.isfile(cmd_line[1]): print >> sys.stderr, "Error reading input file:", cmd_line[1] promals_help() sys.exit(1) else: randomstring = "" infile = os.path.abspath(cmd_line[1]) infiledir = os.path.split(infile)[0] for x in random.sample(alphabet,40): randomstring+=x ranfile = "%s/%s" %(infiledir, randomstring) try: fp = open(ranfile, "w") except: print >> sys.stderr, "Error:" print >> sys.stderr, " The directory containing your input file is not writable:", infiledir print >> sys.stderr, " Input file should be in a writable directory" sys.exit(1) fp.close() os.system("rm -f %s" %ranfile) cmd_line1 = [] outputfile = "" blast_dir = "" resnum = 1 caa_freq = 0.8 for i in range(len(cmd_line)): arg = cmd_line[i] if i == 0: arg = prog_dir + 'promals_c' # change inputfile name to full path name if i == 1: arg = os.path.abspath(arg) inputfile = arg # change outfile name to full path name if arg == '-outfile': if i+1 < len(cmd_line): cmd_line[i+1] = os.path.abspath(cmd_line[i+1]) outputfile = cmd_line[i+1] # change blast_dir name to full path name if arg == '-blast_dir': if i+1 < len(cmd_line): cmd_line[i+1] = os.path.abspath(cmd_line[i+1]) #if arg == '-ssw': arg = '-ss_weight' #if arg == '-aaw': arg = '-score_weight' #if arg == '-max_homologs': arg = '-max_num_sequences' #if arg == '-iter_num': arg = '-iter_number' if arg == '-csv_index': if i+1 < len(cmd_line): csv_cutoff = int(cmd_line[i+1]) if (csv_cutoff<0) or (csv_cutoff>9): csv_cutoff = 5 if arg == "-resnum": resnum = int(cmd_line[i+1]) if arg 
== "-caa_freq": caa_freq = float(sys.argv[i+1]) cmd_line1.append(arg) if not outputfile: if re.search("\.fa$", inputfile): outputfile = re.sub("\.fa$", "", inputfile) + ".promals.aln" else: outputfile = inputfile + ".promals.aln" if not blast_dir: blast_dir = "%s_blast" %inputfile promals_c = ' '.join(cmd_line1) promals_c = re.sub("\s+-resnum\s+\S+", " ", promals_c) promals_c = re.sub("\s+-caa_freq\s+\S+", " ", promals_c) promals_c = re.sub("\s+-csv_index\s+\S+", " ", promals_c) if "-blast_dir" not in promals_c: promals_c += " -blast_dir %s " %blast_dir outputlogfile = inputfile+".prmls.oUTpUT" promals_c = promals_c + " > " + outputlogfile print "promals command:" print promals_c print sys.stdout.flush() # run programs in a temporary directory to avoid .ncbirc problem cwd = os.getcwd() tmpdir = mkdtemp() os.chdir(tmpdir) os.system("cp %s.ncbirc ." %prog_dir) s1 = os.system(promals_c) if s1 == 0: print "output alignment file is:", outputfile print "blast intermediate files are in:", blast_dir print else: print "Error running promals - check log file for details:", outputlogfile print print "html file command:" print "python %s %s %s -cutoff %d -resnum %d -caa_freq %f" %(promals_web, outputfile, outputlogfile, csv_cutoff, resnum, caa_freq) print sys.stdout.flush() s2 = os.system("python %s %s %s -cutoff %d -resnum %d -caa_freq %f 2>/dev/null" %(promals_web, outputfile, outputlogfile, csv_cutoff, resnum, caa_freq) ) if s2 == 0: print "output html alignment file is:", outputfile + ".html" print else: print "Error generating html file" print os.chdir(cwd) shutil.rmtree(tmpdir) def promals_help(): help_content = ''' promals with 3D information command: promals input_file [options] > input_file.log python promals input_file [options] > input_file.log input: input_file needs to be FASTA format output: Two alignment files will be generated. One is in CLUSTAL format alignment (file name can be specified by option -outfile). 
The other file is an html file of colored alignment. Options: For alignment strategies: -id_thr [0, 1] Identity threshold that determined the partition of fast and slow alignment processes. If two groups of sequences has average identity above this threshold, align them in a fast way. Otherwise, use slower but more accurate way (by profile-profile alignment with predicted secondary structures and available 3D constraints). Default: 0.6 (corresponding to 60% identity) For using 3D information: -dali [0 or 1] Use DaliLite structural alignment (1) or not use fast alignment (0) ("DaliLite" executable needs to be present in bin/ directory). Default: 0 (it is relatively slow to run DaliLite) -fast [0 or 1] Use fast structural alignment (1) or not use fast alignment (0) ("fast" executable needs to be present in bin/ directory). Default: 1 -tmalign [0 or 1] Use TMalign structural alignment (1) or not use fast TMalign alignment (0) ("TMalign" executable needs to be present in bin/ directory). Default: 1 -struct_weight [0, inf[ Weight of structural constraints relative to sequence constraints. Default: 1.5 For profile scoring: -ss_weight [0,inf[ Weight of predicted secondary structure in profile-profile scoring. Default: 0.2 -score_weight [0,inf[ Weight of amino acids in profile-profile scoring. Default: 0.8 For running PSI-BLAST to get sequence profile: -iter_number <int> Number of PSI-BLAST iterations for profile generation. Default: 3 -evalue [0, inf[ PSI-BLAST evalue cutoff for inclusion. Default: 0.001 -low_id_thr [0,1] Remove PSI-BLAST hits with identity to the query less than this value. Default: 0.2 -blast_dir <file> Directory of running PSI-BLAST and store other intermediate results. -clean_blast_before [0 or 1] Remove any file in the directory that stores intermediate results (specified by -blast_dir option) before running PSI-BLAST. Default: 0. -clean_blast_after [0 or 1] Remove any file in the PSI-BLAST directory after running PSI-BLAST. 
Default: 0 For output: -outfile <file> The name of output alignment file. -blocksize <int> Number of letters in clustal-format alignment blocks. Default: 70 -resnum [0 or 1] In colored html alignment, show residue numbers for alignment blocks. Default: 1 -caa_freq [0, 1] In colored html alignment, show amino acid consensus symbol if the fraction of a class of residues is higher than this threshold. Default: 0.8 ''' print help_content if __name__ == '__main__': run_promals()
normal
{ "blob_id": "b9386cf8c17b28fd1fea6e587ca4401de247cbea", "index": 7779, "step-1": "#!/usr1/local/bin/python\n\nimport os, sys, re, shutil, random\nfrom tempfile import *\n\n\n# program location\nprog_dir = '/home/jpei/test_promals3d_package/bar/promals_package/bin/'\n\n# program names\npromals_web = prog_dir + \"progress_for_web.py\"\n\ncsv_cutoff_g = 5\n\nalphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\ndef run_promals():\n\n\tcsv_cutoff = csv_cutoff_g\n\t# check and parse the command line\n\tcmd_line = sys.argv\n\tif len(cmd_line) <= 1: \n\t\tpromals_help()\n\t\tsys.exit(1)\n\telif not os.path.isfile(cmd_line[1]):\n print >> sys.stderr, \"Error reading input file:\", cmd_line[1]\n\t\tpromals_help()\n\t\tsys.exit(1)\n else:\n randomstring = \"\"\n infile = os.path.abspath(cmd_line[1])\n infiledir = os.path.split(infile)[0]\n for x in random.sample(alphabet,40):\n randomstring+=x\n ranfile = \"%s/%s\" %(infiledir, randomstring)\n try:\n fp = open(ranfile, \"w\")\n except:\n print >> sys.stderr, \"Error:\"\n print >> sys.stderr, \" The directory containing your input file is not writable:\", infiledir\n print >> sys.stderr, \" Input file should be in a writable directory\"\n sys.exit(1)\n fp.close()\n os.system(\"rm -f %s\" %ranfile)\n\n\tcmd_line1 = []\n\toutputfile = \"\"\n blast_dir = \"\"\n resnum = 1\n caa_freq = 0.8\n\tfor i in range(len(cmd_line)):\n\t\targ = cmd_line[i]\n\t\tif i == 0: arg = prog_dir + 'promals_c'\n # change inputfile name to full path name\n\t\tif i == 1: \n arg = os.path.abspath(arg)\n inputfile = arg\n # change outfile name to full path name\n\t\tif arg == '-outfile':\n\t\t\tif i+1 < len(cmd_line): \n cmd_line[i+1] = os.path.abspath(cmd_line[i+1])\n outputfile = cmd_line[i+1]\n # change blast_dir name to full path name\n\t\tif arg == '-blast_dir':\n\t\t\tif i+1 < len(cmd_line): \n cmd_line[i+1] = os.path.abspath(cmd_line[i+1])\n\t\t#if arg == '-ssw': arg = '-ss_weight'\n\t\t#if arg == '-aaw': arg = 
'-score_weight'\n\t\t#if arg == '-max_homologs': arg = '-max_num_sequences'\n\t\t#if arg == '-iter_num': arg = '-iter_number'\n\t\tif arg == '-csv_index': \n\t\t\tif i+1 < len(cmd_line):\n\t\t\t\tcsv_cutoff = int(cmd_line[i+1])\n\t\t\t\tif (csv_cutoff<0) or (csv_cutoff>9):\n\t\t\t\t\tcsv_cutoff = 5\n if arg == \"-resnum\":\n resnum = int(cmd_line[i+1])\n if arg == \"-caa_freq\":\n caa_freq = float(sys.argv[i+1])\n\t\tcmd_line1.append(arg)\n\t\n\tif not outputfile:\n\t\tif re.search(\"\\.fa$\", inputfile):\n\t\t\toutputfile = re.sub(\"\\.fa$\", \"\", inputfile) + \".promals.aln\"\n else: outputfile = inputfile + \".promals.aln\"\n\tif not blast_dir:\n blast_dir = \"%s_blast\" %inputfile\n\t\n\tpromals_c = ' '.join(cmd_line1)\n promals_c = re.sub(\"\\s+-resnum\\s+\\S+\", \" \", promals_c)\n promals_c = re.sub(\"\\s+-caa_freq\\s+\\S+\", \" \", promals_c)\n promals_c = re.sub(\"\\s+-csv_index\\s+\\S+\", \" \", promals_c)\n if \"-blast_dir\" not in promals_c:\n promals_c += \" -blast_dir %s \" %blast_dir\n\toutputlogfile = inputfile+\".prmls.oUTpUT\"\n\tpromals_c = promals_c + \" > \" + outputlogfile\n print \"promals command:\"\n\tprint promals_c\n print\n sys.stdout.flush()\n\t\n\t# run programs in a temporary directory to avoid .ncbirc problem\n cwd = os.getcwd()\n tmpdir = mkdtemp()\n os.chdir(tmpdir)\n os.system(\"cp %s.ncbirc .\" %prog_dir)\n\ts1 = os.system(promals_c)\n if s1 == 0:\n print \"output alignment file is:\", outputfile\n print \"blast intermediate files are in:\", blast_dir\n print\n else:\n print \"Error running promals - check log file for details:\", outputlogfile\n print\n print \"html file command:\"\n\tprint \"python %s %s %s -cutoff %d -resnum %d -caa_freq %f\" %(promals_web, outputfile, outputlogfile, csv_cutoff, resnum, caa_freq) \n print\n sys.stdout.flush()\n\ts2 = os.system(\"python %s %s %s -cutoff %d -resnum %d -caa_freq %f 2>/dev/null\" %(promals_web, outputfile, outputlogfile, csv_cutoff, resnum, caa_freq) )\n if s2 == 0:\n print 
\"output html alignment file is:\", outputfile + \".html\"\n print\n else:\n print \"Error generating html file\"\n print\n\n os.chdir(cwd)\n shutil.rmtree(tmpdir)\n\ndef promals_help():\n\n help_content = '''\n\npromals with 3D information\n \n command: \n promals input_file [options] > input_file.log\n python promals input_file [options] > input_file.log\n\n input:\n input_file needs to be FASTA format\n\n output: \n Two alignment files will be generated. One is in CLUSTAL \n format alignment (file name can be specified by option -outfile). \n The other file is an html file of colored alignment.\n \n Options:\n\n For alignment strategies:\n -id_thr [0, 1] Identity threshold that determined the partition of\n fast and slow alignment processes. If two groups of\n sequences has average identity above this threshold,\n align them in a fast way. Otherwise, use slower but\n more accurate way (by profile-profile alignment with\n predicted secondary structures and available 3D \n constraints). Default: 0.6 (corresponding to 60% identity)\n\n For using 3D information:\n -dali [0 or 1] Use DaliLite structural alignment (1) or not use \n fast alignment (0) (\"DaliLite\" executable needs to \n be present in bin/ directory). Default: 0 (it is \n relatively slow to run DaliLite)\n -fast [0 or 1] Use fast structural alignment (1) or not use fast \n alignment (0) (\"fast\" executable needs to be present \n in bin/ directory). Default: 1\n -tmalign [0 or 1] Use TMalign structural alignment (1) or not use fast \n TMalign alignment (0) (\"TMalign\" executable needs to \n be present in bin/ directory). Default: 1\n -struct_weight [0, inf[ Weight of structural constraints relative to sequence \n constraints. Default: 1.5\n\n For profile scoring:\n -ss_weight [0,inf[ Weight of predicted secondary structure in profile-profile \n scoring. Default: 0.2\n -score_weight [0,inf[ Weight of amino acids in profile-profile scoring. 
\n Default: 0.8\n\n For running PSI-BLAST to get sequence profile:\n -iter_number <int> Number of PSI-BLAST iterations for profile generation. \n Default: 3\n -evalue [0, inf[ PSI-BLAST evalue cutoff for inclusion. Default: 0.001\n -low_id_thr [0,1] Remove PSI-BLAST hits with identity to the query less than \n this value. Default: 0.2\n -blast_dir <file> Directory of running PSI-BLAST and store other intermediate \n results.\n -clean_blast_before [0 or 1] Remove any file in the directory that stores \n intermediate results (specified by -blast_dir option) before\n running PSI-BLAST. Default: 0. \n -clean_blast_after [0 or 1] Remove any file in the PSI-BLAST directory after running\n PSI-BLAST. Default: 0\n\n For output:\n -outfile <file> The name of output alignment file.\n -blocksize <int> Number of letters in clustal-format alignment blocks. \n Default: 70\n -resnum [0 or 1] In colored html alignment, show residue numbers for \n alignment blocks. Default: 1\n -caa_freq [0, 1] In colored html alignment, show amino acid consensus\n symbol if the fraction of a class of residues is higher\n than this threshold. Default: 0.8\n\n '''\n\n print help_content\n\n\nif __name__ == '__main__':\n\n\trun_promals()\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> for face in detected_faces: x, y, w, h = face cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2) cv.imshow('orig_img', original_image_5) cv.waitKey(0) cv.destroyAllWindows() <|reserved_special_token_1|> <|reserved_special_token_0|> original_image_5 = cv.imread('mans_face.JPG') grayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY) face_cascade = cv.CascadeClassifier('haar_cascade_front.xml') detected_faces = face_cascade.detectMultiScale(grayscale_image) for face in detected_faces: x, y, w, h = face cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2) cv.imshow('orig_img', original_image_5) cv.waitKey(0) cv.destroyAllWindows() <|reserved_special_token_1|> import cv2 as cv original_image_5 = cv.imread('mans_face.JPG') grayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY) face_cascade = cv.CascadeClassifier('haar_cascade_front.xml') detected_faces = face_cascade.detectMultiScale(grayscale_image) for face in detected_faces: x, y, w, h = face cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2) cv.imshow('orig_img', original_image_5) cv.waitKey(0) cv.destroyAllWindows() <|reserved_special_token_1|> import cv2 as cv #! 
THESE ARE IMAGES THAT AREN'T DOWNSIZED #original_image_1 = cv.imread("hamburger_face.JPG") #original_image_2 = cv.imread("hammock_reading.JPG") #original_image_3 = cv.imread("sofa_face.JPG") #original_image_4 = cv.imread("frisbee_team.JPG") original_image_5 = cv.imread("mans_face.JPG") # TO PRINT OUT ARRAY AND DIMENSIONS # print(original_image) # print(original_image.shape) #grayscale_image = cv.cvtColor(original_image_1, cv.COLOR_BGR2GRAY) #grayscale_image = cv.cvtColor(original_image_2, cv.COLOR_BGR2GRAY) #grayscale_image = cv.cvtColor(original_image_3, cv.COLOR_BGR2GRAY) #grayscale_image = cv.cvtColor(original_image_4, cv.COLOR_BGR2GRAY) grayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY) # TO PRINT OUT GRAYSCALE IMG #cv.imshow("gray_img", grayscale_image) #cv.waitKey(0) #cv.destroyAllWindows() face_cascade = cv.CascadeClassifier('haar_cascade_front.xml') detected_faces = face_cascade.detectMultiScale(grayscale_image) # PRINTS COORDINATES OF FACES #print(detected_faces) for face in detected_faces: x , y , w , h = face cv.rectangle(original_image_5, (x, y), (x + w , y + h ), (0 , 255 , 0), 2) cv.imshow("orig_img", original_image_5) cv.waitKey(0) cv.destroyAllWindows()
flexible
{ "blob_id": "d0bd08bea65878f5fccfc4affecdf53cc36179df", "index": 6633, "step-1": "<mask token>\n", "step-2": "<mask token>\nfor face in detected_faces:\n x, y, w, h = face\n cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2)\ncv.imshow('orig_img', original_image_5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n", "step-3": "<mask token>\noriginal_image_5 = cv.imread('mans_face.JPG')\ngrayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)\nface_cascade = cv.CascadeClassifier('haar_cascade_front.xml')\ndetected_faces = face_cascade.detectMultiScale(grayscale_image)\nfor face in detected_faces:\n x, y, w, h = face\n cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2)\ncv.imshow('orig_img', original_image_5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n", "step-4": "import cv2 as cv\noriginal_image_5 = cv.imread('mans_face.JPG')\ngrayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)\nface_cascade = cv.CascadeClassifier('haar_cascade_front.xml')\ndetected_faces = face_cascade.detectMultiScale(grayscale_image)\nfor face in detected_faces:\n x, y, w, h = face\n cv.rectangle(original_image_5, (x, y), (x + w, y + h), (0, 255, 0), 2)\ncv.imshow('orig_img', original_image_5)\ncv.waitKey(0)\ncv.destroyAllWindows()\n", "step-5": "import cv2 as cv\r\n\r\n#! 
THESE ARE IMAGES THAT AREN'T DOWNSIZED\r\n#original_image_1 = cv.imread(\"hamburger_face.JPG\")\r\n#original_image_2 = cv.imread(\"hammock_reading.JPG\")\r\n#original_image_3 = cv.imread(\"sofa_face.JPG\")\r\n#original_image_4 = cv.imread(\"frisbee_team.JPG\")\r\noriginal_image_5 = cv.imread(\"mans_face.JPG\")\r\n\r\n# TO PRINT OUT ARRAY AND DIMENSIONS\r\n# print(original_image)\r\n# print(original_image.shape)\r\n\r\n#grayscale_image = cv.cvtColor(original_image_1, cv.COLOR_BGR2GRAY)\r\n#grayscale_image = cv.cvtColor(original_image_2, cv.COLOR_BGR2GRAY)\r\n#grayscale_image = cv.cvtColor(original_image_3, cv.COLOR_BGR2GRAY)\r\n#grayscale_image = cv.cvtColor(original_image_4, cv.COLOR_BGR2GRAY)\r\ngrayscale_image = cv.cvtColor(original_image_5, cv.COLOR_BGR2GRAY)\r\n\r\n# TO PRINT OUT GRAYSCALE IMG\r\n#cv.imshow(\"gray_img\", grayscale_image)\r\n#cv.waitKey(0)\r\n#cv.destroyAllWindows()\r\n\r\nface_cascade = cv.CascadeClassifier('haar_cascade_front.xml')\r\ndetected_faces = face_cascade.detectMultiScale(grayscale_image)\r\n\r\n# PRINTS COORDINATES OF FACES\r\n#print(detected_faces)\r\n\r\nfor face in detected_faces:\r\n x , y , w , h = face\r\n cv.rectangle(original_image_5, (x, y), (x + w , y + h ), (0 , 255 , 0), 2)\r\n\r\ncv.imshow(\"orig_img\", original_image_5)\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#Use bisection search to determine square root def square_calculator(user_input): """ accepts input from a user to determine the square root returns the square root of the user input """ precision = .000000000001 counter = 0 low = 0 high = user_input guess = (low + high) / 2.0 while abs(guess**2 - user_input) >= precision: if guess**2 > user_input: high = guess if guess**2 < user_input: low = guess guess = (low + high) / 2.0 counter+= 1 return(guess, counter) while True: user_input = int(input("Enter a number: ")) answer, counter = square_calculator(user_input) print("The square root of", user_input, "is", round(answer,6)) print("It took", counter, "guesses to figure it out.")
normal
{ "blob_id": "2bc20f3410d068e0592c8a45e3c13c0559059f24", "index": 4498, "step-1": "<mask token>\n", "step-2": "def square_calculator(user_input):\n \"\"\"\n accepts input from a user to determine the square root\n returns the square root of the user input\n \"\"\"\n precision = 1e-12\n counter = 0\n low = 0\n high = user_input\n guess = (low + high) / 2.0\n while abs(guess ** 2 - user_input) >= precision:\n if guess ** 2 > user_input:\n high = guess\n if guess ** 2 < user_input:\n low = guess\n guess = (low + high) / 2.0\n counter += 1\n return guess, counter\n\n\n<mask token>\n", "step-3": "def square_calculator(user_input):\n \"\"\"\n accepts input from a user to determine the square root\n returns the square root of the user input\n \"\"\"\n precision = 1e-12\n counter = 0\n low = 0\n high = user_input\n guess = (low + high) / 2.0\n while abs(guess ** 2 - user_input) >= precision:\n if guess ** 2 > user_input:\n high = guess\n if guess ** 2 < user_input:\n low = guess\n guess = (low + high) / 2.0\n counter += 1\n return guess, counter\n\n\nwhile True:\n user_input = int(input('Enter a number: '))\n answer, counter = square_calculator(user_input)\n print('The square root of', user_input, 'is', round(answer, 6))\n print('It took', counter, 'guesses to figure it out.')\n", "step-4": "#Use bisection search to determine square root\ndef square_calculator(user_input):\n \"\"\"\n accepts input from a user to determine the square root\n returns the square root of the user input\n \"\"\"\n precision = .000000000001\n counter = 0\n low = 0\n high = user_input\n guess = (low + high) / 2.0\n\n while abs(guess**2 - user_input) >= precision:\n if guess**2 > user_input:\n high = guess\n if guess**2 < user_input:\n low = guess\n guess = (low + high) / 2.0\n counter+= 1\n\n return(guess, counter)\n\n\nwhile True:\n user_input = int(input(\"Enter a number: \"))\n answer, counter = square_calculator(user_input)\n\n print(\"The square root of\", user_input, \"is\", 
round(answer,6))\n print(\"It took\", counter, \"guesses to figure it out.\")", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def link_bits(num: str=None, bit_i: str='0', bit_j: str='0', write: bool=True ) ->int: if num is None: num, bit_i, bit_j = read_input() num = int(num, 2) num_len = num.bit_length() mask = 2 ** num_len - 1 first_i = num >> num_len - int(bit_i) << int(bit_j) last_j = (num << num_len - int(bit_j) & mask) >> num_len - int(bit_j) result = first_i | last_j if write: write_output(f'{result}') return result <|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def link_bits(num: str=None, bit_i: str='0', bit_j: str='0', write: bool=True ) ->int: if num is None: num, bit_i, bit_j = read_input() num = int(num, 2) num_len = num.bit_length() mask = 2 ** num_len - 1 first_i = num >> num_len - int(bit_i) << int(bit_j) last_j = (num << num_len - int(bit_j) & mask) >> num_len - int(bit_j) result = first_i | last_j if write: write_output(f'{result}') return result def get_bits_between(num: str=None, bit_i: str='0', bit_j: str='0', write: bool=True) ->int: if num is None: num, bit_i, bit_j = read_input() num = int(num, 2) mask = 2 ** (num.bit_length() - int(bit_j)) - 1 result = (num >> int(bit_j) << int(bit_i) & mask) >> int(bit_i) if write: write_output(f'{result}') return result <|reserved_special_token_1|> from ..lib import read_input, write_output def link_bits(num: str=None, bit_i: str='0', bit_j: str='0', write: bool=True ) ->int: if num is None: num, bit_i, bit_j = read_input() num = int(num, 2) num_len = num.bit_length() mask = 2 ** num_len - 1 first_i = num >> num_len - int(bit_i) << int(bit_j) last_j = (num << num_len - int(bit_j) & mask) >> num_len - int(bit_j) result = first_i | last_j if write: write_output(f'{result}') return result def get_bits_between(num: str=None, bit_i: str='0', bit_j: str='0', write: bool=True) ->int: if num is None: num, bit_i, bit_j = read_input() num = int(num, 2) mask = 2 ** (num.bit_length() - int(bit_j)) - 1 result = (num 
>> int(bit_j) << int(bit_i) & mask) >> int(bit_i) if write: write_output(f'{result}') return result <|reserved_special_token_1|> from ..lib import read_input, write_output def link_bits(num: str = None, bit_i: str = '0', bit_j: str = '0', write: bool = True) -> int: if num is None: num, bit_i, bit_j = read_input() num = int(num, 2) num_len = num.bit_length() mask = 2 ** num_len - 1 first_i = (num >> (num_len - int(bit_i))) << int(bit_j) last_j = ((num << (num_len - int(bit_j))) & mask) >> (num_len - int(bit_j)) result = first_i | last_j if write: write_output(f'{result}') return result def get_bits_between(num: str = None, bit_i: str = '0', bit_j: str = '0', write: bool = True) -> int: if num is None: num, bit_i, bit_j = read_input() num = int(num, 2) mask = 2 ** (num.bit_length() - int(bit_j)) - 1 result = ((((num >> int(bit_j)) << int(bit_i)) & mask) >> int(bit_i)) if write: write_output(f'{result}') return result
flexible
{ "blob_id": "113572682ca83408b7c22e0e178f29945d741142", "index": 6672, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef link_bits(num: str=None, bit_i: str='0', bit_j: str='0', write: bool=True\n ) ->int:\n if num is None:\n num, bit_i, bit_j = read_input()\n num = int(num, 2)\n num_len = num.bit_length()\n mask = 2 ** num_len - 1\n first_i = num >> num_len - int(bit_i) << int(bit_j)\n last_j = (num << num_len - int(bit_j) & mask) >> num_len - int(bit_j)\n result = first_i | last_j\n if write:\n write_output(f'{result}')\n return result\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef link_bits(num: str=None, bit_i: str='0', bit_j: str='0', write: bool=True\n ) ->int:\n if num is None:\n num, bit_i, bit_j = read_input()\n num = int(num, 2)\n num_len = num.bit_length()\n mask = 2 ** num_len - 1\n first_i = num >> num_len - int(bit_i) << int(bit_j)\n last_j = (num << num_len - int(bit_j) & mask) >> num_len - int(bit_j)\n result = first_i | last_j\n if write:\n write_output(f'{result}')\n return result\n\n\ndef get_bits_between(num: str=None, bit_i: str='0', bit_j: str='0', write:\n bool=True) ->int:\n if num is None:\n num, bit_i, bit_j = read_input()\n num = int(num, 2)\n mask = 2 ** (num.bit_length() - int(bit_j)) - 1\n result = (num >> int(bit_j) << int(bit_i) & mask) >> int(bit_i)\n if write:\n write_output(f'{result}')\n return result\n", "step-4": "from ..lib import read_input, write_output\n\n\ndef link_bits(num: str=None, bit_i: str='0', bit_j: str='0', write: bool=True\n ) ->int:\n if num is None:\n num, bit_i, bit_j = read_input()\n num = int(num, 2)\n num_len = num.bit_length()\n mask = 2 ** num_len - 1\n first_i = num >> num_len - int(bit_i) << int(bit_j)\n last_j = (num << num_len - int(bit_j) & mask) >> num_len - int(bit_j)\n result = first_i | last_j\n if write:\n write_output(f'{result}')\n return result\n\n\ndef get_bits_between(num: str=None, bit_i: str='0', bit_j: str='0', write:\n bool=True) ->int:\n if num is None:\n num, 
bit_i, bit_j = read_input()\n num = int(num, 2)\n mask = 2 ** (num.bit_length() - int(bit_j)) - 1\n result = (num >> int(bit_j) << int(bit_i) & mask) >> int(bit_i)\n if write:\n write_output(f'{result}')\n return result\n", "step-5": "from ..lib import read_input, write_output\n\n\ndef link_bits(num: str = None, bit_i: str = '0', bit_j: str = '0', write: bool = True) -> int:\n if num is None:\n num, bit_i, bit_j = read_input()\n num = int(num, 2)\n num_len = num.bit_length()\n mask = 2 ** num_len - 1\n first_i = (num >> (num_len - int(bit_i))) << int(bit_j)\n last_j = ((num << (num_len - int(bit_j))) & mask) >> (num_len - int(bit_j))\n result = first_i | last_j\n if write:\n write_output(f'{result}')\n return result\n\n\ndef get_bits_between(num: str = None, bit_i: str = '0', bit_j: str = '0', write: bool = True) -> int:\n if num is None:\n num, bit_i, bit_j = read_input()\n num = int(num, 2)\n mask = 2 ** (num.bit_length() - int(bit_j)) - 1\n result = ((((num >> int(bit_j)) << int(bit_i)) & mask) >> int(bit_i))\n if write:\n write_output(f'{result}')\n return result\n", "step-ids": [ 0, 1, 2, 3, 4 ] }
[ 0, 1, 2, 3, 4 ]
#!/usr/bin/python import xml.dom.minidom import os import matplotlib.pyplot as plt import cPickle as p import numpy as np def modifyXML(name,numCar): DOMTree = xml.dom.minidom.parse(name) objects=DOMTree.getElementsByTagName('object') for object in objects: if object.getElementsByTagName('name')[0].childNodes[0].nodeValue =='Car': xmin=float(object.getElementsByTagName('xmin')[0].childNodes[0].nodeValue) ymin=float(object.getElementsByTagName('ymin')[0].childNodes[0].nodeValue) xmax=float(object.getElementsByTagName('xmax')[0].childNodes[0].nodeValue) ymax=float(object.getElementsByTagName('ymax')[0].childNodes[0].nodeValue) numCar.append((ymax-ymin)*(xmax-xmin)) dir=os.getcwd()+'/xml' file=os.listdir(dir) numCar=[] for filename in file: #print filename if filename[0]!='.': modifyXML('xml/'+filename,numCar) num_bins=40 size=len(numCar) print 'size of numCar = %d'%size f=open('boxArea.pkl','w') p.dump(numCar,f) f.close() i=0 for x in numCar: if x>40000: i=i+1 print 'num of car bigger than %d is %d'%(40000,i) ''' plt.hist(numCar, num_bins,facecolor='blue', alpha=0.5) plt.show() '''
normal
{ "blob_id": "1c13a9ca3617dc6f1a1f1aa8249cce37062a449b", "index": 8243, "step-1": "#!/usr/bin/python\nimport xml.dom.minidom\nimport os\nimport matplotlib.pyplot as plt\nimport cPickle as p\nimport numpy as np\n\ndef modifyXML(name,numCar):\n\tDOMTree = xml.dom.minidom.parse(name)\n\tobjects=DOMTree.getElementsByTagName('object')\n\tfor object in objects:\n\t\tif object.getElementsByTagName('name')[0].childNodes[0].nodeValue =='Car':\n\t\t\txmin=float(object.getElementsByTagName('xmin')[0].childNodes[0].nodeValue)\n\t\t\tymin=float(object.getElementsByTagName('ymin')[0].childNodes[0].nodeValue)\n\t\t\txmax=float(object.getElementsByTagName('xmax')[0].childNodes[0].nodeValue)\n\t\t\tymax=float(object.getElementsByTagName('ymax')[0].childNodes[0].nodeValue)\n numCar.append((ymax-ymin)*(xmax-xmin))\n\ndir=os.getcwd()+'/xml'\nfile=os.listdir(dir)\nnumCar=[]\nfor filename in file:\n\t#print filename\n\tif filename[0]!='.':\n\t\tmodifyXML('xml/'+filename,numCar)\nnum_bins=40\nsize=len(numCar)\nprint 'size of numCar = %d'%size\n\nf=open('boxArea.pkl','w')\np.dump(numCar,f)\nf.close()\n\ni=0\nfor x in numCar:\n if x>40000:\n i=i+1\nprint 'num of car bigger than %d is %d'%(40000,i)\n'''\nplt.hist(numCar, num_bins,facecolor='blue', alpha=0.5)\nplt.show()\n'''\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
''' Generate the output images and videos, including rendering of the pipeline ''' import os import matplotlib.image as mpimg import cv2 from moviepy.editor import VideoFileClip from networkx.drawing.nx_agraph import to_agraph import lanespipeline import lanefinder from compgraph import CompGraph, CompGraphRunner COMP_GRAPH = lanespipeline.computational_graph DEFAULT_PARAMS = lanespipeline.parameters def create_dir(directory): if not os.path.exists(directory): os.makedirs(directory) def get_full_paths_to_files(files_dir, filenames): return [os.path.join(files_dir, f) for f in filenames] def process_images(im_filenames, cg, params): finder, find_and_draw_lanes = lanefinder.create_objects(cg, params) images = (mpimg.imread(fname) for fname in im_filenames) return (find_and_draw_lanes(im) for im in images) def save_images(images, destination_filenames): for fname, im in zip(destination_filenames, images): mpimg.imsave(fname, im) def process_and_save_video(video_fname_src, video_fname_dst, cg, params): finder, find_and_draw_lanes = lanefinder.create_objects(cg, params) video_src = VideoFileClip(video_fname_src) video_dst = video_src.fl_image(find_and_draw_lanes) video_dst.write_videofile(video_fname_dst, audio=False) def visualize_pipeline(fname_dst, cg=COMP_GRAPH, params=DEFAULT_PARAMS): runner = CompGraphRunner(cg, frozen_tokens=params) ag = to_agraph(runner.token_manager.to_networkx()) ag.layout('dot') ag.draw(fname_dst) if __name__ == '__main__': ''' INITIALIZATION ''' im_dir_src = 'test_images' im_dir_dst = 'test_images_output' create_dir(im_dir_dst) im_files_src = get_full_paths_to_files(im_dir_src, os.listdir(im_dir_src)) im_files_dst = get_full_paths_to_files(im_dir_dst, os.listdir(im_dir_src)) video_dir_src = 'test_videos' video_dir_dst = 'test_videos_output' create_dir(video_dir_dst) video_files = ('solidWhiteRight.mp4', 'solidYellowLeft.mp4') video_files_src = get_full_paths_to_files(video_dir_src, video_files) video_files_dst = 
get_full_paths_to_files(video_dir_dst, video_files) params_1 = DEFAULT_PARAMS.copy() params_1['canny_lo'] = 50 params_1['canny_hi'] = 150 ''' MEDIA GENERATION ''' visualize_pipeline('pipeline.png') images_dst = process_images(im_files_src, COMP_GRAPH, DEFAULT_PARAMS) save_images(images_dst, im_files_dst) process_and_save_video(video_files_src[0], video_files_dst[0], COMP_GRAPH, DEFAULT_PARAMS) process_and_save_video(video_files_src[1], video_files_dst[1], COMP_GRAPH, params_1)
normal
{ "blob_id": "456d79a69c170a59af742648f16e0171cd5a2412", "index": 1412, "step-1": "<mask token>\n\n\ndef create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef get_full_paths_to_files(files_dir, filenames):\n return [os.path.join(files_dir, f) for f in filenames]\n\n\ndef process_images(im_filenames, cg, params):\n finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)\n images = (mpimg.imread(fname) for fname in im_filenames)\n return (find_and_draw_lanes(im) for im in images)\n\n\ndef save_images(images, destination_filenames):\n for fname, im in zip(destination_filenames, images):\n mpimg.imsave(fname, im)\n\n\n<mask token>\n\n\ndef visualize_pipeline(fname_dst, cg=COMP_GRAPH, params=DEFAULT_PARAMS):\n runner = CompGraphRunner(cg, frozen_tokens=params)\n ag = to_agraph(runner.token_manager.to_networkx())\n ag.layout('dot')\n ag.draw(fname_dst)\n\n\n<mask token>\n", "step-2": "<mask token>\n\n\ndef create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef get_full_paths_to_files(files_dir, filenames):\n return [os.path.join(files_dir, f) for f in filenames]\n\n\ndef process_images(im_filenames, cg, params):\n finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)\n images = (mpimg.imread(fname) for fname in im_filenames)\n return (find_and_draw_lanes(im) for im in images)\n\n\ndef save_images(images, destination_filenames):\n for fname, im in zip(destination_filenames, images):\n mpimg.imsave(fname, im)\n\n\ndef process_and_save_video(video_fname_src, video_fname_dst, cg, params):\n finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)\n video_src = VideoFileClip(video_fname_src)\n video_dst = video_src.fl_image(find_and_draw_lanes)\n video_dst.write_videofile(video_fname_dst, audio=False)\n\n\ndef visualize_pipeline(fname_dst, cg=COMP_GRAPH, params=DEFAULT_PARAMS):\n runner = CompGraphRunner(cg, frozen_tokens=params)\n ag = 
to_agraph(runner.token_manager.to_networkx())\n ag.layout('dot')\n ag.draw(fname_dst)\n\n\n<mask token>\n", "step-3": "<mask token>\n\n\ndef create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef get_full_paths_to_files(files_dir, filenames):\n return [os.path.join(files_dir, f) for f in filenames]\n\n\ndef process_images(im_filenames, cg, params):\n finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)\n images = (mpimg.imread(fname) for fname in im_filenames)\n return (find_and_draw_lanes(im) for im in images)\n\n\ndef save_images(images, destination_filenames):\n for fname, im in zip(destination_filenames, images):\n mpimg.imsave(fname, im)\n\n\ndef process_and_save_video(video_fname_src, video_fname_dst, cg, params):\n finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)\n video_src = VideoFileClip(video_fname_src)\n video_dst = video_src.fl_image(find_and_draw_lanes)\n video_dst.write_videofile(video_fname_dst, audio=False)\n\n\ndef visualize_pipeline(fname_dst, cg=COMP_GRAPH, params=DEFAULT_PARAMS):\n runner = CompGraphRunner(cg, frozen_tokens=params)\n ag = to_agraph(runner.token_manager.to_networkx())\n ag.layout('dot')\n ag.draw(fname_dst)\n\n\nif __name__ == '__main__':\n \"\"\" INITIALIZATION \"\"\"\n im_dir_src = 'test_images'\n im_dir_dst = 'test_images_output'\n create_dir(im_dir_dst)\n im_files_src = get_full_paths_to_files(im_dir_src, os.listdir(im_dir_src))\n im_files_dst = get_full_paths_to_files(im_dir_dst, os.listdir(im_dir_src))\n video_dir_src = 'test_videos'\n video_dir_dst = 'test_videos_output'\n create_dir(video_dir_dst)\n video_files = 'solidWhiteRight.mp4', 'solidYellowLeft.mp4'\n video_files_src = get_full_paths_to_files(video_dir_src, video_files)\n video_files_dst = get_full_paths_to_files(video_dir_dst, video_files)\n params_1 = DEFAULT_PARAMS.copy()\n params_1['canny_lo'] = 50\n params_1['canny_hi'] = 150\n \"\"\" MEDIA GENERATION \"\"\"\n 
visualize_pipeline('pipeline.png')\n images_dst = process_images(im_files_src, COMP_GRAPH, DEFAULT_PARAMS)\n save_images(images_dst, im_files_dst)\n process_and_save_video(video_files_src[0], video_files_dst[0],\n COMP_GRAPH, DEFAULT_PARAMS)\n process_and_save_video(video_files_src[1], video_files_dst[1],\n COMP_GRAPH, params_1)\n", "step-4": "<mask token>\nCOMP_GRAPH = lanespipeline.computational_graph\nDEFAULT_PARAMS = lanespipeline.parameters\n\n\ndef create_dir(directory):\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef get_full_paths_to_files(files_dir, filenames):\n return [os.path.join(files_dir, f) for f in filenames]\n\n\ndef process_images(im_filenames, cg, params):\n finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)\n images = (mpimg.imread(fname) for fname in im_filenames)\n return (find_and_draw_lanes(im) for im in images)\n\n\ndef save_images(images, destination_filenames):\n for fname, im in zip(destination_filenames, images):\n mpimg.imsave(fname, im)\n\n\ndef process_and_save_video(video_fname_src, video_fname_dst, cg, params):\n finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)\n video_src = VideoFileClip(video_fname_src)\n video_dst = video_src.fl_image(find_and_draw_lanes)\n video_dst.write_videofile(video_fname_dst, audio=False)\n\n\ndef visualize_pipeline(fname_dst, cg=COMP_GRAPH, params=DEFAULT_PARAMS):\n runner = CompGraphRunner(cg, frozen_tokens=params)\n ag = to_agraph(runner.token_manager.to_networkx())\n ag.layout('dot')\n ag.draw(fname_dst)\n\n\nif __name__ == '__main__':\n \"\"\" INITIALIZATION \"\"\"\n im_dir_src = 'test_images'\n im_dir_dst = 'test_images_output'\n create_dir(im_dir_dst)\n im_files_src = get_full_paths_to_files(im_dir_src, os.listdir(im_dir_src))\n im_files_dst = get_full_paths_to_files(im_dir_dst, os.listdir(im_dir_src))\n video_dir_src = 'test_videos'\n video_dir_dst = 'test_videos_output'\n create_dir(video_dir_dst)\n video_files = 'solidWhiteRight.mp4', 
'solidYellowLeft.mp4'\n video_files_src = get_full_paths_to_files(video_dir_src, video_files)\n video_files_dst = get_full_paths_to_files(video_dir_dst, video_files)\n params_1 = DEFAULT_PARAMS.copy()\n params_1['canny_lo'] = 50\n params_1['canny_hi'] = 150\n \"\"\" MEDIA GENERATION \"\"\"\n visualize_pipeline('pipeline.png')\n images_dst = process_images(im_files_src, COMP_GRAPH, DEFAULT_PARAMS)\n save_images(images_dst, im_files_dst)\n process_and_save_video(video_files_src[0], video_files_dst[0],\n COMP_GRAPH, DEFAULT_PARAMS)\n process_and_save_video(video_files_src[1], video_files_dst[1],\n COMP_GRAPH, params_1)\n", "step-5": "'''\nGenerate the output images and videos, including rendering of the pipeline\n'''\n\nimport os\nimport matplotlib.image as mpimg\nimport cv2\nfrom moviepy.editor import VideoFileClip\nfrom networkx.drawing.nx_agraph import to_agraph\n\nimport lanespipeline\nimport lanefinder\nfrom compgraph import CompGraph, CompGraphRunner\n\n\nCOMP_GRAPH = lanespipeline.computational_graph\nDEFAULT_PARAMS = lanespipeline.parameters\n\n\ndef create_dir(directory):\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\ndef get_full_paths_to_files(files_dir, filenames):\n\n return [os.path.join(files_dir, f) for f in filenames]\n\n\ndef process_images(im_filenames, cg, params):\n\n finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)\n\n images = (mpimg.imread(fname) for fname in im_filenames)\n\n return (find_and_draw_lanes(im) for im in images)\n\n\ndef save_images(images, destination_filenames):\n\n for fname, im in zip(destination_filenames, images):\n mpimg.imsave(fname, im)\n\n\ndef process_and_save_video(video_fname_src, video_fname_dst, cg, params):\n\n finder, find_and_draw_lanes = lanefinder.create_objects(cg, params)\n\n video_src = VideoFileClip(video_fname_src)\n\n video_dst = video_src.fl_image(find_and_draw_lanes)\n video_dst.write_videofile(video_fname_dst, audio=False)\n\n\ndef visualize_pipeline(fname_dst, 
cg=COMP_GRAPH, params=DEFAULT_PARAMS):\n\n runner = CompGraphRunner(cg, frozen_tokens=params)\n\n ag = to_agraph(runner.token_manager.to_networkx())\n ag.layout('dot')\n ag.draw(fname_dst)\n\n\nif __name__ == '__main__':\n\n ''' INITIALIZATION '''\n\n im_dir_src = 'test_images'\n im_dir_dst = 'test_images_output'\n create_dir(im_dir_dst)\n\n im_files_src = get_full_paths_to_files(im_dir_src, os.listdir(im_dir_src))\n im_files_dst = get_full_paths_to_files(im_dir_dst, os.listdir(im_dir_src))\n\n video_dir_src = 'test_videos'\n video_dir_dst = 'test_videos_output'\n create_dir(video_dir_dst)\n\n video_files = ('solidWhiteRight.mp4', 'solidYellowLeft.mp4')\n video_files_src = get_full_paths_to_files(video_dir_src, video_files)\n video_files_dst = get_full_paths_to_files(video_dir_dst, video_files)\n\n params_1 = DEFAULT_PARAMS.copy()\n params_1['canny_lo'] = 50\n params_1['canny_hi'] = 150\n\n ''' MEDIA GENERATION '''\n\n visualize_pipeline('pipeline.png')\n\n images_dst = process_images(im_files_src, COMP_GRAPH, DEFAULT_PARAMS)\n save_images(images_dst, im_files_dst)\n\n process_and_save_video(video_files_src[0], video_files_dst[0], COMP_GRAPH, DEFAULT_PARAMS)\n process_and_save_video(video_files_src[1], video_files_dst[1], COMP_GRAPH, params_1)\n", "step-ids": [ 5, 6, 7, 8, 10 ] }
[ 5, 6, 7, 8, 10 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> def Schout2ConTank(a, b, d): th = d * b / sqrt(a ** 2 - b ** 2) k = 1 / (d * sqrt(a ** 2 - b ** 2)) s = sqrt(d / sqrt(a ** 2 - b ** 2)) return th, k, s <|reserved_special_token_1|> from numpy import sqrt def Schout2ConTank(a, b, d): th = d * b / sqrt(a ** 2 - b ** 2) k = 1 / (d * sqrt(a ** 2 - b ** 2)) s = sqrt(d / sqrt(a ** 2 - b ** 2)) return th, k, s <|reserved_special_token_1|> from numpy import sqrt def Schout2ConTank(a, b, d): # This function converts parameters from Schoutens notation to Cont-Tankov # notation ## Code th = d * b / sqrt(a ** 2 - b ** 2) k = 1 / (d * sqrt(a ** 2 - b ** 2)) s = sqrt(d / sqrt(a ** 2 - b ** 2)) return th, k, s
flexible
{ "blob_id": "4dda122a8c3a2aab62bb202945f6fb9cb73cf772", "index": 8330, "step-1": "<mask token>\n", "step-2": "<mask token>\n\n\ndef Schout2ConTank(a, b, d):\n th = d * b / sqrt(a ** 2 - b ** 2)\n k = 1 / (d * sqrt(a ** 2 - b ** 2))\n s = sqrt(d / sqrt(a ** 2 - b ** 2))\n return th, k, s\n", "step-3": "from numpy import sqrt\n\n\ndef Schout2ConTank(a, b, d):\n th = d * b / sqrt(a ** 2 - b ** 2)\n k = 1 / (d * sqrt(a ** 2 - b ** 2))\n s = sqrt(d / sqrt(a ** 2 - b ** 2))\n return th, k, s\n", "step-4": "from numpy import sqrt\n\n\ndef Schout2ConTank(a, b, d):\n # This function converts parameters from Schoutens notation to Cont-Tankov\n # notation\n\n ## Code\n th = d * b / sqrt(a ** 2 - b ** 2)\n k = 1 / (d * sqrt(a ** 2 - b ** 2))\n s = sqrt(d / sqrt(a ** 2 - b ** 2))\n return th, k, s\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
import json from .errors import TorrentNotValid, TorrentHashNotFound, FailedLogin, HttpException class QBittorrentClient: """ QBittorent client """ def __init__(self, *, connector): self.connector = connector def login(self, username: str, password: str): return self.connector.login(username, password) def logout(self): return self.connector.logout() def get_application_version(self): """ Grab the application version of QBittorent. Returns ------- str """ return self.connector.request('GET', '/app/version') def get_api_version(self): """ Grab the api version. Returns ------- str """ return self.connector.request('GET', '/app/webapiVersion') def get_log(self, **kwargs): """ Grabs the log. Parameters ---------- normal: bool, optional Include normal messages info: bool, optional Include info messages warning: bool, optional Include warning messages critical: bool, optional Include critical messages last_known_id: int, optional Exclude messages with "message id" <= last_known_id Returns ------- dict """ payload = {'normal': kwargs.get('normal', True), 'info': kwargs.get ('info', True), 'warning': kwargs.get('warning', True), 'critical': kwargs.get('critical', True), 'last_known_id': kwargs.get('last_known_id', -1)} return self.connector.request('GET', '/log/main', payload=payload) def get_torrents(self, **kwargs): """ Gets the list of torrents. Parameters ---------- filter: str, optional Filter torrent list. Allowed filters: all, downloading, completed, paused, active, inactive, resumed category: str, optional Get torrents with the given category Empty string means "without category" No "category" parameter means "any category" sort: str, optional Sort torrents by given key. reverse: bool, optional Enable reverse sorting. limit: int, optional Limit the number of torrents returned offset: int, optional Set offset (if less than 0, offset from end) hashes: list or str, optional Filter by hashes. 
Returns ------- dict Property Type Description hash string Torrent hash name string Torrent name size integer Total size (bytes) of files selected for download progress float Torrent progress (percentage/100) dlspeed integer Torrent download speed (bytes/s) upspeed integer Torrent upload speed (bytes/s) priority integer Torrent priority. Returns -1 if queuing is disabled or torrent is in seed mode num_seeds integer Number of seeds connected to num_complete integer Number of seeds in the swarm num_leechs integer Number of leechers connected to num_incomplete integer Number of leechers in the swarm ratio float Torrent share ratio. Max ratio value: 9999. eta integer Torrent ETA (seconds) state string Torrent state. See table here below for the possible values seq_dl bool True if sequential download is enabled f_l_piece_prio bool True if first last piece are prioritized category string Category of the torrent super_seeding bool True if super seeding is enabled force_start bool True if force start is enabled for this torrent Possible values of state: Value Description error Some error occurred, applies to paused torrents missingFiles Torrent data files is missing uploading Torrent is being seeded and data is being transferred pausedUP Torrent is paused and has finished downloading queuedUP Queuing is enabled and torrent is queued for upload stalledUP Torrent is being seeded, but no connection were made checkingUP Torrent has finished downloading and is being checked forcedUP Torrent is forced to uploading and ignore queue limit allocating Torrent is allocating disk space for download downloading Torrent is being downloaded and data is being transferred metaDL Torrent has just started downloading and is fetching metadata pausedDL Torrent is paused and has NOT finished downloading queuedDL Queuing is enabled and torrent is queued for download stalledDL Torrent is being downloaded, but no connection were made checkingDL Same as checkingUP, but torrent has NOT finished 
downloading forceDL Torrent is forced to downloading to ignore queue limit checkingResumeData Checking resume data on qBt startup moving Torrent is moving to another location unknown Unknown status """ defaults = {'filter': None, 'category': None, 'sort': None, 'reverse': None, 'limit': None, 'offset': None} payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or kwargs.get(k)} hashes = kwargs.get('hashes') if hashes: payload['hashes'] = '|'.join(hashes) if isinstance(hashes, list ) else hashes return self.connector.request('POST', '/torrents/info', payload=payload ) def get_torrent_info(self, torrent_hash: str): payload = {'hash': torrent_hash} return self.connector.request('POST', '/torrents/properties', payload=payload) def add_torrents(self, *links: str, **kwargs): """ Adds torrents """ defaults = {'torrents': None, 'savepath': None, 'cookie': None, 'category': None, 'skip_checking': None, 'root_folder': None, 'rename': None, 'upLimit': None, 'dlLimit': None, 'autoTMM': None, 'sequentialDownload': None, 'firstLastPiecePrio': None} payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or kwargs.get(k)} if len(links): payload['urls'] = '\n'.join(links) return self.connector.request('POST', '/torrents/add', payload=payload) def pause_torrents(self, *hashes: str): """ Pauses torrents. """ payload = {'hashes': '|'.join(hashes)} return self.connector.request('POST', '/torrents/pause', payload= payload) def resume_torrent(self, hashes: list): """ Resumes a single torrent. """ payload = {'hashes': '|'.join(hashes)} return self.connector.request('POST', '/torrents/resume', payload= payload)
normal
{ "blob_id": "8ce2db0a28de8ddd504b744f3c9210d1a0ed7d45", "index": 699, "step-1": "<mask token>\n\n\nclass QBittorrentClient:\n <mask token>\n <mask token>\n\n def login(self, username: str, password: str):\n return self.connector.login(username, password)\n\n def logout(self):\n return self.connector.logout()\n\n def get_application_version(self):\n \"\"\"\n Grab the application version of QBittorent.\n\n Returns\n -------\n str\n \"\"\"\n return self.connector.request('GET', '/app/version')\n\n def get_api_version(self):\n \"\"\"\n Grab the api version.\n\n Returns\n -------\n str\n\n \"\"\"\n return self.connector.request('GET', '/app/webapiVersion')\n <mask token>\n <mask token>\n <mask token>\n\n def add_torrents(self, *links: str, **kwargs):\n \"\"\"\n Adds torrents\n \"\"\"\n defaults = {'torrents': None, 'savepath': None, 'cookie': None,\n 'category': None, 'skip_checking': None, 'root_folder': None,\n 'rename': None, 'upLimit': None, 'dlLimit': None, 'autoTMM':\n None, 'sequentialDownload': None, 'firstLastPiecePrio': None}\n payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or\n kwargs.get(k)}\n if len(links):\n payload['urls'] = '\\n'.join(links)\n return self.connector.request('POST', '/torrents/add', payload=payload)\n\n def pause_torrents(self, *hashes: str):\n \"\"\"\n Pauses torrents.\n \"\"\"\n payload = {'hashes': '|'.join(hashes)}\n return self.connector.request('POST', '/torrents/pause', payload=\n payload)\n\n def resume_torrent(self, hashes: list):\n \"\"\"\n Resumes a single torrent.\n \"\"\"\n payload = {'hashes': '|'.join(hashes)}\n return self.connector.request('POST', '/torrents/resume', payload=\n payload)\n", "step-2": "<mask token>\n\n\nclass QBittorrentClient:\n <mask token>\n\n def __init__(self, *, connector):\n self.connector = connector\n\n def login(self, username: str, password: str):\n return self.connector.login(username, password)\n\n def logout(self):\n return self.connector.logout()\n\n def 
get_application_version(self):\n \"\"\"\n Grab the application version of QBittorent.\n\n Returns\n -------\n str\n \"\"\"\n return self.connector.request('GET', '/app/version')\n\n def get_api_version(self):\n \"\"\"\n Grab the api version.\n\n Returns\n -------\n str\n\n \"\"\"\n return self.connector.request('GET', '/app/webapiVersion')\n\n def get_log(self, **kwargs):\n \"\"\"\n Grabs the log.\n\n Parameters\n ----------\n normal: bool, optional\n Include normal messages\n info: bool, optional\n Include info messages\n warning: bool, optional\n Include warning messages\n critical: bool, optional\n Include critical messages\n last_known_id: int, optional\n Exclude messages with \"message id\" <= last_known_id\n\n Returns\n -------\n dict\n \"\"\"\n payload = {'normal': kwargs.get('normal', True), 'info': kwargs.get\n ('info', True), 'warning': kwargs.get('warning', True),\n 'critical': kwargs.get('critical', True), 'last_known_id':\n kwargs.get('last_known_id', -1)}\n return self.connector.request('GET', '/log/main', payload=payload)\n <mask token>\n\n def get_torrent_info(self, torrent_hash: str):\n payload = {'hash': torrent_hash}\n return self.connector.request('POST', '/torrents/properties',\n payload=payload)\n\n def add_torrents(self, *links: str, **kwargs):\n \"\"\"\n Adds torrents\n \"\"\"\n defaults = {'torrents': None, 'savepath': None, 'cookie': None,\n 'category': None, 'skip_checking': None, 'root_folder': None,\n 'rename': None, 'upLimit': None, 'dlLimit': None, 'autoTMM':\n None, 'sequentialDownload': None, 'firstLastPiecePrio': None}\n payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or\n kwargs.get(k)}\n if len(links):\n payload['urls'] = '\\n'.join(links)\n return self.connector.request('POST', '/torrents/add', payload=payload)\n\n def pause_torrents(self, *hashes: str):\n \"\"\"\n Pauses torrents.\n \"\"\"\n payload = {'hashes': '|'.join(hashes)}\n return self.connector.request('POST', '/torrents/pause', payload=\n payload)\n\n 
def resume_torrent(self, hashes: list):\n \"\"\"\n Resumes a single torrent.\n \"\"\"\n payload = {'hashes': '|'.join(hashes)}\n return self.connector.request('POST', '/torrents/resume', payload=\n payload)\n", "step-3": "<mask token>\n\n\nclass QBittorrentClient:\n \"\"\"\n QBittorent client\n\n \"\"\"\n\n def __init__(self, *, connector):\n self.connector = connector\n\n def login(self, username: str, password: str):\n return self.connector.login(username, password)\n\n def logout(self):\n return self.connector.logout()\n\n def get_application_version(self):\n \"\"\"\n Grab the application version of QBittorent.\n\n Returns\n -------\n str\n \"\"\"\n return self.connector.request('GET', '/app/version')\n\n def get_api_version(self):\n \"\"\"\n Grab the api version.\n\n Returns\n -------\n str\n\n \"\"\"\n return self.connector.request('GET', '/app/webapiVersion')\n\n def get_log(self, **kwargs):\n \"\"\"\n Grabs the log.\n\n Parameters\n ----------\n normal: bool, optional\n Include normal messages\n info: bool, optional\n Include info messages\n warning: bool, optional\n Include warning messages\n critical: bool, optional\n Include critical messages\n last_known_id: int, optional\n Exclude messages with \"message id\" <= last_known_id\n\n Returns\n -------\n dict\n \"\"\"\n payload = {'normal': kwargs.get('normal', True), 'info': kwargs.get\n ('info', True), 'warning': kwargs.get('warning', True),\n 'critical': kwargs.get('critical', True), 'last_known_id':\n kwargs.get('last_known_id', -1)}\n return self.connector.request('GET', '/log/main', payload=payload)\n\n def get_torrents(self, **kwargs):\n \"\"\"\n Gets the list of torrents.\n\n Parameters\n ----------\n filter: str, optional\n Filter torrent list.\n Allowed filters: all, downloading, completed, paused, active, inactive, resumed\n category: str, optional\n Get torrents with the given category\n Empty string means \"without category\"\n No \"category\" parameter means \"any category\"\n sort: str, 
optional\n Sort torrents by given key.\n reverse: bool, optional\n Enable reverse sorting.\n limit: int, optional\n Limit the number of torrents returned\n offset: int, optional\n Set offset (if less than 0, offset from end)\n hashes: list or str, optional\n Filter by hashes.\n \n Returns\n -------\n dict\n\n Property Type Description\n hash string Torrent hash\n name string Torrent name\n size integer Total size (bytes) of files selected for download\n progress float Torrent progress (percentage/100)\n dlspeed integer Torrent download speed (bytes/s)\n upspeed integer Torrent upload speed (bytes/s)\n priority integer Torrent priority. Returns -1 if queuing is disabled or torrent is in seed mode\n num_seeds integer Number of seeds connected to\n num_complete integer Number of seeds in the swarm\n num_leechs integer Number of leechers connected to\n num_incomplete integer Number of leechers in the swarm\n ratio float Torrent share ratio. Max ratio value: 9999.\n eta integer Torrent ETA (seconds)\n state string Torrent state. 
See table here below for the possible values\n seq_dl bool True if sequential download is enabled\n f_l_piece_prio bool True if first last piece are prioritized\n category string Category of the torrent\n super_seeding bool True if super seeding is enabled\n force_start bool True if force start is enabled for this torrent\n\n Possible values of state:\n \n Value Description\n error Some error occurred, applies to paused torrents\n missingFiles Torrent data files is missing\n uploading Torrent is being seeded and data is being transferred\n pausedUP Torrent is paused and has finished downloading\n queuedUP Queuing is enabled and torrent is queued for upload\n stalledUP Torrent is being seeded, but no connection were made\n checkingUP Torrent has finished downloading and is being checked\n forcedUP Torrent is forced to uploading and ignore queue limit\n allocating Torrent is allocating disk space for download\n downloading Torrent is being downloaded and data is being transferred\n metaDL Torrent has just started downloading and is fetching metadata\n pausedDL Torrent is paused and has NOT finished downloading\n queuedDL Queuing is enabled and torrent is queued for download\n stalledDL Torrent is being downloaded, but no connection were made\n checkingDL Same as checkingUP, but torrent has NOT finished downloading\n forceDL Torrent is forced to downloading to ignore queue limit\n checkingResumeData Checking resume data on qBt startup\n moving Torrent is moving to another location\n unknown Unknown status\n\n \"\"\"\n defaults = {'filter': None, 'category': None, 'sort': None,\n 'reverse': None, 'limit': None, 'offset': None}\n payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or\n kwargs.get(k)}\n hashes = kwargs.get('hashes')\n if hashes:\n payload['hashes'] = '|'.join(hashes) if isinstance(hashes, list\n ) else hashes\n return self.connector.request('POST', '/torrents/info', payload=payload\n )\n\n def get_torrent_info(self, torrent_hash: str):\n 
payload = {'hash': torrent_hash}\n return self.connector.request('POST', '/torrents/properties',\n payload=payload)\n\n def add_torrents(self, *links: str, **kwargs):\n \"\"\"\n Adds torrents\n \"\"\"\n defaults = {'torrents': None, 'savepath': None, 'cookie': None,\n 'category': None, 'skip_checking': None, 'root_folder': None,\n 'rename': None, 'upLimit': None, 'dlLimit': None, 'autoTMM':\n None, 'sequentialDownload': None, 'firstLastPiecePrio': None}\n payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or\n kwargs.get(k)}\n if len(links):\n payload['urls'] = '\\n'.join(links)\n return self.connector.request('POST', '/torrents/add', payload=payload)\n\n def pause_torrents(self, *hashes: str):\n \"\"\"\n Pauses torrents.\n \"\"\"\n payload = {'hashes': '|'.join(hashes)}\n return self.connector.request('POST', '/torrents/pause', payload=\n payload)\n\n def resume_torrent(self, hashes: list):\n \"\"\"\n Resumes a single torrent.\n \"\"\"\n payload = {'hashes': '|'.join(hashes)}\n return self.connector.request('POST', '/torrents/resume', payload=\n payload)\n", "step-4": "import json\nfrom .errors import TorrentNotValid, TorrentHashNotFound, FailedLogin, HttpException\n\n\nclass QBittorrentClient:\n \"\"\"\n QBittorent client\n\n \"\"\"\n\n def __init__(self, *, connector):\n self.connector = connector\n\n def login(self, username: str, password: str):\n return self.connector.login(username, password)\n\n def logout(self):\n return self.connector.logout()\n\n def get_application_version(self):\n \"\"\"\n Grab the application version of QBittorent.\n\n Returns\n -------\n str\n \"\"\"\n return self.connector.request('GET', '/app/version')\n\n def get_api_version(self):\n \"\"\"\n Grab the api version.\n\n Returns\n -------\n str\n\n \"\"\"\n return self.connector.request('GET', '/app/webapiVersion')\n\n def get_log(self, **kwargs):\n \"\"\"\n Grabs the log.\n\n Parameters\n ----------\n normal: bool, optional\n Include normal messages\n info: bool, 
optional\n Include info messages\n warning: bool, optional\n Include warning messages\n critical: bool, optional\n Include critical messages\n last_known_id: int, optional\n Exclude messages with \"message id\" <= last_known_id\n\n Returns\n -------\n dict\n \"\"\"\n payload = {'normal': kwargs.get('normal', True), 'info': kwargs.get\n ('info', True), 'warning': kwargs.get('warning', True),\n 'critical': kwargs.get('critical', True), 'last_known_id':\n kwargs.get('last_known_id', -1)}\n return self.connector.request('GET', '/log/main', payload=payload)\n\n def get_torrents(self, **kwargs):\n \"\"\"\n Gets the list of torrents.\n\n Parameters\n ----------\n filter: str, optional\n Filter torrent list.\n Allowed filters: all, downloading, completed, paused, active, inactive, resumed\n category: str, optional\n Get torrents with the given category\n Empty string means \"without category\"\n No \"category\" parameter means \"any category\"\n sort: str, optional\n Sort torrents by given key.\n reverse: bool, optional\n Enable reverse sorting.\n limit: int, optional\n Limit the number of torrents returned\n offset: int, optional\n Set offset (if less than 0, offset from end)\n hashes: list or str, optional\n Filter by hashes.\n \n Returns\n -------\n dict\n\n Property Type Description\n hash string Torrent hash\n name string Torrent name\n size integer Total size (bytes) of files selected for download\n progress float Torrent progress (percentage/100)\n dlspeed integer Torrent download speed (bytes/s)\n upspeed integer Torrent upload speed (bytes/s)\n priority integer Torrent priority. Returns -1 if queuing is disabled or torrent is in seed mode\n num_seeds integer Number of seeds connected to\n num_complete integer Number of seeds in the swarm\n num_leechs integer Number of leechers connected to\n num_incomplete integer Number of leechers in the swarm\n ratio float Torrent share ratio. 
Max ratio value: 9999.\n eta integer Torrent ETA (seconds)\n state string Torrent state. See table here below for the possible values\n seq_dl bool True if sequential download is enabled\n f_l_piece_prio bool True if first last piece are prioritized\n category string Category of the torrent\n super_seeding bool True if super seeding is enabled\n force_start bool True if force start is enabled for this torrent\n\n Possible values of state:\n \n Value Description\n error Some error occurred, applies to paused torrents\n missingFiles Torrent data files is missing\n uploading Torrent is being seeded and data is being transferred\n pausedUP Torrent is paused and has finished downloading\n queuedUP Queuing is enabled and torrent is queued for upload\n stalledUP Torrent is being seeded, but no connection were made\n checkingUP Torrent has finished downloading and is being checked\n forcedUP Torrent is forced to uploading and ignore queue limit\n allocating Torrent is allocating disk space for download\n downloading Torrent is being downloaded and data is being transferred\n metaDL Torrent has just started downloading and is fetching metadata\n pausedDL Torrent is paused and has NOT finished downloading\n queuedDL Queuing is enabled and torrent is queued for download\n stalledDL Torrent is being downloaded, but no connection were made\n checkingDL Same as checkingUP, but torrent has NOT finished downloading\n forceDL Torrent is forced to downloading to ignore queue limit\n checkingResumeData Checking resume data on qBt startup\n moving Torrent is moving to another location\n unknown Unknown status\n\n \"\"\"\n defaults = {'filter': None, 'category': None, 'sort': None,\n 'reverse': None, 'limit': None, 'offset': None}\n payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or\n kwargs.get(k)}\n hashes = kwargs.get('hashes')\n if hashes:\n payload['hashes'] = '|'.join(hashes) if isinstance(hashes, list\n ) else hashes\n return self.connector.request('POST', 
'/torrents/info', payload=payload\n )\n\n def get_torrent_info(self, torrent_hash: str):\n payload = {'hash': torrent_hash}\n return self.connector.request('POST', '/torrents/properties',\n payload=payload)\n\n def add_torrents(self, *links: str, **kwargs):\n \"\"\"\n Adds torrents\n \"\"\"\n defaults = {'torrents': None, 'savepath': None, 'cookie': None,\n 'category': None, 'skip_checking': None, 'root_folder': None,\n 'rename': None, 'upLimit': None, 'dlLimit': None, 'autoTMM':\n None, 'sequentialDownload': None, 'firstLastPiecePrio': None}\n payload = {k: kwargs.get(k, v) for k, v in defaults.items() if v or\n kwargs.get(k)}\n if len(links):\n payload['urls'] = '\\n'.join(links)\n return self.connector.request('POST', '/torrents/add', payload=payload)\n\n def pause_torrents(self, *hashes: str):\n \"\"\"\n Pauses torrents.\n \"\"\"\n payload = {'hashes': '|'.join(hashes)}\n return self.connector.request('POST', '/torrents/pause', payload=\n payload)\n\n def resume_torrent(self, hashes: list):\n \"\"\"\n Resumes a single torrent.\n \"\"\"\n payload = {'hashes': '|'.join(hashes)}\n return self.connector.request('POST', '/torrents/resume', payload=\n payload)\n", "step-5": null, "step-ids": [ 8, 11, 13, 14 ] }
[ 8, 11, 13, 14 ]
import multiprocessing name = "flask_gunicorn" workers = multiprocessing.cpu_count() * 2 + 1 loglevel = "debug" bind = f"0.0.0.0:18080"
normal
{ "blob_id": "2ad326f739b42b9c7c252078b8c28e90da17b95d", "index": 1802, "step-1": "<mask token>\n", "step-2": "<mask token>\nname = 'flask_gunicorn'\nworkers = multiprocessing.cpu_count() * 2 + 1\nloglevel = 'debug'\nbind = f'0.0.0.0:18080'\n", "step-3": "import multiprocessing\nname = 'flask_gunicorn'\nworkers = multiprocessing.cpu_count() * 2 + 1\nloglevel = 'debug'\nbind = f'0.0.0.0:18080'\n", "step-4": "import multiprocessing\n\nname = \"flask_gunicorn\"\nworkers = multiprocessing.cpu_count() * 2 + 1\nloglevel = \"debug\"\nbind = f\"0.0.0.0:18080\"\n\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
#!/usr/bin/env python import sys, re, urllib, urllib2, string, time, os from urllib2 import Request, urlopen, URLError, HTTPError from urlparse import urlparse joomla_version="undefined" #used for joomla veersin info provided_url="" #the selected provided url verbose_flag = 0 # If set to 1, prints verbose information default_input_path = "" # The default input file path default_output_path = "" # The default output file path if os.name == "nt": path_slash = "\\" else: path_slash = "/" # Prints usage def print_usage(): """ print_usage() Prints help screen and exits. """ print "" print "" print " JoomFind v0.1" print "" print " Script made by Jasdev Singh" print "" print " This script is made only for educational and offline self-testing " print " purposes. The creator is not responsible or accountable for any " print " damage or loss caused that you perform with this script. " print "" print " Usage example:" print '\tpython joomfind.py -f filepath | -v' print "" print " Put URL(s) to scan in a newline delimited file" print " URL(s) must point to homepage of the CMS " print "" print " Options:" print " -f filename (specify input file)" print " -v, --verbose (show detailed output)" print " --help (displays this help text)" print "" return # Testing if URL is reachable, with error handling def test_url(): """ test_url() Checks whether URL is rechable. Prints relevant infomation. """ global provided_url global verbose_flag # extracting url provided_url = urlparse(provided_url).scheme+"://"+urlparse(provided_url).netloc print provided_url if verbose_flag: print "\t[.] Checking if connection can be established...",# + provided_url try: response = urllib2.urlopen(provided_url) except HTTPError, e: if verbose_flag: print "[!] Failed" return 0 except URLError, e: if verbose_flag: print "[!] 
Failed" return 0 else: valid_target = 1 if verbose_flag: print "Success" return 1 # Scans for the HTML meta tag information def scan_target_metatag(): """ scan_target_metatag() Scans the meta-tag of the website. The meta-tag has information that can lead to the detection of Joomla. """ target_meta_url=provided_url+"/index.php" if verbose_flag: print "\t[.] Trying to access meta tag information...", #+ target_meta_url try: response = urllib2.urlopen(target_meta_url) html = response.read(2000) #print html # Now extract the interesting information get_metatag = string.find(html, "Joomla! - Open Source Content Management") # If the target is not vulnerable exit if get_metatag == -1: meta_flag=0 if verbose_flag: print "Failed" else: meta_flag=1 if verbose_flag: print "Success" #print "meta flag="+str(meta_flag) return meta_flag except: if verbose_flag: print "Failed" # Tests whether the URL has a '/administrator' login page def scan_admin_url(): """ scan_admin_url() Scans the administrator URL of the website. The administrator URL, if reachable, is a clue that Joomla is being used. """ target_admin_url=provided_url+"/administrator/index.php" if verbose_flag: print "\t[.] Trying to access admin login page...", #+ target_admin_url try: response = urllib2.urlopen(target_admin_url) except HTTPError, e: admin_flag=0 #print "admin flag="+str(admin_flag) if verbose_flag: print "Failed" return admin_flag else: admin_flag=1 #print "admin flag="+str(admin_flag) if verbose_flag: print "Success" return admin_flag # Scans content of 'com_content' component def scan_com_content(): """ scan_com_content() Scans the content.xml file of the default component of the website. The content.xml file, if readable, is a clue that Joomla is being used. """ target_com_content=provided_url+"/administrator/components/com_content/content.xml" if verbose_flag: print "\t[.] 
Trying to access com_content component...", #+ target_com_content try: response = urllib2.urlopen(target_com_content) html = response.read() get_com = string.find(html, "Joomla") except HTTPError, e: com_component_flag=0 #print "com_component flag="+str(com_component_flag) if verbose_flag: print "Failed" return com_component_flag else: if get_com==-1: com_component_flag=0 if verbose_flag: print "Failed" else: com_component_flag=1 if verbose_flag: print "Success" #print "com_component flag="+str(com_component_flag) return com_component_flag # Scans the robots.txt file def scan_robots_txt(): """ scan_robots_txt() Scans the robots.txt file of website. The robots.txt file, if readable, has clues that Joomla is being used. """ target_robots_txt=provided_url+"/robots.txt" if verbose_flag: print "\t[.] Trying to access robots.txt file...",#+target_robots_txt try: response = urllib2.urlopen(target_robots_txt) html = response.read() get_robots = string.find(html, "Joomla") except HTTPError, e: robots_flag=0 #print "robots flag="+str(robots_flag) if verbose_flag: print "Failed" return robots_flag else: if get_robots==-1: robots_flag=0 if verbose_flag: print "Failed" else: robots_flag=1 if verbose_flag: print "Success" #print "robots flag="+str(robots_flag) return robots_flag # Scans the htaccess.txt file def scan_htaccess(): """ scan_htaccess() Scans the htaccess file of website. The htaccess file, if readable, has clues that Joomla is being used. """ target_htacess=provided_url+"/htaccess.txt" if verbose_flag: print "\t[.] 
Trying to access htaccess file...",#+target_htacess try: response = urllib2.urlopen(target_htacess) html = response.read() get_htaccess = string.find(html, "Joomla") except HTTPError, e: htaccess_flag=0 #print "htaccess flag="+str(htaccess_flag) if verbose_flag: print "Failed" return htaccess_flag else: if get_htaccess==-1: htaccess_flag=0 if verbose_flag: print "Failed" else: htaccess_flag=1 if verbose_flag: print "Success" #print "htaccess flag="+str(htaccess_flag) return htaccess_flag # Scans the system.css file def scan_system_css(): """ scan_system_css() Scans the system.css file of website. The system.css file, if readable, has clues that Joomla is being used. """ pass # Scans the MooTools.js file def scan_mootools(): """ scan_mootools() Scans the mootools.js file of website. The mootools.js file, if readable, has clues that Joomla is being used. """ target_mootools=provided_url+"/media/system/js/mootools-more.js" if verbose_flag: print "\t[.] Trying to access MooTools file...", #+ target_mootools try: response = urllib2.urlopen(target_mootools) html = response.read(3300) #print html get_mootools = string.find(html, 'MooTools.More={version:"1.4.0.1"') except HTTPError, e: mootools_flag=0 #print "mootools flag="+str(mootools_flag) if verbose_flag: print "Failed" return mootools_flag else: if get_mootools==-1: mootools_flag=0 if verbose_flag: print "Failed" else: mootools_flag=1 if verbose_flag: print "Success" joomla_version="2.x or 3.x" #print "mootools flag="+str(mootools_flag) return mootools_flag # Scans the en-GB.xml file def scan_engb_ini(): """ scan_engb_ini() Scans the en-GB.ini file of website. The en-GB.ini file, if readable, has clues that Joomla is being used. """ target_engb=provided_url+"/language/en-GB/en-GB.xml" if verbose_flag: print "\t[.] 
Trying to access en-GB file...", #+ target_engb try: response = urllib2.urlopen(target_engb) html = response.read(200) #print html t1 = string.find(html, '<version>') target_engb = html[t1+9:t1+14] except HTTPError, e: engb_flag=0 #print "engb flag="+str(engb_flag) if verbose_flag: print "Failed" return engb_flag else: if t1==-1: engb_flag=0 if verbose_flag: print "Failed" else: engb_flag=1 if verbose_flag: print "Success" global joomla_version joomla_version=target_engb #print "engb flag="+str(engb_flag) return engb_flag # Computes the result of the scans def compute_result(a,b,c,d,e,f,g): """ compute_result() Computes the final result. """ if (a or b or c or d or e or f or g)and ((a+b+c+d+e+f+g)>=3): return 1 else: return 0 # Reads URL's from an input file and processes them def process_from_file(): """ process_from_file() Starts processing the URL's from the input file. """ global default_input_path print "JoomFind v 1.0" print "\n\nTrying to read URL(s) form " + default_input_path + " file...\n" try: if not default_input_path: f = open("urls.txt") else: f=open(default_input_path) cwd=os.getcwd() file_path = cwd + path_slash + f.name # extracting url's to list from file start_urls = [url.strip() for url in f.readlines() if url[0] not in ['#',' ',"\n"]] if not start_urls: print "File is empty. Add some URL(s) first.\n" f.close() return 0 except: print "File not found. Make sure it exists.\n" return 0 #print start_urls num=str(len(start_urls)) print "Found " + num + " URL(s) on " + time.asctime(time.localtime(time.time())) + "\n" of=open(default_output_path,'a+') of.write("\n\n\tScanning " + num + " URL(s) ") of.write("\n\n\tDate\Time : " + time.asctime(time.localtime(time.time())) ) of.write("\n\n\tInput file path : " + default_input_path + "\n\n") of.close() for url in start_urls: global provided_url provided_url=url print "\nWorking on URL " + str(start_urls.index(url)+1) + ": " + provided_url processing() print "\nAll done! 
Check '" + default_output_path +"' file for results.\n" # Calls other scans and writes results to output file def processing(): """ processing() Calls other helper functions. """ err=test_url() of=open(default_output_path,'a+') if err!=0: metaf=scan_target_metatag() adminf=scan_admin_url() comf=scan_com_content() robotsf=scan_robots_txt() htf=scan_htaccess() moof=scan_mootools() engbf=scan_engb_ini() result=compute_result(metaf,adminf,comf,robotsf,htf,moof,engbf) if result==1: #print "THE TARGET IS USING JOOMLA CMS" #print "Joomla version is " + joomla_version of.write("\nJOOMLA USED (version : " + joomla_version + ") --> " + provided_url + "\n") else: #print "JOOMLA NOT USED" of.write("\nJOMLA NOT USED --> " + provided_url + "\n") else: of.write("\nBAD URL --> " + provided_url + "\n") of.close() return 0 # main method def main(): """ main() Starting point of program execution. """ # Checking if argument was provided if len(sys.argv) <=1: print_usage() sys.exit(1) for arg in sys.argv: # Checking if help was called if arg == "-h" or arg == "--help": print_usage() sys.exit(1) # Checking for verbose mode if arg == "-v" or arg == "--verbose": global verbose_flag verbose_flag=1 # Checking for input file if arg == "-f" or arg == "--file": global default_input_path global default_output_path default_input_path = sys.argv[2] default_output_path=default_input_path[:-4] + "_results.txt" #if arg == "-u" or arg == "--url": # input_url = sys.argv[2] if os.name == "nt": os.system('cls') else: os.system('clear') process_from_file() if __name__=="__main__": main() #EOF
normal
{ "blob_id": "9de2589cfb5bebba789ece8df9a0fcfbedb01173", "index": 2440, "step-1": "#!/usr/bin/env python\r\n\r\nimport sys, re, urllib, urllib2, string, time, os\r\nfrom urllib2 import Request, urlopen, URLError, HTTPError\r\nfrom urlparse import urlparse\r\n\r\njoomla_version=\"undefined\" #used for joomla veersin info\r\n\r\nprovided_url=\"\" #the selected provided url\r\n\r\nverbose_flag = 0 # If set to 1, prints verbose information\r\n\r\ndefault_input_path = \"\" # The default input file path\r\n \r\ndefault_output_path = \"\" # The default output file path\r\n\r\nif os.name == \"nt\":\r\n path_slash = \"\\\\\"\r\nelse:\r\n path_slash = \"/\"\r\n\r\n# Prints usage\r\ndef print_usage():\r\n \"\"\"\r\n\tprint_usage()\r\n\t\r\n\tPrints help screen and exits.\r\n\r\n \"\"\"\r\n print \"\"\r\n print \"\"\r\n print \" JoomFind v0.1\"\r\n print \"\"\r\n print \" Script made by Jasdev Singh\"\r\n print \"\"\r\n print \" This script is made only for educational and offline self-testing \"\r\n print \" purposes. The creator is not responsible or accountable for any \"\r\n print \" damage or loss caused that you perform with this script. \"\r\n print \"\"\r\n print \" Usage example:\"\r\n print '\\tpython joomfind.py -f filepath | -v'\r\n print \"\"\r\n print \" Put URL(s) to scan in a newline delimited file\"\r\n print \" URL(s) must point to homepage of the CMS \"\r\n print \"\"\r\n print \" Options:\"\r\n print \" -f filename (specify input file)\"\r\n print \" -v, --verbose (show detailed output)\"\r\n print \" --help (displays this help text)\"\r\n print \"\"\r\n return\r\n\r\n\r\n# Testing if URL is reachable, with error handling\r\ndef test_url():\r\n \"\"\"\r\n\ttest_url()\r\n\t\r\n\tChecks whether URL is rechable. 
Prints relevant infomation.\r\n\r\n \"\"\"\r\n global provided_url\r\n global verbose_flag\r\n # extracting url\r\n provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n print provided_url \r\n if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n try:\r\n response = urllib2.urlopen(provided_url)\r\n \r\n except HTTPError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n except URLError, e:\r\n if verbose_flag: print \"[!] Failed\"\r\n return 0\r\n else:\r\n valid_target = 1\r\n if verbose_flag: print \"Success\"\r\n return 1\r\n\r\n# Scans for the HTML meta tag information\r\ndef scan_target_metatag():\r\n \"\"\"\r\n\tscan_target_metatag()\r\n\t\r\n\tScans the meta-tag of the website. \r\n\r\n\tThe meta-tag has information that can lead to the detection of Joomla.\r\n\r\n \"\"\"\r\n target_meta_url=provided_url+\"/index.php\"\r\n if verbose_flag: print \"\\t[.] Trying to access meta tag information...\", #+ target_meta_url\r\n try:\r\n \tresponse = urllib2.urlopen(target_meta_url)\r\n \thtml = response.read(2000)\r\n \t#print html\r\n \t# Now extract the interesting information\r\n \tget_metatag = string.find(html, \"Joomla! - Open Source Content Management\")\r\n\r\n \t# If the target is not vulnerable exit\r\n \tif get_metatag == -1:\r\n meta_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n meta_flag=1\r\n if verbose_flag: print \"Success\"\r\n #print \"meta flag=\"+str(meta_flag)\r\n return meta_flag\r\n\r\n except:\r\n\tif verbose_flag: print \"Failed\"\r\n\r\n# Tests whether the URL has a '/administrator' login page\r\ndef scan_admin_url():\r\n \"\"\"\r\n\tscan_admin_url()\r\n\t\r\n\tScans the administrator URL of the website. \r\n\r\n\tThe administrator URL, if reachable, is a clue that Joomla is being used.\r\n\r\n \"\"\"\r\n target_admin_url=provided_url+\"/administrator/index.php\"\r\n if verbose_flag: print \"\\t[.] 
Trying to access admin login page...\", #+ target_admin_url\r\n try:\r\n response = urllib2.urlopen(target_admin_url)\r\n except HTTPError, e:\r\n admin_flag=0\r\n #print \"admin flag=\"+str(admin_flag)\r\n if verbose_flag: print \"Failed\"\r\n return admin_flag\r\n else:\r\n admin_flag=1\r\n #print \"admin flag=\"+str(admin_flag)\r\n if verbose_flag: print \"Success\"\r\n return admin_flag\r\n\r\n# Scans content of 'com_content' component\r\ndef scan_com_content():\r\n \"\"\"\r\n\tscan_com_content()\r\n\t\r\n\tScans the content.xml file of the default component of the website. \r\n\r\n\tThe content.xml file, if readable, is a clue that Joomla is being used.\r\n\r\n \"\"\"\r\n target_com_content=provided_url+\"/administrator/components/com_content/content.xml\"\r\n if verbose_flag: print \"\\t[.] Trying to access com_content component...\", #+ target_com_content\r\n try:\r\n response = urllib2.urlopen(target_com_content)\r\n html = response.read()\r\n get_com = string.find(html, \"Joomla\")\r\n except HTTPError, e:\r\n com_component_flag=0\r\n #print \"com_component flag=\"+str(com_component_flag)\r\n if verbose_flag: print \"Failed\"\r\n return com_component_flag\r\n else:\r\n if get_com==-1:\r\n com_component_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n com_component_flag=1\r\n if verbose_flag: print \"Success\"\r\n #print \"com_component flag=\"+str(com_component_flag)\r\n return com_component_flag\r\n\r\n# Scans the robots.txt file\r\ndef scan_robots_txt():\r\n \"\"\"\r\n\tscan_robots_txt()\r\n\t\r\n\tScans the robots.txt file of website. \r\n\r\n\tThe robots.txt file, if readable, has clues that Joomla is being used.\r\n\r\n \"\"\"\r\n target_robots_txt=provided_url+\"/robots.txt\"\r\n if verbose_flag: print \"\\t[.] 
Trying to access robots.txt file...\",#+target_robots_txt\r\n try:\r\n response = urllib2.urlopen(target_robots_txt)\r\n html = response.read()\r\n get_robots = string.find(html, \"Joomla\")\r\n except HTTPError, e:\r\n robots_flag=0\r\n #print \"robots flag=\"+str(robots_flag)\r\n if verbose_flag: print \"Failed\"\r\n return robots_flag\r\n else:\r\n if get_robots==-1:\r\n robots_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n robots_flag=1\r\n if verbose_flag: print \"Success\"\r\n #print \"robots flag=\"+str(robots_flag)\r\n return robots_flag\r\n\r\n# Scans the htaccess.txt file\r\ndef scan_htaccess():\r\n \"\"\"\r\n\tscan_htaccess()\r\n\t\r\n\tScans the htaccess file of website. \r\n\r\n\tThe htaccess file, if readable, has clues that Joomla is being used.\r\n\r\n \"\"\"\r\n target_htacess=provided_url+\"/htaccess.txt\"\r\n if verbose_flag: print \"\\t[.] Trying to access htaccess file...\",#+target_htacess\r\n try:\r\n response = urllib2.urlopen(target_htacess)\r\n html = response.read()\r\n get_htaccess = string.find(html, \"Joomla\")\r\n except HTTPError, e:\r\n htaccess_flag=0\r\n #print \"htaccess flag=\"+str(htaccess_flag)\r\n if verbose_flag: print \"Failed\"\r\n return htaccess_flag\r\n else:\r\n if get_htaccess==-1:\r\n htaccess_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n htaccess_flag=1\r\n if verbose_flag: print \"Success\"\r\n #print \"htaccess flag=\"+str(htaccess_flag)\r\n return htaccess_flag\r\n\r\n# Scans the system.css file \r\ndef scan_system_css():\r\n \"\"\"\r\n\tscan_system_css()\r\n\t\r\n\tScans the system.css file of website. \r\n\r\n\tThe system.css file, if readable, has clues that Joomla is being used.\r\n\r\n \"\"\"\r\n pass\r\n\r\n# Scans the MooTools.js file\r\ndef scan_mootools():\r\n \"\"\"\r\n\tscan_mootools()\r\n\t\r\n\tScans the mootools.js file of website. 
\r\n\r\n\tThe mootools.js file, if readable, has clues that Joomla is being used.\r\n\r\n \"\"\"\r\n target_mootools=provided_url+\"/media/system/js/mootools-more.js\"\r\n if verbose_flag: print \"\\t[.] Trying to access MooTools file...\", #+ target_mootools\r\n try:\r\n response = urllib2.urlopen(target_mootools)\r\n html = response.read(3300)\r\n #print html\r\n get_mootools = string.find(html, 'MooTools.More={version:\"1.4.0.1\"')\r\n except HTTPError, e:\r\n mootools_flag=0\r\n #print \"mootools flag=\"+str(mootools_flag)\r\n if verbose_flag: print \"Failed\"\r\n return mootools_flag\r\n else:\r\n if get_mootools==-1:\r\n mootools_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n mootools_flag=1\r\n if verbose_flag: print \"Success\"\r\n joomla_version=\"2.x or 3.x\"\r\n #print \"mootools flag=\"+str(mootools_flag)\r\n return mootools_flag \r\n\r\n# Scans the en-GB.xml file\r\ndef scan_engb_ini():\r\n \"\"\"\r\n\tscan_engb_ini()\r\n\t\r\n\tScans the en-GB.ini file of website. \r\n\r\n\tThe en-GB.ini file, if readable, has clues that Joomla is being used.\r\n\r\n \"\"\"\r\n target_engb=provided_url+\"/language/en-GB/en-GB.xml\"\r\n if verbose_flag: print \"\\t[.] Trying to access en-GB file...\", #+ target_engb\r\n try:\r\n response = urllib2.urlopen(target_engb)\r\n html = response.read(200)\r\n #print html\r\n t1 = string.find(html, '<version>')\r\n target_engb = html[t1+9:t1+14]\r\n \r\n except HTTPError, e:\r\n engb_flag=0\r\n #print \"engb flag=\"+str(engb_flag)\r\n if verbose_flag: print \"Failed\"\r\n return engb_flag\r\n else:\r\n if t1==-1:\r\n engb_flag=0\r\n if verbose_flag: print \"Failed\"\r\n else:\r\n engb_flag=1\r\n if verbose_flag: print \"Success\"\r\n global joomla_version\r\n joomla_version=target_engb\r\n #print \"engb flag=\"+str(engb_flag)\r\n return engb_flag \r\n\r\n# Computes the result of the scans\r\ndef compute_result(a,b,c,d,e,f,g):\r\n \"\"\"\r\n\tcompute_result()\r\n\t\r\n\tComputes the final result. 
\r\n\r\n \"\"\"\r\n if (a or b or c or d or e or f or g)and ((a+b+c+d+e+f+g)>=3):\r\n return 1\r\n else:\r\n return 0\r\n\r\n# Reads URL's from an input file and processes them\r\ndef process_from_file():\r\n \"\"\"\r\n\tprocess_from_file()\r\n\t\r\n\tStarts processing the URL's from the input file. \r\n\r\n \"\"\"\r\n global default_input_path\r\n print \"JoomFind v 1.0\"\r\n print \"\\n\\nTrying to read URL(s) form \" + default_input_path + \" file...\\n\"\r\n try:\r\n if not default_input_path:\r\n f = open(\"urls.txt\")\r\n else:\r\n f=open(default_input_path)\r\n cwd=os.getcwd()\r\n file_path = cwd + path_slash + f.name\r\n\t# extracting url's to list from file\r\n start_urls = [url.strip() for url in f.readlines() if url[0] not in ['#',' ',\"\\n\"]]\r\n if not start_urls:\r\n print \"File is empty. Add some URL(s) first.\\n\"\r\n f.close()\r\n return 0\r\n except:\r\n print \"File not found. Make sure it exists.\\n\"\r\n return 0\r\n #print start_urls\r\n \r\n num=str(len(start_urls))\r\n print \"Found \" + num + \" URL(s) on \" + time.asctime(time.localtime(time.time())) + \"\\n\"\r\n \r\n of=open(default_output_path,'a+')\r\n of.write(\"\\n\\n\\tScanning \" + num + \" URL(s) \")\r\n of.write(\"\\n\\n\\tDate\\Time : \" + time.asctime(time.localtime(time.time())) )\r\n of.write(\"\\n\\n\\tInput file path : \" + default_input_path + \"\\n\\n\")\r\n of.close()\r\n \r\n for url in start_urls:\r\n global provided_url\r\n provided_url=url\r\n print \"\\nWorking on URL \" + str(start_urls.index(url)+1) + \": \" + provided_url\r\n processing()\r\n print \"\\nAll done! Check '\" + default_output_path +\"' file for results.\\n\" \r\n\r\n\r\n# Calls other scans and writes results to output file\r\ndef processing():\r\n \"\"\"\r\n\tprocessing()\r\n\t\r\n\tCalls other helper functions. 
\r\n\r\n \"\"\"\r\n err=test_url()\r\n of=open(default_output_path,'a+')\r\n if err!=0: \r\n metaf=scan_target_metatag()\r\n adminf=scan_admin_url()\r\n comf=scan_com_content()\r\n robotsf=scan_robots_txt()\r\n htf=scan_htaccess()\r\n moof=scan_mootools()\r\n engbf=scan_engb_ini()\r\n result=compute_result(metaf,adminf,comf,robotsf,htf,moof,engbf)\r\n if result==1:\r\n #print \"THE TARGET IS USING JOOMLA CMS\"\r\n #print \"Joomla version is \" + joomla_version\r\n of.write(\"\\nJOOMLA USED (version : \" + joomla_version + \") --> \" + provided_url + \"\\n\")\r\n else:\r\n #print \"JOOMLA NOT USED\"\r\n of.write(\"\\nJOMLA NOT USED --> \" + provided_url + \"\\n\")\r\n else:\r\n of.write(\"\\nBAD URL --> \" + provided_url + \"\\n\")\r\n of.close()\r\n return 0\r\n\r\n# main method\r\ndef main():\r\n \"\"\"\r\n\tmain()\r\n\t\r\n\tStarting point of program execution. \r\n\r\n \"\"\"\r\n# Checking if argument was provided\r\n if len(sys.argv) <=1:\r\n print_usage()\r\n sys.exit(1)\r\n \r\n for arg in sys.argv:\r\n # Checking if help was called\r\n if arg == \"-h\" or arg == \"--help\":\r\n print_usage()\r\n sys.exit(1)\r\n \r\n # Checking for verbose mode \r\n if arg == \"-v\" or arg == \"--verbose\":\r\n global verbose_flag\r\n verbose_flag=1\r\n\r\n # Checking for input file\r\n if arg == \"-f\" or arg == \"--file\":\r\n global default_input_path\r\n global default_output_path\r\n default_input_path = sys.argv[2]\r\n default_output_path=default_input_path[:-4] + \"_results.txt\"\r\n\r\n #if arg == \"-u\" or arg == \"--url\":\r\n # input_url = sys.argv[2]\r\n\t \r\n if os.name == \"nt\":\r\n os.system('cls')\r\n else:\r\n os.system('clear')\r\n \r\n process_from_file()\r\n\r\n\r\nif __name__==\"__main__\":\r\n main()\r\n \r\n#EOF\r\n", "step-2": null, "step-3": null, "step-4": null, "step-5": null, "step-ids": [ 0 ] }
[ 0 ]
<|reserved_special_token_0|> class Question: <|reserved_special_token_0|> <|reserved_special_token_0|> def findSecond(self, sentenceDoc, verb, children): for child in children: if child.dep_ == 'attr' or child.dep_ == 'nsubj': temp = self.nounArray.findWord(child.orth_) subjectChildren = [] for ch in child.children: subjectChildren.append(ch) if not subjectChildren: subjectChildren = children subjectChildren.remove(child) self.findThird(sentenceDoc, temp, verb, subjectChildren, False) break <|reserved_special_token_0|> def writeOtter(self, first, second, third): self.file.write('-rdf("' + first + '", "' + second + '", "' + third + '").\n') <|reserved_special_token_1|> <|reserved_special_token_0|> class Question: <|reserved_special_token_0|> def findFirst(self, sentence): sentenceDoc = self.nlp(sentence) for word in sentenceDoc: if word.dep_ == 'ROOT': verb = self.verbArray.findWord(word.orth_) children = [] for ch in word.children: children.append(ch) self.findSecond(sentenceDoc, verb, children) break def findSecond(self, sentenceDoc, verb, children): for child in children: if child.dep_ == 'attr' or child.dep_ == 'nsubj': temp = self.nounArray.findWord(child.orth_) subjectChildren = [] for ch in child.children: subjectChildren.append(ch) if not subjectChildren: subjectChildren = children subjectChildren.remove(child) self.findThird(sentenceDoc, temp, verb, subjectChildren, False) break <|reserved_special_token_0|> def writeOtter(self, first, second, third): self.file.write('-rdf("' + first + '", "' + second + '", "' + third + '").\n') <|reserved_special_token_1|> <|reserved_special_token_0|> class Question: def __init__(self, nlp, otter, nounArray, verbArray): self.nlp = nlp self.nounArray = nounArray self.verbArray = verbArray self.file = otter def findFirst(self, sentence): sentenceDoc = self.nlp(sentence) for word in sentenceDoc: if word.dep_ == 'ROOT': verb = self.verbArray.findWord(word.orth_) children = [] for ch in word.children: children.append(ch) 
self.findSecond(sentenceDoc, verb, children) break def findSecond(self, sentenceDoc, verb, children): for child in children: if child.dep_ == 'attr' or child.dep_ == 'nsubj': temp = self.nounArray.findWord(child.orth_) subjectChildren = [] for ch in child.children: subjectChildren.append(ch) if not subjectChildren: subjectChildren = children subjectChildren.remove(child) self.findThird(sentenceDoc, temp, verb, subjectChildren, False) break def findThird(self, sentenceDoc, subject, verb, children, flag): for child in children: if child.dep_ == 'appos' or child.dep_ == 'pobj': temp = self.nounArray.findWord(child.orth_) if temp is None: w = datastructure.Word(child.orth_) w.addType(child.pos_) w.addUri(wordUri.findUri(w)) print(subject.uri, '- ' + verb.uri + ' -', w.uri) self.writeOtter(subject.uri, verb.uri, w.uri) else: print(subject.uri, '- ' + verb.uri + ' -', temp.uri) self.writeOtter(subject.uri, verb.uri, temp.uri) if child.dep_ == 'prep' or child.dep_ == 'acomp': if not flag: verb = datastructure.Word(child.orth_) verb.addType(child.pos_) verb.addUri(wordUri.findUri(verb)) verbChildren = [] for ch in child.children: verbChildren.append(ch) self.findThird(sentenceDoc, subject, verb, verbChildren, True) def writeOtter(self, first, second, third): self.file.write('-rdf("' + first + '", "' + second + '", "' + third + '").\n') <|reserved_special_token_1|> import datastructure import wordUri class Question: def __init__(self, nlp, otter, nounArray, verbArray): self.nlp = nlp self.nounArray = nounArray self.verbArray = verbArray self.file = otter def findFirst(self, sentence): sentenceDoc = self.nlp(sentence) for word in sentenceDoc: if word.dep_ == 'ROOT': verb = self.verbArray.findWord(word.orth_) children = [] for ch in word.children: children.append(ch) self.findSecond(sentenceDoc, verb, children) break def findSecond(self, sentenceDoc, verb, children): for child in children: if child.dep_ == 'attr' or child.dep_ == 'nsubj': temp = 
self.nounArray.findWord(child.orth_) subjectChildren = [] for ch in child.children: subjectChildren.append(ch) if not subjectChildren: subjectChildren = children subjectChildren.remove(child) self.findThird(sentenceDoc, temp, verb, subjectChildren, False) break def findThird(self, sentenceDoc, subject, verb, children, flag): for child in children: if child.dep_ == 'appos' or child.dep_ == 'pobj': temp = self.nounArray.findWord(child.orth_) if temp is None: w = datastructure.Word(child.orth_) w.addType(child.pos_) w.addUri(wordUri.findUri(w)) print(subject.uri, '- ' + verb.uri + ' -', w.uri) self.writeOtter(subject.uri, verb.uri, w.uri) else: print(subject.uri, '- ' + verb.uri + ' -', temp.uri) self.writeOtter(subject.uri, verb.uri, temp.uri) if child.dep_ == 'prep' or child.dep_ == 'acomp': if not flag: verb = datastructure.Word(child.orth_) verb.addType(child.pos_) verb.addUri(wordUri.findUri(verb)) verbChildren = [] for ch in child.children: verbChildren.append(ch) self.findThird(sentenceDoc, subject, verb, verbChildren, True) def writeOtter(self, first, second, third): self.file.write('-rdf("' + first + '", "' + second + '", "' + third + '").\n') <|reserved_special_token_1|> import datastructure import wordUri class Question: def __init__(self, nlp, otter, nounArray, verbArray): self.nlp = nlp self.nounArray = nounArray self.verbArray = verbArray self.file = otter def findFirst(self, sentence): sentenceDoc = self.nlp(sentence) for word in sentenceDoc: if word.dep_ == "ROOT": verb = self.verbArray.findWord(word.orth_) children = [] for ch in word.children: children.append(ch) self.findSecond(sentenceDoc, verb, children) break def findSecond(self, sentenceDoc, verb, children): for child in children: if child.dep_ == "attr" or child.dep_ == "nsubj": temp = self.nounArray.findWord(child.orth_) subjectChildren = [] for ch in child.children: subjectChildren.append(ch) if not subjectChildren: subjectChildren = children subjectChildren.remove(child) 
self.findThird(sentenceDoc, temp, verb, subjectChildren, False) break def findThird(self, sentenceDoc, subject, verb, children, flag): for child in children: if child.dep_ == "appos" or child.dep_ == "pobj": temp = self.nounArray.findWord(child.orth_) if temp is None: w = datastructure.Word(child.orth_) w.addType(child.pos_) w.addUri(wordUri.findUri(w)) #w.addUri(w.word + "URI") print(subject.uri, "- " + verb.uri + " -", w.uri) self.writeOtter(subject.uri, verb.uri, w.uri) else: print(subject.uri, "- " + verb.uri + " -", temp.uri) self.writeOtter(subject.uri, verb.uri, temp.uri) #self.recoursiveFind(sentenceDoc, subject, verb, child) if child.dep_ == "prep" or child.dep_ == "acomp": if not flag: verb = datastructure.Word(child.orth_) verb.addType(child.pos_) verb.addUri(wordUri.findUri(verb)) verbChildren = [] for ch in child.children: verbChildren.append(ch) self.findThird(sentenceDoc, subject, verb, verbChildren, True) def writeOtter(self, first, second, third): self.file.write("-rdf(\"" + first + "\", \"" + second + "\", \"" + third + "\").\n")
flexible
{ "blob_id": "4d63a5f09164b78faa731af6dce41969edc2c4f5", "index": 848, "step-1": "<mask token>\n\n\nclass Question:\n <mask token>\n <mask token>\n\n def findSecond(self, sentenceDoc, verb, children):\n for child in children:\n if child.dep_ == 'attr' or child.dep_ == 'nsubj':\n temp = self.nounArray.findWord(child.orth_)\n subjectChildren = []\n for ch in child.children:\n subjectChildren.append(ch)\n if not subjectChildren:\n subjectChildren = children\n subjectChildren.remove(child)\n self.findThird(sentenceDoc, temp, verb, subjectChildren, False)\n break\n <mask token>\n\n def writeOtter(self, first, second, third):\n self.file.write('-rdf(\"' + first + '\", \"' + second + '\", \"' + third +\n '\").\\n')\n", "step-2": "<mask token>\n\n\nclass Question:\n <mask token>\n\n def findFirst(self, sentence):\n sentenceDoc = self.nlp(sentence)\n for word in sentenceDoc:\n if word.dep_ == 'ROOT':\n verb = self.verbArray.findWord(word.orth_)\n children = []\n for ch in word.children:\n children.append(ch)\n self.findSecond(sentenceDoc, verb, children)\n break\n\n def findSecond(self, sentenceDoc, verb, children):\n for child in children:\n if child.dep_ == 'attr' or child.dep_ == 'nsubj':\n temp = self.nounArray.findWord(child.orth_)\n subjectChildren = []\n for ch in child.children:\n subjectChildren.append(ch)\n if not subjectChildren:\n subjectChildren = children\n subjectChildren.remove(child)\n self.findThird(sentenceDoc, temp, verb, subjectChildren, False)\n break\n <mask token>\n\n def writeOtter(self, first, second, third):\n self.file.write('-rdf(\"' + first + '\", \"' + second + '\", \"' + third +\n '\").\\n')\n", "step-3": "<mask token>\n\n\nclass Question:\n\n def __init__(self, nlp, otter, nounArray, verbArray):\n self.nlp = nlp\n self.nounArray = nounArray\n self.verbArray = verbArray\n self.file = otter\n\n def findFirst(self, sentence):\n sentenceDoc = self.nlp(sentence)\n for word in sentenceDoc:\n if word.dep_ == 'ROOT':\n verb = 
self.verbArray.findWord(word.orth_)\n children = []\n for ch in word.children:\n children.append(ch)\n self.findSecond(sentenceDoc, verb, children)\n break\n\n def findSecond(self, sentenceDoc, verb, children):\n for child in children:\n if child.dep_ == 'attr' or child.dep_ == 'nsubj':\n temp = self.nounArray.findWord(child.orth_)\n subjectChildren = []\n for ch in child.children:\n subjectChildren.append(ch)\n if not subjectChildren:\n subjectChildren = children\n subjectChildren.remove(child)\n self.findThird(sentenceDoc, temp, verb, subjectChildren, False)\n break\n\n def findThird(self, sentenceDoc, subject, verb, children, flag):\n for child in children:\n if child.dep_ == 'appos' or child.dep_ == 'pobj':\n temp = self.nounArray.findWord(child.orth_)\n if temp is None:\n w = datastructure.Word(child.orth_)\n w.addType(child.pos_)\n w.addUri(wordUri.findUri(w))\n print(subject.uri, '- ' + verb.uri + ' -', w.uri)\n self.writeOtter(subject.uri, verb.uri, w.uri)\n else:\n print(subject.uri, '- ' + verb.uri + ' -', temp.uri)\n self.writeOtter(subject.uri, verb.uri, temp.uri)\n if child.dep_ == 'prep' or child.dep_ == 'acomp':\n if not flag:\n verb = datastructure.Word(child.orth_)\n verb.addType(child.pos_)\n verb.addUri(wordUri.findUri(verb))\n verbChildren = []\n for ch in child.children:\n verbChildren.append(ch)\n self.findThird(sentenceDoc, subject, verb, verbChildren, True)\n\n def writeOtter(self, first, second, third):\n self.file.write('-rdf(\"' + first + '\", \"' + second + '\", \"' + third +\n '\").\\n')\n", "step-4": "import datastructure\nimport wordUri\n\n\nclass Question:\n\n def __init__(self, nlp, otter, nounArray, verbArray):\n self.nlp = nlp\n self.nounArray = nounArray\n self.verbArray = verbArray\n self.file = otter\n\n def findFirst(self, sentence):\n sentenceDoc = self.nlp(sentence)\n for word in sentenceDoc:\n if word.dep_ == 'ROOT':\n verb = self.verbArray.findWord(word.orth_)\n children = []\n for ch in word.children:\n 
children.append(ch)\n self.findSecond(sentenceDoc, verb, children)\n break\n\n def findSecond(self, sentenceDoc, verb, children):\n for child in children:\n if child.dep_ == 'attr' or child.dep_ == 'nsubj':\n temp = self.nounArray.findWord(child.orth_)\n subjectChildren = []\n for ch in child.children:\n subjectChildren.append(ch)\n if not subjectChildren:\n subjectChildren = children\n subjectChildren.remove(child)\n self.findThird(sentenceDoc, temp, verb, subjectChildren, False)\n break\n\n def findThird(self, sentenceDoc, subject, verb, children, flag):\n for child in children:\n if child.dep_ == 'appos' or child.dep_ == 'pobj':\n temp = self.nounArray.findWord(child.orth_)\n if temp is None:\n w = datastructure.Word(child.orth_)\n w.addType(child.pos_)\n w.addUri(wordUri.findUri(w))\n print(subject.uri, '- ' + verb.uri + ' -', w.uri)\n self.writeOtter(subject.uri, verb.uri, w.uri)\n else:\n print(subject.uri, '- ' + verb.uri + ' -', temp.uri)\n self.writeOtter(subject.uri, verb.uri, temp.uri)\n if child.dep_ == 'prep' or child.dep_ == 'acomp':\n if not flag:\n verb = datastructure.Word(child.orth_)\n verb.addType(child.pos_)\n verb.addUri(wordUri.findUri(verb))\n verbChildren = []\n for ch in child.children:\n verbChildren.append(ch)\n self.findThird(sentenceDoc, subject, verb, verbChildren, True)\n\n def writeOtter(self, first, second, third):\n self.file.write('-rdf(\"' + first + '\", \"' + second + '\", \"' + third +\n '\").\\n')\n", "step-5": "import datastructure\nimport wordUri\n\n\nclass Question:\n def __init__(self, nlp, otter, nounArray, verbArray):\n self.nlp = nlp\n self.nounArray = nounArray\n self.verbArray = verbArray\n self.file = otter\n\n\n def findFirst(self, sentence):\n sentenceDoc = self.nlp(sentence)\n for word in sentenceDoc:\n if word.dep_ == \"ROOT\":\n verb = self.verbArray.findWord(word.orth_)\n\n children = []\n for ch in word.children:\n children.append(ch)\n self.findSecond(sentenceDoc, verb, children)\n break\n\n def 
findSecond(self, sentenceDoc, verb, children):\n\n for child in children:\n if child.dep_ == \"attr\" or child.dep_ == \"nsubj\":\n temp = self.nounArray.findWord(child.orth_)\n\n subjectChildren = []\n for ch in child.children:\n subjectChildren.append(ch)\n\n if not subjectChildren:\n subjectChildren = children\n subjectChildren.remove(child)\n self.findThird(sentenceDoc, temp, verb, subjectChildren, False)\n break\n\n def findThird(self, sentenceDoc, subject, verb, children, flag):\n for child in children:\n if child.dep_ == \"appos\" or child.dep_ == \"pobj\":\n temp = self.nounArray.findWord(child.orth_)\n if temp is None:\n w = datastructure.Word(child.orth_)\n w.addType(child.pos_)\n w.addUri(wordUri.findUri(w))\n #w.addUri(w.word + \"URI\")\n print(subject.uri, \"- \" + verb.uri + \" -\", w.uri)\n\n self.writeOtter(subject.uri, verb.uri, w.uri)\n\n else:\n print(subject.uri, \"- \" + verb.uri + \" -\", temp.uri)\n self.writeOtter(subject.uri, verb.uri, temp.uri)\n\n #self.recoursiveFind(sentenceDoc, subject, verb, child)\n if child.dep_ == \"prep\" or child.dep_ == \"acomp\":\n if not flag:\n verb = datastructure.Word(child.orth_)\n verb.addType(child.pos_)\n verb.addUri(wordUri.findUri(verb))\n\n verbChildren = []\n for ch in child.children:\n verbChildren.append(ch)\n\n self.findThird(sentenceDoc, subject, verb, verbChildren, True)\n\n def writeOtter(self, first, second, third):\n self.file.write(\"-rdf(\\\"\" + first + \"\\\", \\\"\" + second + \"\\\", \\\"\" + third + \"\\\").\\n\")\n", "step-ids": [ 3, 4, 6, 7, 8 ] }
[ 3, 4, 6, 7, 8 ]
<|reserved_special_token_0|> <|reserved_special_token_1|> <|reserved_special_token_0|> if r == n: print('too late') else: l = list(range(1, r + 1)) for _ in range(n): l.remove(int(input())) print(l[0]) <|reserved_special_token_1|> r, n = map(int, input().split()) if r == n: print('too late') else: l = list(range(1, r + 1)) for _ in range(n): l.remove(int(input())) print(l[0]) <|reserved_special_token_1|> r, n = map(int, input().split()) if r == n: print("too late") else: l = list(range(1, r+1)) for _ in range(n): l.remove(int(input())) print(l[0])
flexible
{ "blob_id": "381d3f0890a2916d2e0a21a6a47a5f87afde622d", "index": 9241, "step-1": "<mask token>\n", "step-2": "<mask token>\nif r == n:\n print('too late')\nelse:\n l = list(range(1, r + 1))\n for _ in range(n):\n l.remove(int(input()))\n print(l[0])\n", "step-3": "r, n = map(int, input().split())\nif r == n:\n print('too late')\nelse:\n l = list(range(1, r + 1))\n for _ in range(n):\n l.remove(int(input()))\n print(l[0])\n", "step-4": "r, n = map(int, input().split())\nif r == n:\n print(\"too late\")\nelse:\n l = list(range(1, r+1))\n for _ in range(n):\n l.remove(int(input()))\n print(l[0])\n", "step-5": null, "step-ids": [ 0, 1, 2, 3 ] }
[ 0, 1, 2, 3 ]
<|reserved_special_token_0|> class AnnouncedPuResults(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class Meta: managed = False db_table = 'announced_pu_results' class AnnouncedStateResults(models.Model): result_id = models.IntegerField(primary_key=True) state_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_state_results' class AnnouncedWardResults(models.Model): result_id = models.IntegerField(primary_key=True) ward_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_ward_results' class Lga(models.Model): uniqueid = models.IntegerField(primary_key=True) lga_id = models.IntegerField() lga_name = models.CharField(max_length=50) state_id = models.IntegerField() lga_description = models.TextField(blank=True, null=True) entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'lga' class Party(models.Model): id = models.IntegerField(primary_key=True) partyid = models.CharField(max_length=11) partyname = models.CharField(max_length=11) class Meta: managed = False db_table = 'party' class PollingUnit(models.Model): uniqueid = models.IntegerField(primary_key=True) polling_unit_id = models.IntegerField() ward_id = models.IntegerField() lga_id = models.IntegerField() 
uniquewardid = models.IntegerField(blank=True, null=True) polling_unit_number = models.CharField(max_length=50, blank=True, null=True ) polling_unit_name = models.CharField(max_length=50, blank=True, null=True) polling_unit_description = models.TextField(blank=True, null=True) lat = models.CharField(max_length=255, blank=True, null=True) lon = models.CharField(max_length=255, blank=True, null=True) entered_by_user = models.CharField(max_length=50, blank=True, null=True) date_entered = models.DateTimeField(blank=True, null=True) user_ip_address = models.CharField(max_length=50, blank=True, null=True) class Meta: managed = False db_table = 'polling_unit' class States(models.Model): state_id = models.IntegerField(primary_key=True) state_name = models.CharField(max_length=50) class Meta: managed = False db_table = 'states' class Ward(models.Model): uniqueid = models.IntegerField(primary_key=True) ward_id = models.IntegerField() ward_name = models.CharField(max_length=50) lga_id = models.IntegerField() ward_description = models.TextField(blank=True, null=True) entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'ward' <|reserved_special_token_1|> <|reserved_special_token_0|> class AnnouncedLgaResults(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class Meta: managed = False db_table = 'announced_lga_results' class AnnouncedPuResults(models.Model): result_id = models.IntegerField(primary_key=True) polling_unit_uniqueid = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class 
Meta: managed = False db_table = 'announced_pu_results' class AnnouncedStateResults(models.Model): result_id = models.IntegerField(primary_key=True) state_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_state_results' class AnnouncedWardResults(models.Model): result_id = models.IntegerField(primary_key=True) ward_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_ward_results' class Lga(models.Model): uniqueid = models.IntegerField(primary_key=True) lga_id = models.IntegerField() lga_name = models.CharField(max_length=50) state_id = models.IntegerField() lga_description = models.TextField(blank=True, null=True) entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'lga' class Party(models.Model): id = models.IntegerField(primary_key=True) partyid = models.CharField(max_length=11) partyname = models.CharField(max_length=11) class Meta: managed = False db_table = 'party' class PollingUnit(models.Model): uniqueid = models.IntegerField(primary_key=True) polling_unit_id = models.IntegerField() ward_id = models.IntegerField() lga_id = models.IntegerField() uniquewardid = models.IntegerField(blank=True, null=True) polling_unit_number = models.CharField(max_length=50, blank=True, null=True ) polling_unit_name = models.CharField(max_length=50, blank=True, null=True) polling_unit_description = models.TextField(blank=True, null=True) 
lat = models.CharField(max_length=255, blank=True, null=True) lon = models.CharField(max_length=255, blank=True, null=True) entered_by_user = models.CharField(max_length=50, blank=True, null=True) date_entered = models.DateTimeField(blank=True, null=True) user_ip_address = models.CharField(max_length=50, blank=True, null=True) class Meta: managed = False db_table = 'polling_unit' class States(models.Model): state_id = models.IntegerField(primary_key=True) state_name = models.CharField(max_length=50) class Meta: managed = False db_table = 'states' class Ward(models.Model): uniqueid = models.IntegerField(primary_key=True) ward_id = models.IntegerField() ward_name = models.CharField(max_length=50) lga_id = models.IntegerField() ward_description = models.TextField(blank=True, null=True) entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'ward' <|reserved_special_token_1|> <|reserved_special_token_0|> class Agentname(models.Model): <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> <|reserved_special_token_0|> class Meta: managed = False db_table = 'agentname' class AnnouncedLgaResults(models.Model): result_id = models.IntegerField(primary_key=True) lga_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_lga_results' class AnnouncedPuResults(models.Model): result_id = models.IntegerField(primary_key=True) polling_unit_uniqueid = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) 
date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_pu_results' class AnnouncedStateResults(models.Model): result_id = models.IntegerField(primary_key=True) state_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_state_results' class AnnouncedWardResults(models.Model): result_id = models.IntegerField(primary_key=True) ward_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_ward_results' class Lga(models.Model): uniqueid = models.IntegerField(primary_key=True) lga_id = models.IntegerField() lga_name = models.CharField(max_length=50) state_id = models.IntegerField() lga_description = models.TextField(blank=True, null=True) entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'lga' class Party(models.Model): id = models.IntegerField(primary_key=True) partyid = models.CharField(max_length=11) partyname = models.CharField(max_length=11) class Meta: managed = False db_table = 'party' class PollingUnit(models.Model): uniqueid = models.IntegerField(primary_key=True) polling_unit_id = models.IntegerField() ward_id = models.IntegerField() lga_id = models.IntegerField() uniquewardid = models.IntegerField(blank=True, null=True) polling_unit_number = models.CharField(max_length=50, blank=True, null=True ) polling_unit_name = 
models.CharField(max_length=50, blank=True, null=True) polling_unit_description = models.TextField(blank=True, null=True) lat = models.CharField(max_length=255, blank=True, null=True) lon = models.CharField(max_length=255, blank=True, null=True) entered_by_user = models.CharField(max_length=50, blank=True, null=True) date_entered = models.DateTimeField(blank=True, null=True) user_ip_address = models.CharField(max_length=50, blank=True, null=True) class Meta: managed = False db_table = 'polling_unit' class States(models.Model): state_id = models.IntegerField(primary_key=True) state_name = models.CharField(max_length=50) class Meta: managed = False db_table = 'states' class Ward(models.Model): uniqueid = models.IntegerField(primary_key=True) ward_id = models.IntegerField() ward_name = models.CharField(max_length=50) lga_id = models.IntegerField() ward_description = models.TextField(blank=True, null=True) entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'ward' <|reserved_special_token_1|> from django.db import models class Agentname(models.Model): name_id = models.IntegerField(primary_key=True) firstname = models.CharField(max_length=255) lastname = models.CharField(max_length=255) email = models.CharField(max_length=255, blank=True, null=True) phone = models.CharField(max_length=13) pollingunit_uniqueid = models.IntegerField() class Meta: managed = False db_table = 'agentname' class AnnouncedLgaResults(models.Model): result_id = models.IntegerField(primary_key=True) lga_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_lga_results' class AnnouncedPuResults(models.Model): result_id 
= models.IntegerField(primary_key=True) polling_unit_uniqueid = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_pu_results' class AnnouncedStateResults(models.Model): result_id = models.IntegerField(primary_key=True) state_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_state_results' class AnnouncedWardResults(models.Model): result_id = models.IntegerField(primary_key=True) ward_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_ward_results' class Lga(models.Model): uniqueid = models.IntegerField(primary_key=True) lga_id = models.IntegerField() lga_name = models.CharField(max_length=50) state_id = models.IntegerField() lga_description = models.TextField(blank=True, null=True) entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'lga' class Party(models.Model): id = models.IntegerField(primary_key=True) partyid = models.CharField(max_length=11) partyname = models.CharField(max_length=11) class Meta: managed = False db_table = 'party' class PollingUnit(models.Model): uniqueid = models.IntegerField(primary_key=True) polling_unit_id = models.IntegerField() ward_id = 
models.IntegerField() lga_id = models.IntegerField() uniquewardid = models.IntegerField(blank=True, null=True) polling_unit_number = models.CharField(max_length=50, blank=True, null=True ) polling_unit_name = models.CharField(max_length=50, blank=True, null=True) polling_unit_description = models.TextField(blank=True, null=True) lat = models.CharField(max_length=255, blank=True, null=True) lon = models.CharField(max_length=255, blank=True, null=True) entered_by_user = models.CharField(max_length=50, blank=True, null=True) date_entered = models.DateTimeField(blank=True, null=True) user_ip_address = models.CharField(max_length=50, blank=True, null=True) class Meta: managed = False db_table = 'polling_unit' class States(models.Model): state_id = models.IntegerField(primary_key=True) state_name = models.CharField(max_length=50) class Meta: managed = False db_table = 'states' class Ward(models.Model): uniqueid = models.IntegerField(primary_key=True) ward_id = models.IntegerField() ward_name = models.CharField(max_length=50) lga_id = models.IntegerField() ward_description = models.TextField(blank=True, null=True) entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'ward' <|reserved_special_token_1|> # This is an auto-generated Django model module. # You'll have to do the following manually to clean this up: # * Rearrange models' order # * Make sure each model has one field with primary_key=True # * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior # * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table # Feel free to rename the models, but don't rename db_table values or field names. 
from django.db import models class Agentname(models.Model): name_id = models.IntegerField(primary_key=True) firstname = models.CharField(max_length=255) lastname = models.CharField(max_length=255) email = models.CharField(max_length=255, blank=True, null=True) phone = models.CharField(max_length=13) pollingunit_uniqueid = models.IntegerField() class Meta: managed = False db_table = 'agentname' class AnnouncedLgaResults(models.Model): result_id = models.IntegerField(primary_key=True) lga_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_lga_results' class AnnouncedPuResults(models.Model): result_id = models.IntegerField(primary_key=True) polling_unit_uniqueid = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_pu_results' class AnnouncedStateResults(models.Model): result_id = models.IntegerField(primary_key=True) state_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_state_results' class AnnouncedWardResults(models.Model): result_id = models.IntegerField(primary_key=True) ward_name = models.CharField(max_length=50) party_abbreviation = models.CharField(max_length=4) party_score = models.IntegerField() entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() 
user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'announced_ward_results' class Lga(models.Model): uniqueid = models.IntegerField(primary_key=True) lga_id = models.IntegerField() lga_name = models.CharField(max_length=50) state_id = models.IntegerField() lga_description = models.TextField(blank=True, null=True) entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'lga' class Party(models.Model): id = models.IntegerField(primary_key=True) partyid = models.CharField(max_length=11) partyname = models.CharField(max_length=11) class Meta: managed = False db_table = 'party' class PollingUnit(models.Model): uniqueid = models.IntegerField(primary_key=True) polling_unit_id = models.IntegerField() ward_id = models.IntegerField() lga_id = models.IntegerField() uniquewardid = models.IntegerField(blank=True, null=True) polling_unit_number = models.CharField(max_length=50, blank=True, null=True) polling_unit_name = models.CharField(max_length=50, blank=True, null=True) polling_unit_description = models.TextField(blank=True, null=True) lat = models.CharField(max_length=255, blank=True, null=True) lon = models.CharField(max_length=255, blank=True, null=True) entered_by_user = models.CharField(max_length=50, blank=True, null=True) date_entered = models.DateTimeField(blank=True, null=True) user_ip_address = models.CharField(max_length=50, blank=True, null=True) class Meta: managed = False db_table = 'polling_unit' class States(models.Model): state_id = models.IntegerField(primary_key=True) state_name = models.CharField(max_length=50) class Meta: managed = False db_table = 'states' class Ward(models.Model): uniqueid = models.IntegerField(primary_key=True) ward_id = models.IntegerField() ward_name = models.CharField(max_length=50) lga_id = models.IntegerField() ward_description = models.TextField(blank=True, null=True) 
entered_by_user = models.CharField(max_length=50) date_entered = models.DateTimeField() user_ip_address = models.CharField(max_length=50) class Meta: managed = False db_table = 'ward'
flexible
{ "blob_id": "5ce5fbfa33c241fc316d5e414df01a39bfc9be18", "index": 7063, "step-1": "<mask token>\n\n\nclass AnnouncedPuResults(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'announced_pu_results'\n\n\nclass AnnouncedStateResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_state_results'\n\n\nclass AnnouncedWardResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n ward_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_ward_results'\n\n\nclass Lga(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n lga_id = models.IntegerField()\n lga_name = models.CharField(max_length=50)\n state_id = models.IntegerField()\n lga_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'lga'\n\n\nclass Party(models.Model):\n id = models.IntegerField(primary_key=True)\n partyid = models.CharField(max_length=11)\n partyname = models.CharField(max_length=11)\n\n\n class Meta:\n managed = False\n db_table = 'party'\n\n\nclass PollingUnit(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n 
polling_unit_id = models.IntegerField()\n ward_id = models.IntegerField()\n lga_id = models.IntegerField()\n uniquewardid = models.IntegerField(blank=True, null=True)\n polling_unit_number = models.CharField(max_length=50, blank=True, null=True\n )\n polling_unit_name = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_description = models.TextField(blank=True, null=True)\n lat = models.CharField(max_length=255, blank=True, null=True)\n lon = models.CharField(max_length=255, blank=True, null=True)\n entered_by_user = models.CharField(max_length=50, blank=True, null=True)\n date_entered = models.DateTimeField(blank=True, null=True)\n user_ip_address = models.CharField(max_length=50, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'polling_unit'\n\n\nclass States(models.Model):\n state_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'states'\n\n\nclass Ward(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n ward_id = models.IntegerField()\n ward_name = models.CharField(max_length=50)\n lga_id = models.IntegerField()\n ward_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'ward'\n", "step-2": "<mask token>\n\n\nclass AnnouncedLgaResults(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'announced_lga_results'\n\n\nclass AnnouncedPuResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n polling_unit_uniqueid = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n 
date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_pu_results'\n\n\nclass AnnouncedStateResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_state_results'\n\n\nclass AnnouncedWardResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n ward_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_ward_results'\n\n\nclass Lga(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n lga_id = models.IntegerField()\n lga_name = models.CharField(max_length=50)\n state_id = models.IntegerField()\n lga_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'lga'\n\n\nclass Party(models.Model):\n id = models.IntegerField(primary_key=True)\n partyid = models.CharField(max_length=11)\n partyname = models.CharField(max_length=11)\n\n\n class Meta:\n managed = False\n db_table = 'party'\n\n\nclass PollingUnit(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n polling_unit_id = models.IntegerField()\n ward_id = models.IntegerField()\n lga_id = models.IntegerField()\n uniquewardid = models.IntegerField(blank=True, 
null=True)\n polling_unit_number = models.CharField(max_length=50, blank=True, null=True\n )\n polling_unit_name = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_description = models.TextField(blank=True, null=True)\n lat = models.CharField(max_length=255, blank=True, null=True)\n lon = models.CharField(max_length=255, blank=True, null=True)\n entered_by_user = models.CharField(max_length=50, blank=True, null=True)\n date_entered = models.DateTimeField(blank=True, null=True)\n user_ip_address = models.CharField(max_length=50, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'polling_unit'\n\n\nclass States(models.Model):\n state_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'states'\n\n\nclass Ward(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n ward_id = models.IntegerField()\n ward_name = models.CharField(max_length=50)\n lga_id = models.IntegerField()\n ward_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'ward'\n", "step-3": "<mask token>\n\n\nclass Agentname(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n managed = False\n db_table = 'agentname'\n\n\nclass AnnouncedLgaResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n lga_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_lga_results'\n\n\nclass 
AnnouncedPuResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n polling_unit_uniqueid = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_pu_results'\n\n\nclass AnnouncedStateResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_state_results'\n\n\nclass AnnouncedWardResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n ward_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_ward_results'\n\n\nclass Lga(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n lga_id = models.IntegerField()\n lga_name = models.CharField(max_length=50)\n state_id = models.IntegerField()\n lga_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'lga'\n\n\nclass Party(models.Model):\n id = models.IntegerField(primary_key=True)\n partyid = models.CharField(max_length=11)\n partyname = models.CharField(max_length=11)\n\n\n class 
Meta:\n managed = False\n db_table = 'party'\n\n\nclass PollingUnit(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n polling_unit_id = models.IntegerField()\n ward_id = models.IntegerField()\n lga_id = models.IntegerField()\n uniquewardid = models.IntegerField(blank=True, null=True)\n polling_unit_number = models.CharField(max_length=50, blank=True, null=True\n )\n polling_unit_name = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_description = models.TextField(blank=True, null=True)\n lat = models.CharField(max_length=255, blank=True, null=True)\n lon = models.CharField(max_length=255, blank=True, null=True)\n entered_by_user = models.CharField(max_length=50, blank=True, null=True)\n date_entered = models.DateTimeField(blank=True, null=True)\n user_ip_address = models.CharField(max_length=50, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'polling_unit'\n\n\nclass States(models.Model):\n state_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'states'\n\n\nclass Ward(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n ward_id = models.IntegerField()\n ward_name = models.CharField(max_length=50)\n lga_id = models.IntegerField()\n ward_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'ward'\n", "step-4": "from django.db import models\n\n\nclass Agentname(models.Model):\n name_id = models.IntegerField(primary_key=True)\n firstname = models.CharField(max_length=255)\n lastname = models.CharField(max_length=255)\n email = models.CharField(max_length=255, blank=True, null=True)\n phone = models.CharField(max_length=13)\n pollingunit_uniqueid = models.IntegerField()\n\n\n class Meta:\n managed = False\n 
db_table = 'agentname'\n\n\nclass AnnouncedLgaResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n lga_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_lga_results'\n\n\nclass AnnouncedPuResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n polling_unit_uniqueid = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_pu_results'\n\n\nclass AnnouncedStateResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_state_results'\n\n\nclass AnnouncedWardResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n ward_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'announced_ward_results'\n\n\nclass Lga(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n lga_id = models.IntegerField()\n lga_name = 
models.CharField(max_length=50)\n state_id = models.IntegerField()\n lga_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'lga'\n\n\nclass Party(models.Model):\n id = models.IntegerField(primary_key=True)\n partyid = models.CharField(max_length=11)\n partyname = models.CharField(max_length=11)\n\n\n class Meta:\n managed = False\n db_table = 'party'\n\n\nclass PollingUnit(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n polling_unit_id = models.IntegerField()\n ward_id = models.IntegerField()\n lga_id = models.IntegerField()\n uniquewardid = models.IntegerField(blank=True, null=True)\n polling_unit_number = models.CharField(max_length=50, blank=True, null=True\n )\n polling_unit_name = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_description = models.TextField(blank=True, null=True)\n lat = models.CharField(max_length=255, blank=True, null=True)\n lon = models.CharField(max_length=255, blank=True, null=True)\n entered_by_user = models.CharField(max_length=50, blank=True, null=True)\n date_entered = models.DateTimeField(blank=True, null=True)\n user_ip_address = models.CharField(max_length=50, blank=True, null=True)\n\n\n class Meta:\n managed = False\n db_table = 'polling_unit'\n\n\nclass States(models.Model):\n state_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'states'\n\n\nclass Ward(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n ward_id = models.IntegerField()\n ward_name = models.CharField(max_length=50)\n lga_id = models.IntegerField()\n ward_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = 
models.CharField(max_length=50)\n\n\n class Meta:\n managed = False\n db_table = 'ward'\n", "step-5": "# This is an auto-generated Django model module.\n# You'll have to do the following manually to clean this up:\n# * Rearrange models' order\n# * Make sure each model has one field with primary_key=True\n# * Make sure each ForeignKey and OneToOneField has `on_delete` set to the desired behavior\n# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table\n# Feel free to rename the models, but don't rename db_table values or field names.\nfrom django.db import models\n\n\nclass Agentname(models.Model):\n name_id = models.IntegerField(primary_key=True)\n firstname = models.CharField(max_length=255)\n lastname = models.CharField(max_length=255)\n email = models.CharField(max_length=255, blank=True, null=True)\n phone = models.CharField(max_length=13)\n pollingunit_uniqueid = models.IntegerField()\n\n class Meta:\n managed = False\n db_table = 'agentname'\n\n\nclass AnnouncedLgaResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n lga_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'announced_lga_results'\n\n\nclass AnnouncedPuResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n polling_unit_uniqueid = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'announced_pu_results'\n\n\nclass AnnouncedStateResults(models.Model):\n result_id = 
models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'announced_state_results'\n\n\nclass AnnouncedWardResults(models.Model):\n result_id = models.IntegerField(primary_key=True)\n ward_name = models.CharField(max_length=50)\n party_abbreviation = models.CharField(max_length=4)\n party_score = models.IntegerField()\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'announced_ward_results'\n\n\nclass Lga(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n lga_id = models.IntegerField()\n lga_name = models.CharField(max_length=50)\n state_id = models.IntegerField()\n lga_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'lga'\n\n\nclass Party(models.Model):\n id = models.IntegerField(primary_key=True)\n partyid = models.CharField(max_length=11)\n partyname = models.CharField(max_length=11)\n\n class Meta:\n managed = False\n db_table = 'party'\n\n\nclass PollingUnit(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n polling_unit_id = models.IntegerField()\n ward_id = models.IntegerField()\n lga_id = models.IntegerField()\n uniquewardid = models.IntegerField(blank=True, null=True)\n polling_unit_number = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_name = models.CharField(max_length=50, blank=True, null=True)\n polling_unit_description = models.TextField(blank=True, 
null=True)\n lat = models.CharField(max_length=255, blank=True, null=True)\n lon = models.CharField(max_length=255, blank=True, null=True)\n entered_by_user = models.CharField(max_length=50, blank=True, null=True)\n date_entered = models.DateTimeField(blank=True, null=True)\n user_ip_address = models.CharField(max_length=50, blank=True, null=True)\n\n class Meta:\n managed = False\n db_table = 'polling_unit'\n\n\nclass States(models.Model):\n state_id = models.IntegerField(primary_key=True)\n state_name = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'states'\n\n\nclass Ward(models.Model):\n uniqueid = models.IntegerField(primary_key=True)\n ward_id = models.IntegerField()\n ward_name = models.CharField(max_length=50)\n lga_id = models.IntegerField()\n ward_description = models.TextField(blank=True, null=True)\n entered_by_user = models.CharField(max_length=50)\n date_entered = models.DateTimeField()\n user_ip_address = models.CharField(max_length=50)\n\n class Meta:\n managed = False\n db_table = 'ward'\n", "step-ids": [ 15, 17, 19, 21, 22 ] }
[ 15, 17, 19, 21, 22 ]