diff --git "a/2237.jsonl" "b/2237.jsonl" new file mode 100644--- /dev/null +++ "b/2237.jsonl" @@ -0,0 +1,653 @@ +{"seq_id":"624130046","text":"import socket\nimport platform\nimport time\nfrom datetime import datetime\n\nextend = \"\\\\\"\nfile_main = r\"C:\\Users\\Public\\ProgFiles\"\nfile_path = r\"C:\\Users\\Public\\ProgFiles\\tmpFiles\"\nsystem_information = \"system.txt\"\n\n\nwith open(file_path + extend + system_information, \"w\") as f:\n hostname = socket.gethostname()\n IPAddr = socket.gethostbyname(hostname)\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n\n f.write(\"Processor: \" + (platform.processor() + \"\\n\"))\n f.write(\"System: \" + platform.system() + \" \" + platform.version() + \"\\n\")\n f.write(\"Machine: \" + platform.machine() + \"\\n\")\n f.write(\"Hostname: \" + hostname + \"\\n\")\n f.write(\"IP Address: \" + IPAddr + \"\\n\")\n f.write(\"Current Time: \" + current_time + \"\\n\")\n","sub_path":"PCInfo.py","file_name":"PCInfo.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"651348351","text":"import pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nmuscles = ['bwm_anterior', 'bwm_far_posterior', 'bwm_head_row_1', 'bwm_head_row_2', 'bwm_posterior']\n\ntimes = [300, 360, 420, 480, 545, 615, 715]\n\nresult = pd.DataFrame(index = muscles, columns = times)\n\ncolors = ['red', 'blue', 'green', 'pink', 'orange']\ncolor_dict = {}\n\nfor i, muscle in enumerate(muscles):\n df = pd.read_csv('js_distance/across_time/' + muscle + '_distance.csv', index_col=0)\n s = df.iloc[0]\n s.drop(s.index.tolist()[0], inplace=True)\n s.index = times[len(s.index)*-1:]\n\n color_dict[muscle] = colors[i]\n result.loc[muscle] = s\n\nprint(result)\n\nresult = result.T\nlines = result.plot.line(color=color_dict)\nplt.title('JSD of cell compared to first time point')\nplt.rcParams.update({'font.size': 52})\nfig = matplotlib.pyplot.gcf()\n# fig.set_size_inches(15, 10)\n# plt.show()\nplt.savefig('asdf.png', bbox_inches='tight', pad_inches=.25)\n","sub_path":"js_distance/plot_js_distance_by_cell.py","file_name":"plot_js_distance_by_cell.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"286908556","text":"from Jumpscale import j\nfrom collections import namedtuple\nfrom .asimap.server import Server\nimport os\n\nTESTTOOLS = j.baseclasses.testtools\n\n\nclass ImapServer(j.baseclasses.factory, TESTTOOLS):\n __jslocation__ = \"j.servers.imap\"\n\n def start(self, address=\"0.0.0.0\", port=7143):\n self.get_instance(address, port).serve_forever()\n\n def get_instance(self, address, port):\n models = self.get_models()\n return Server(address, port, models).server\n\n def get_models(self):\n try:\n bcdb = j.data.bcdb.get(name=\"mails\")\n except j.exceptions.Input:\n bcdb = j.data.bcdb.new(name=\"mails\")\n\n models = os.path.join(self._dirpath, \"..\", \"models\")\n bcdb.models_add(models)\n folder_model = bcdb.model_get(url=\"jumpscale.email.folder\")\n if not folder_model.find(name=\"inbox\"):\n folder = folder_model.new()\n folder.name = \"inbox\"\n folder.subscribed = True\n folder.save()\n\n message_model = bcdb.model_get(url=\"jumpscale.email.message\")\n Models = namedtuple(\"Models\", \"message folder\")\n models = Models(message_model, folder_model)\n return models\n\n def test(self, name=\"\"):\n \"\"\"\n kosmos 'j.servers.imap.test()'\n\n 
\"\"\"\n self._tests_run(name=name)\n","sub_path":"JumpscaleLibs/servers/mail/imap/imapfactory.py","file_name":"imapfactory.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"307442827","text":"\"\"\"\nSample Python/Pygame Programs\nSimpson College Computer Science\nhttp://programarcadegames.com/\nhttp://simpson.edu/computer-science/\n\nMain module for platform scroller example.\n\nFrom:\nhttp://programarcadegames.com/python_examples/sprite_sheets/\n\nExplanation video: http://youtu.be/czBDKWJqOao\n\nPart of a series:\nhttp://programarcadegames.com/python_examples/f.php?file=move_with_walls_example.py\nhttp://programarcadegames.com/python_examples/f.php?file=maze_runner.py\nhttp://programarcadegames.com/python_examples/f.php?file=platform_jumper.py\nhttp://programarcadegames.com/python_examples/f.php?file=platform_scroller.py\nhttp://programarcadegames.com/python_examples/f.php?file=platform_moving.py\nhttp://programarcadegames.com/python_examples/sprite_sheets/\n\nGame art from Kenney.nl:\nhttp://opengameart.org/content/platformer-art-deluxe\n\n\"\"\"\n\nimport pathlib\nfrom os import path\nimport math\nimport random\nimport time\n\nimport pygame\nfrom pygame import mixer\n\nimport levels\nfrom buttons import Button, Panel\nfrom constants import *\nfrom enemy import Enemy\nfrom platforms import *\nfrom player import Player\nfrom tilemap import TileMap\nfrom voter_mail import PowerUp\n\npygame.mixer.init()\npygame.init()\n# Set the height and width of the screen\nscreen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\npygame.display.set_caption(\"Team Project\")\n\nimg_dir = path.join(path.dirname(__file__), 'img')\nplayButton = pygame.image.load(path.join(img_dir, \"btn1.png\")).convert()\ngameMenu = pygame.image.load(\"images/presidentrunMenu.png\")\ngameOver = pygame.image.load(\"images/game_over.png\")\nhelpnav1 = pygame.image.load(\"images/help.png\")\nbackButton = pygame.image.load(path.join(img_dir,\"btn2.png\")).convert()\nbackArrow = pygame.image.load(path.join(img_dir, \"arrow.png\")).convert()\nlevel1Icon = pygame.image.load(\"images/screen.png\").convert()\nstoreLink = pygame.image.load(path.join(img_dir, \"storebtnOrange.png\")).convert()\nheartImg = pygame.image.load(path.join(img_dir, \"hearts.png\")).convert()\nstoreImg = pygame.image.load(\"images/store.png\").convert()\nvendetta = pygame.image.load(\"images/vendetta.png\").convert()\nspeedBoost = pygame.image.load(\"images/speedBoost.png\").convert()\nhealth = pygame.image.load(\"images/health.png\").convert()\nplayerImg = pygame.image.load(path.join(img_dir, \"p1_walk02.png\")).convert()\nplayerImg.set_colorkey(BLACK)\nplayerImg = pygame.transform.scale(playerImg, (160,190))\nheartImg.set_colorkey(WHITE)\n#music_file = 'music/senorita2.wav'\n# mixer.music.load(music_file)\n# mixer.music.play(-1)\n# music_file = 'music\\Piano Fantasia Song For Denise.mp3'\n# pygame.mixer.music.load(music_file)\n# pygame.mixer.music.play(-1) \n\nstoreImages = [vendetta, speedBoost, health]\n# enhancer = ImageEnhance.Brightness(newImg)\n# im_output = enhancer.enhance(0.5)\n# im_output.save('images/brighter-img.png')\n\n# We add all the sprites in a group the reason for this is so that we can check for\n# collision between different groups of sprites.\nbtn1 = Button(playButton, 200, 90, 0,13, \"Play\", \"vertical\") #btn1 = Button(playButton, 220, 105, 0,10, \"Play\", \"vertical\")\nbtn5 = Button(playButton, 200, 50, 0,11, \"Next\", 
\"vertical\")\nbtn4 = Button(playButton, 200, 75, 0,39, \"Help\", \"vertical\") #btn4 = Button(playButton, 220, 105, 0,50, \"Help\", \"vertical\")\nbtn3 = Button(backArrow, 60, 90, 0,10, \"Back/Arrow\")\nbtn7 = Button(backButton, 220, 120, 0, 30, \"Back\")\nbtn6 = Button(playButton, 220, 105, 500,20, \"Save\")\nbtn8 = Button(storeLink, 125, 50,SCREEN_WIDTH/2-57, SCREEN_HEIGHT - 220, \"Store\")\n\nactive_sprite_list = pygame.sprite.Group()\nbuttons = pygame.sprite.Group()\npanels = pygame.sprite.Group()\nwalls = pygame.sprite.Group()\n\n# player = Player()\nlevel_list = []\nlevel_list.append(levels.Level_01())\nlevel_list.append(levels.Level_02())\nlevel_list.append(levels.Level_03())\n# create the game selection\nlevels = []\nfor x in range(3):\n levels.append(Panel(level1Icon, 200, 150, 100 + x * 210, 100, 'Level{}'.format(x + 1)))\n\nshop = []\nfor x in range(3):\n shop.append(Panel(storeImages[x], 100, 100, 360 + x * 105, 110,'Item{}'.format(x + 1)))\n\ndef draw_healthBar(surf, x, y, pct):\n if pct < 0:\n pct = 0\n\n length = 100\n height = 15\n outline = pygame.Rect(x, y, length, height)\n pygame.draw.rect(surf, (0, 255, 0), (x, y, pct, height))\n pygame.draw.rect(surf, (255, 255, 255),outline, 2)\n\ndef draw_lives(surf, x, y, lives, img):\n for i in range(lives):\n img_rect = img.get_rect()\n img_rect.x = x + 40 * i\n img_rect.y = y\n surf.blit(img, img_rect)\n\ndef refresh():\n for actives in active_sprite_list:\n actives.kill()\n for btn in buttons:\n btn.kill()\n\ndef cameraMovement(current_level):\n if current_level.player.rect.x >= 240:\n diff = current_level.player.rect.x - 240\n current_level.player.rect.x = 240\n current_level.shift_worldX(-diff)\n\n if current_level.player.rect.y <= 0:\n diff = current_level.player.rect.y\n current_level.player.rect.y = 0\n current_level.shift_worldY(-diff)\n \n if current_level.player.rect.y > 90:\n current_level.shift_worldX(-current_level.world_shiftX)\n current_level.shift_worldY(-current_level.world_shiftY)\n current_level.player.rect.y = 380\n\n if current_level.player.rect.y >= 50:\n diff = current_level.player.rect.y - 50\n current_level.player.rect.y = 50\n current_level.shift_worldY(-diff)\n\ndef playerMovement(current_level, event):\n current_level.player.go_right()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP: #and current_level.player.touchingGround:\n current_level.player.jump()\n\ndef mainMenu():\n refresh()\n #for oldmembers in active_sprite_list:\n # oldmembers.kill()\n screen.blit(gameMenu, (0,0))\n buttons.add(btn1)\n buttons.add(btn4)\n buttons.add(btn8)\n active_sprite_list.add(btn1)\n active_sprite_list.add(btn4)\n active_sprite_list.add(btn8)\n #music_file = 'music/senorita2.wav'\n # mixer.music.load(music_file)\n # mixer.music.play(-1)\n\ndef draw_text(surf, text, size, x, y):\n font_name = pygame.font.match_font('arial')\n font = pygame.font.Font(font_name, size)\n text_surface = font.render(text, True, WHITE)\n text_rect = text_surface.get_rect()\n text_rect.midtop = (x, y)\n surf.blit(text_surface, text_rect)\n\ndef lavaCollision(current_level):\n lava_hits = pygame.sprite.spritecollide(current_level.player, current_level.lava_platform, False)\n for lava in lava_hits:\n current_level.player.life -= 1\n current_level.resetMov()\n current_level.shift_worldX(-current_level.world_shiftX)\n current_level.shift_worldY(40 -current_level.world_shiftY)\n # if (player.life <= 0 and player.health <= 0):\n # mainMenu()\n # player.life = 3\n # player.health = 100\n\n #this is the hurt sound effect\n pain = 
mixer.Sound('music/bigOuch.wav')\n pain.play()\n #player.jump()\n\n\ndef mailCollide(current_level):\n hits = pygame.sprite.spritecollide(current_level.player, current_level.vote_list, False)\n for hit in hits:\n # current_level.player.health += .5\n hit.kill()\n\n# When you clicke the play button this should bring you to all the levels\n# inside the game.\ndef levelSelection():\n refresh()\n for lvl in range(3):\n active_sprite_list.add(levels[lvl])\n panels.add(levels[lvl])\n #pygame.draw.rect(screen, (255,0,0), (100 + x * 210, 100, 200, 150))\n # this is the back button. It takes you back\n buttons.add(btn3) \n btn3.rect.x = 315\n btn3.rect.y = 400\n active_sprite_list.add(btn3)\n \n#def gameOver():\n# refresh()\n \n# def gameSettings():\n# refresh()\n \n# def gameHelp():\n# refresh()\n \ndef gameStore():\n refresh()\n for shape_shop in range(3):\n active_sprite_list.add(shop[shape_shop])\n panels.add(shop[shape_shop])\n\n# enhancements = ['jump', 'run', 'life']\n\n#When the game starts the user will be placed 340 pixels away from the left screen.\n# level_list[0].player.rect.x = 140\n# # After the player will then be shifted upwards\n# player.rect.y = SCREEN_HEIGHT - player.rect.height - 400\n\ndef level1():\n refresh()\n\n screen.fill(BLUE)\n level_list[0].player.level = level_list[0]\n level_list[0].draw(screen)\n active_sprite_list.add(level_list[0].player)\n draw_healthBar(screen, 5, 5, level_list[0].player.health)\n draw_lives(screen, SCREEN_WIDTH - 140, 5, level_list[0].player.life, heartImg)\n \ndef level2():\n refresh()\n\n screen.fill(BLUE)\n\n level_list[1].player.level = level_list[1]\n\n level_list[1].draw(screen)\n # # This draws the player health bar.\n active_sprite_list.add(level_list[1].player)\n draw_healthBar(screen, 5, 5, level_list[1].player.health)\n draw_lives(screen, SCREEN_WIDTH - 140, 5, level_list[1].player.life, heartImg)\n \ndef level3():\n refresh()\n screen.fill(BLUE)\n\n level_list[2].player.level = level_list[2]\n\n level_list[2].draw(screen)\n active_sprite_list.add(level_list[2].player)\n draw_healthBar(screen, 5, 5, level_list[2].player.health)\n draw_lives(screen, SCREEN_WIDTH - 140, 5, level_list[2].player.life, heartImg)\n\ndef main():\n \"\"\" Main Program \"\"\"\n global screen\n\n # Create the player\n #player = Player()\n #mob = Enemy()\n\n #Loop until the user clicks the close button.\n done = False\n\n # Used to manage how fast the screen updates\n clock = pygame.time.Clock()\n\n index = 1\n # -------- Main Program Loop -----------\n while not done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n if event.type == pygame.MOUSEMOTION:\n if index == 1 or index == 3 or index == 4 or index == 5 or index == 7: #or index == 8:\n for listener in buttons:\n listener.hover(event)\n \n if event.type == pygame.MOUSEBUTTONDOWN:\n if index <= 10: #or index == 8:\n for listener in buttons:\n value = listener.onClick(event, index)\n index = value\n if (index == 8):\n pygame.image.save(screen, \"images/screen.png\")\n if index == 7:\n for panel in panels:\n value = panel.onClick(event, index)\n index = value\n if index == 6:\n level1()\n playerMovement(level_list[0], event)\n cameraMovement(level_list[0])\n mailCollide(level_list[0])\n lavaCollision(level_list[0])\n level_list[0].level_change = 0\n for mov in level_list[0].enemy_mov:\n # pygame.time.wait(1)\n # write the code in here that moves the enemy\n pass\n\n # find the distance between the player and the enemy\n for pos in level_list[0].enemy_sprite:\n # distance = 
math.sqrt((player.rect.x - pos.rect.x) ** 2 + (player.rect.y - pos.rect.y) ** 2)\n pygame.draw.circle(screen, RED, (pos.rect.centerx, pos.rect.centery), 40, 1)\n \n movhit = pygame.sprite.spritecollide(level_list[0].player, level_list[0].enemy_mov, False)\n for hits in movhit:\n if (level_list[0].player.touchingGround == False):\n level_list[0].player.bounce(22) \n else:\n level_list[0].player.health -= 1\n if (level_list[0].player.health <= 0): \n # player.rect.x = 340\n # # After the player will then be shifted upwards\n # player.rect.y = 200\n level_list[0].shift_worldX(-level_list[0].world_shiftX)\n level_list[0].shift_worldY(40 -level_list[0].world_shiftY)\n level_list[0].resetMov()\n # current_level.shift_worldY(0)\n level_list[0].player.health = 100\n level_list[0].player.life -= 1\n if (level_list[0].player.life <= 0):\n level_list[0].player.rect.x = 140\n # After the player will then be shifted upwards\n level_list[0].player.rect.y = SCREEN_HEIGHT - level_list[0].player.rect.height - 400\n level_list[0].restart()\n level_list[0].player.life = 3\n level_list[0].player.health = 100\n index = 3\n else:\n level1()\n\n hit = pygame.sprite.spritecollide(level_list[0].player, level_list[0].enemy_sprite, False)\n for hits in hit:\n if (level_list[0].player.touchingGround == False):\n level_list[0].player.bounce(22) \n else:\n level_list[0].player.health -= 1\n if (level_list[0].player.health <= 0): \n # player.rect.x = 340\n # # After the player will then be shifted upwards\n # player.rect.y = 200\n level_list[0].shift_worldX(-level_list[0].world_shiftX)\n level_list[0].shift_worldY(40 -level_list[0].world_shiftY)\n level_list[0].resetMov()\n # current_level.shift_worldY(0)\n level_list[0].player.health = 100\n level_list[0].player.life -= 1\n if (level_list[0].player.life <= 0):\n level_list[0].restart()\n level_list[0].player.life = 3\n level_list[0].player.health = 100\n index = 3\n else:\n level1()\n\n hit = pygame.sprite.spritecollide(level_list[0].player, level_list[0].new_level, False)\n for door in hit:\n time.sleep(1)\n level_list[0].shift_worldX(-level_list[0].world_shiftX)\n level_list[0].shift_worldY(40 -level_list[0].world_shiftY)\n index = 8\n level_list[0].player.health = 100\n level_list[0].player.life = 3\n level_list[0].restart()\n\n if index == 8:\n level2()\n playerMovement(level_list[1], event)\n cameraMovement(level_list[1])\n mailCollide(level_list[1])\n lavaCollision(level_list[1])\n level_list[1].level_change = 1\n hit = pygame.sprite.spritecollide(level_list[1].player, level_list[1].enemy_sprite, False)\n\n for hits in hit:\n if (level_list[1].player.touchingGround == False):\n level_list[1].player.bounce(22) \n else:\n level_list[1].player.health -= 1\n # if (player.touchingGround == False and player.rect.y > hits.rect.y):\n # player.bounce(22)\n # if (player.touchingGround == False):\n # player.jump()\n # else:\n #this is the hurt sound effect\n #if pygame.mixer.get_busy() == False:\n pain = mixer.Sound('music/bigOuch.wav')\n pain.play()\n # if player.change_y < 0 and player.y > level_list[2].enemy_sprite.y:\n # player.jump()\n #else:\n #player.health -= 1\n\n level_list[1].player.health -= 1\n #player.jump()\n if (level_list[1].player.health <= 0): \n # player.rect.x = 340\n # # After the player will then be shifted upwards\n # player.rect.y = 200\n level_list[1].shift_worldX(-level_list[1].world_shiftX)\n level_list[1].shift_worldY(40 -level_list[1].world_shiftY)\n level_list[0].resetMov()\n # current_level.shift_worldY(0)\n level_list[1].player.health = 
100\n level_list[1].player.life -= 1\n if (level_list[1].player.life <= 0):\n level_list[1].restart()\n level_list[1].player.life = 3\n level_list[1].player.health = 10\n index = 3\n else:\n level2()\n hit = pygame.sprite.spritecollide(level_list[1].player, level_list[1].new_level, False)\n for door in hit:\n level_list[1].shift_worldX(-level_list[1].world_shiftX)\n level_list[1].shift_worldY(40 -level_list[1].world_shiftY)\n index = 9\n level_list[1].player.health = 100\n level_list[1].player.life = 3\n level_list[1].restart()\n\n if index == 9:\n level3()\n playerMovement(level_list[2],event)\n cameraMovement(level_list[2])\n mailCollide(level_list[2])\n lavaCollision(level_list[2])\n level_list[2].level_change = 2\n hit = pygame.sprite.spritecollide(level_list[2].player, level_list[2].enemy_sprite, False)\n for hits in hit:\n if (level_list[2].player.touchingGround == False):\n level_list[2].player.bounce(22) \n else:\n level_list[2].player.health -= 1\n if (level_list[2].player.health <= 0): \n # player.rect.x = 340\n # # After the player will then be shifted upwards\n # player.rect.y = 200\n level_list[2].shift_worldX(-level_list[2].world_shiftX)\n level_list[2].shift_worldY(40 -level_list[2].world_shiftY)\n level_list[0].resetMov()\n # current_level.shift_worldY(0)\n level_list[2].player.health = 100\n level_list[2].player.life -= 1\n if (level_list[2].player.life <= 0):\n level_list[2].restart()\n level_list[2].player.life = 3\n level_list[2].player.health = 100\n index = 3\n\n else:\n level3()\n hit = pygame.sprite.spritecollide(level_list[2].player, level_list[2].new_level, False)\n for door in hit:\n level_list[2].shift_worldX(-level_list[2].world_shiftX)\n level_list[2].shift_worldY(40 -level_list[2].world_shiftY)\n mainMenu()\n index = 1\n level_list[2].player.health = 100\n level_list[2].player.life = 3\n level_list[2].restart()\n level_list[2].resetMov()\n\n if index == 7:\n refresh()\n screen.fill((0,0,0))\n levelSelection()\n if index == 3:\n # game over screen\n for oldmembers in active_sprite_list:\n oldmembers.kill()\n screen.blit(gameOver, (0,0))\n buttons.add(btn3)\n active_sprite_list.add(btn3)\n btn3.rect.x = 725\n btn5.rect.x = 570\n btn3.rect.y = 430\n btn5.rect.y = 20\n #game help\n if index == 4:\n # fun features within the game\n for oldmembers in active_sprite_list:\n oldmembers.kill()\n screen.blit(helpnav1, (0,0))\n buttons.add(btn3)\n active_sprite_list.add(btn3)\n\n btn3.rect.x = 725\n btn5.rect.x = 570\n btn3.rect.y = 430\n btn5.rect.y = 20\n if index == 10:\n gameStore()\n screen.blit(storeImg, (0,0))\n screen.blit(playerImg, (75, 175))\n\n btn3.rect.x = 670\n buttons.add(btn3)\n active_sprite_list.add(btn3)\n if index == 5:\n # add the next button in here.\n for oldmembers in active_sprite_list:\n oldmembers.kill()\n screen.fill((0,0,0))\n # this is the back button. It takes you back\n buttons.add(btn7)\n active_sprite_list.add(btn7)\n elif index == 1:\n mainMenu()\n \n\n active_sprite_list.update()\n active_sprite_list.draw(screen)\n\n # Limit to 60 frames per second\n clock.tick(60)\n \n \n pygame.display.flip()\n\n # Be IDLE friendly. 
If you forget this line, the program will 'hang'\n # on exit.\n pygame.quit()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"131464668","text":"# python3\n\"\"\"Transformer model for morphological inflection.\"\"\"\n\n\nimport enum\nimport sys\nimport numpy as np\nimport tensorflow as tf\nfrom . import evaluation as eval_lib\n\nTRAIN_STEP_SIGNATURE = [\n tf.TensorSpec(shape=(None, None), dtype=tf.int64),\n tf.TensorSpec(shape=(None, None), dtype=tf.int64),\n]\n\n\n################################################################################\n### Classes\n################################################################################\n\n\nclass ModelFormat(enum.Enum):\n \"\"\"Enum object describing data format and number of sources.\"\"\"\n\n TRANSFORMER = 'TRANSFORMER'\n\n def uses_kann_style_features(self):\n return self in (ModelFormat.TRANSFORMER,)\n\n def is_transformer(self):\n return self in (ModelFormat.TRANSFORMER,)\n\n\nclass Model(object):\n \"\"\"Interface for using transformer model for inflection.\"\"\"\n\n def __init__(self, hparams, all_data, flags):\n \"\"\"Initialize a model based on the transformer architecture.\n\n Args:\n hparams: TensorFlow hyperparameters.\n all_data: holds any relevant datasets and related objects\n needed to construct and use the model. For the transformer based model, we\n only use the following components of all_data..\n src_language_index) TensorFlow DataSet object containing an index for\n mapping between string and integerized representations.\n trg_language_index) Same as above but for the target language.\n dataset_train) TensorFlow dataset containing the batched training data.\n dataset_dev) Same as above but for dev.\n dataset_test) Same as above but for test.\n src_max_len_seq) Length of the longest source sequence in the train set\n or in the train set used to build the model we are restoring.\n flags: Command line arguments.\n \"\"\"\n\n self.flags = flags\n self.hparams = hparams\n self.all_data = all_data\n self.src_language_index = all_data.src_language_index\n self.trg_language_index = all_data.trg_language_index\n self.dataset_train = self.all_data.dataset_train\n self.dev_srcs, self.dev_trgs = self.all_data.dataset_dev\n self.test_srcs, self.test_trgs = self.all_data.dataset_test\n self.max_len = self.all_data.src_max_len_seq\n self.best_checkpoint_path = None\n self.loss_object = None\n self.input_vocab_size = self.src_language_index.vocab_size + 2\n self.target_vocab_size = self.trg_language_index.vocab_size + 2\n self.dev_acc = 0\n self.test_acc = 0\n self.base_wf_tags_2_loss = {}\n\n # Set Optimizer and loss objects.\n self.set_optimizer()\n self.set_loss_objects()\n\n # Build Transformer model and checkpoint manager.\n self.transformer = Transformer(\n self.hparams.num_layers, self.hparams.d_model, self.hparams.num_heads,\n self.hparams.dff, self.input_vocab_size, self.target_vocab_size,\n self.hparams.dropout_rate)\n self.set_or_restore_checkpoint()\n\n def set_optimizer(self):\n learning_rate = CustomSchedule(self.hparams.d_model,\n warmup_steps=self.hparams.warmup_steps)\n self.optimizer = tf.keras.optimizers.Adam(\n learning_rate, beta_1=self.hparams.beta_1, beta_2=self.hparams.beta_2,\n epsilon=self.hparams.epsilon)\n\n def set_loss_objects(self):\n self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=True, 
reduction='none')\n self.train_loss = tf.keras.metrics.Mean(name='train_loss')\n self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(\n name='train_accuracy')\n\n def set_or_restore_checkpoint(self):\n self.checkpoint = tf.train.Checkpoint(transformer=self.transformer,\n optimizer=self.optimizer)\n self.ckpt_manager = tf.train.CheckpointManager(\n self.checkpoint, self.hparams.checkpoint_dir, max_to_keep=1)\n # If checkpoint was provided at command line, restore it.\n if self.hparams.checkpoint_to_restore:\n sys.stderr.write(\n '\\t\\t\\tRestoring model from checkpoint:\\n\\t\\t\\t{}\\n'.format(\n self.hparams.checkpoint_to_restore))\n sys.stderr.flush()\n self.checkpoint.restore(self.hparams.checkpoint_to_restore)\n\n def loss_function(self, real, pred):\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss_ = self.loss_object(real, pred)\n\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n\n return tf.reduce_mean(loss_)\n\n # Compiles train_step into a TF graph for faster execution.\n @tf.function(input_signature=TRAIN_STEP_SIGNATURE)\n def train_step(self, inp, trg):\n \"\"\"Runs one batch of training as a graph-executable function.\"\"\"\n\n trg_input = trg[:, :-1]\n trg_real = trg[:, 1:]\n\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,\n trg_input)\n with tf.GradientTape() as tape:\n predictions, _ = self.transformer(inp, trg_input, True, enc_padding_mask,\n combined_mask, dec_padding_mask)\n loss = self.loss_function(trg_real, predictions)\n\n gradients = tape.gradient(loss, self.transformer.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients,\n self.transformer.trainable_variables))\n\n self.train_loss(loss)\n self.train_accuracy(trg_real, predictions)\n\n def train(self):\n \"\"\"Trains a transformer model.\"\"\"\n\n sys.stderr.write('\\t\\t\\tMax epochs: {}'.format(self.hparams.max_num_epochs))\n sys.stderr.write(', Max patience: {}'.format(self.hparams.patience))\n sys.stderr.write(', Warm up steps: {}\\n'.format(self.hparams.warmup_steps))\n\n self.dev_acc = -1.0\n num_epochs_without_improvement = 0\n for epoch in range(self.hparams.max_num_epochs):\n\n sys.stderr.flush()\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n\n for (_, (inp, trg)) in enumerate(self.dataset_train):\n\n self.train_step(inp, trg)\n\n dev_acc = self.validate()\n sys.stderr.write(\n '\\t\\t\\tEpoch {} Loss {:.4f} Dev Acc {:.4f}\\n'.format(\n epoch + 1, self.train_loss.result(), dev_acc))\n\n if dev_acc > self.dev_acc:\n self.best_checkpoint_path = self.ckpt_manager.save()\n sys.stderr.write(\n '\\t\\t\\t\\tSaving checkpoint for epoch {} at \\n\\t\\t\\t\\t{}\\n'.format(\n epoch + 1, self.best_checkpoint_path))\n self.dev_acc = dev_acc\n num_epochs_without_improvement = 0\n else:\n num_epochs_without_improvement += 1\n if self.dev_acc > 0.0 and num_epochs_without_improvement >= self.hparams.patience:\n sys.stderr.write(\n '\\t\\t\\tStopping early at epoch: {}\\n'.format(epoch + 1))\n break\n\n # Restore the best performing model.\n sys.stderr.write(\n '\\t\\t\\t\\tRestoring best checkpoint: \\n\\t\\t\\t\\t{}\\n'.format(self.best_checkpoint_path))\n self.checkpoint.restore(self.best_checkpoint_path)\n\n return self.best_checkpoint_path\n\n def validate(self, dev=True, best_checkpoint_path=None, predictions_file=None):\n \"\"\"Validates a trained model on unseen sequences.\"\"\"\n\n if dev:\n val_srcs, val_trgs = self.dev_srcs, self.dev_trgs\n else:\n val_srcs, val_trgs = self.test_srcs, self.test_trgs\n pred_seqs, 
src_seqs, trg_seqs = {}, {}, {}\n src_feat_bundles, trg_feat_bundles = {}, {}\n\n preds = self.translate(val_srcs)\n\n for val_id in range(len(val_srcs)):\n src = val_srcs[val_id]\n trg = val_trgs[val_id]\n pred = preds[val_id]\n pred_seqs[val_id], src_seqs[val_id], trg_seqs[val_id] = pred, src, trg\n # Transformer assumes all features are passed as part of the sequence.\n src_feat_bundles[val_id], trg_feat_bundles[val_id] = '', ''\n\n exact_match_accuracy = eval_lib.evaluate(pred_seqs, src_seqs, trg_seqs, src_feat_bundles,trg_feat_bundles, predictions_file=predictions_file)\n\n return exact_match_accuracy\n\n def translate(self, val_srcs):\n \"\"\"Gets predictions and converts them from integerized form to strings.\"\"\"\n\n predicted_sequences = []\n\n while val_srcs:\n val_srcs_batch = val_srcs[:self.hparams.val_batch_size]\n val_srcs = val_srcs[self.hparams.val_batch_size:]\n results, _ = self.evaluate(val_srcs_batch)\n\n # Convert batch results from integer to string space.\n for result in results:\n prediction = []\n result = result.numpy()[1:]\n for pred_idx in result:\n if pred_idx <= len(self.trg_language_index.tokens):\n prediction.append(self.trg_language_index.tokens[pred_idx - 1])\n else:\n break\n predicted_sequences.append(' '.join(prediction))\n\n return predicted_sequences\n\n def evaluate(self, inp_sequences):\n \"\"\"Gets predictions and attention weights from a set of input sequences.\n\n Args:\n inp_sequences: List of sequences in character space. These sequences will\n contain SRC_* and TRG_* features as the transformer model, as implemented,\n requires these to represented as part of the input. The distribution of\n these features w.r.t. the normal sequence elements, i.e., characters, is\n determined by command line arguments.\n Returns:\n output: Matrix of predictions for each input sequence in integer space\n to be parsed by the translate function.\n attention_weights: Attention weight matrices for every input sequence.\n \"\"\"\n\n batch_len = len(inp_sequences)\n\n # Integerize input sentences.\n encoded_inp_sequences = []\n for inp_sequence in inp_sequences:\n middle = []\n for ch in inp_sequence.split():\n try:\n middle.append(self.src_language_index.tokens.index(ch) + 1)\n except ValueError: # Handle OOV characters\n # OOVs should be extremely rare because characters are closed class.\n middle.append(0)\n encoded_inp_sequence = [self.src_language_index.vocab_size] + middle + [\n self.src_language_index.vocab_size+1]\n encoded_inp_sequences.append(encoded_inp_sequence)\n # Pad encoder input.\n encoder_input = tf.keras.preprocessing.sequence.pad_sequences(\n encoded_inp_sequences, maxlen=self.max_len, padding='post')\n # Convert encoder input to TF tensor.\n encoder_input = tf.convert_to_tensor(encoder_input)\n\n # Initialize decoder input.\n decoder_input = [[self.trg_language_index.vocab_size]]*batch_len\n output = tf.convert_to_tensor(decoder_input)\n\n # Start incrementally decoding.\n for _ in range(self.max_len):\n\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(\n encoder_input, output)\n\n # predictions.shape == (batch_size, seq_len, vocab_size)\n predictions, attention_weights = self.transformer(\n encoder_input, output, False, enc_padding_mask, combined_mask,\n dec_padding_mask)\n\n # Select the last integerized sequence element from the seq_len dimension.\n predictions = predictions[:, -1:, :] # (batch_size, 1, vocab_size)\n\n predicted_ids = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)\n\n # Concatentate predicted_id to 
output to get step i+1 decoder input.\n output = tf.concat([output, predicted_ids], axis=-1)\n\n return output, attention_weights\n\n def validate_forced(self, dev=False, predictions_file=None):\n \"\"\"Gets losses by forcing each trg so we can determine the best trg for each src.\"\"\"\n\n if dev:\n srcs = self.dev_srcs\n trgs = self.dev_trgs\n else:\n srcs = self.test_srcs\n trgs = self.test_trgs\n\n while srcs:\n srcs_batch_raw = srcs[:self.hparams.val_batch_size]\n srcs = srcs[self.hparams.val_batch_size:]\n trgs_batch_raw = trgs[:self.hparams.val_batch_size]\n trgs = trgs[self.hparams.val_batch_size:]\n\n # Convert srcs and trgs to integerized tensors.\n srcs_batch = self.prepare_for_forced_validation(srcs_batch_raw, self.src_language_index)\n trgs_batch = self.prepare_for_forced_validation(trgs_batch_raw, self.trg_language_index)\n assert len(srcs_batch) == len(trgs_batch)\n\n # Get losses.\n losses = self.forced_val_step(srcs_batch, trgs_batch)\n assert len(losses) == len(srcs_batch)\n\n for idx in range(len(srcs_batch)):\n\n s = srcs_batch_raw[idx].split()\n base = ''\n tag_tup = [None, None, None]\n for x in s:\n if len(x) == 1:\n base += x\n if x.startswith('TRG_'):\n tag_tup[0] = x\n elif x.startswith('IC_'):\n tag_tup[1] = x\n elif x.startswith('Co_'):\n tag_tup[2] = x\n elif '_BASE' in x:\n base += x.replace('_BASE', '_')\n tag_tup = tuple(tag_tup)\n wf = ''.join(trgs_batch_raw[idx].split())\n self.base_wf_tags_2_loss[(base, wf, tag_tup)] = losses[idx].numpy()\n if predictions_file:\n predictions_file.write('{} \\t{} \\t-> \\t{} \\t with loss: {}\\n'.format(base, str(tag_tup), wf, str(round(self.base_wf_tags_2_loss[(base, wf, tag_tup)], 5))))\n\n return self.base_wf_tags_2_loss\n\n def prepare_for_forced_validation(self, src_or_trg, integerizer):\n \"\"\"Integerizes a batch of raw source or target representations.\"\"\"\n\n encoded_sequences = []\n for sequence in src_or_trg:\n middle = []\n for ch in sequence.split():\n try:\n middle.append(integerizer.tokens.index(ch) + 1)\n except ValueError: # Handle OOV characters\n # OOVs should be extremely rare because characters are closed class.\n middle.append(0)\n encoded_sequence = [integerizer.vocab_size] + middle + [\n integerizer.vocab_size+1]\n encoded_sequences.append(encoded_sequence)\n # Pad encoder input.\n src_or_trg_tensor = tf.keras.preprocessing.sequence.pad_sequences(\n encoded_sequences, maxlen=self.max_len, padding='post')\n # Convert encoder input to TF tensor.\n src_or_trg_tensor = tf.convert_to_tensor(src_or_trg_tensor)\n\n return src_or_trg_tensor\n\n def forced_val_step(self, inp, trg):\n \"\"\"Gets losses for each input-target pair in the batch.\"\"\"\n\n trg_inp = trg[:, :-1]\n trg_real = trg[:, 1:]\n\n (enc_padding_mask, combined_mask,\n dec_padding_mask) = create_masks(inp, trg_inp)\n\n predictions, _ = self.transformer(\n inp, trg_inp, False, enc_padding_mask, combined_mask, dec_padding_mask)\n losses = self.forced_val_loss_function(trg_real, predictions)\n\n return losses\n\n def forced_val_loss_function(self, real, pred):\n\n mask = tf.math.logical_not(tf.math.equal(real, 0))\n loss_ = self.loss_object(real, pred)\n\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n\n return tf.reduce_mean(loss_, 1)\n\nclass Transformer(tf.keras.Model):\n \"\"\"Transformer Architecture.\"\"\"\n\n def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size, target_vocab_size, rate=0.1):\n\n super(Transformer, self).__init__()\n\n self.encoder = Encoder(num_layers, d_model, num_heads, dff,\n 
input_vocab_size, rate)\n\n self.decoder = Decoder(num_layers, d_model, num_heads, dff,\n target_vocab_size, rate)\n\n self.final_layer = tf.keras.layers.Dense(target_vocab_size)\n\n def call(self, inp, trg, training, enc_padding_mask, look_ahead_mask, dec_padding_mask):\n \"\"\"Calls the transformer.\"\"\"\n\n enc_output = self.encoder(inp, training, enc_padding_mask)\n # (batch_size, inp_seq_len, d_model)\n\n dec_output, attention_weights = self.decoder(\n trg, enc_output, training, look_ahead_mask, dec_padding_mask)\n # dec_output.shape == (batch_size, trg_seq_len, d_model)\n\n final_output = self.final_layer(dec_output)\n # (batch_size, trg_seq_len, target_vocab_size)\n\n return final_output, attention_weights\n\n\nclass EncoderLayer(tf.keras.layers.Layer):\n \"\"\"Architecture of a single layer within the encoder.\"\"\"\n\n def __init__(self, d_model, num_heads, dff, rate=0.1):\n super(EncoderLayer, self).__init__()\n\n self.mha = MultiHeadAttention(d_model, num_heads)\n self.ffn = point_wise_feed_forward_network(d_model, dff)\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n\n def call(self, x, training, mask):\n \"\"\"Calls the encoder layer.\"\"\"\n\n attn_output, _ = self.mha(x, x, x, mask)\n # (batch_size, input_seq_len, d_model)\n attn_output = self.dropout1(attn_output, training=training)\n out1 = self.layernorm1(x + attn_output)\n # (batch_size, input_seq_len, d_model)\n\n ffn_output = self.ffn(out1) # (batch_size, input_seq_len, d_model)\n ffn_output = self.dropout2(ffn_output, training=training)\n out2 = self.layernorm2(out1 + ffn_output)\n # (batch_size, input_seq_len, d_model)\n\n return out2\n\n\nclass DecoderLayer(tf.keras.layers.Layer):\n \"\"\"Architecture of a single layer within the decoder.\"\"\"\n\n def __init__(self, d_model, num_heads, dff, rate=0.1):\n\n super(DecoderLayer, self).__init__()\n\n self.mha1 = MultiHeadAttention(d_model, num_heads)\n self.mha2 = MultiHeadAttention(d_model, num_heads)\n self.ffn = point_wise_feed_forward_network(d_model, dff)\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n\n def call(self, x, enc_output, training, look_ahead_mask, padding_mask):\n # enc_output == (batch_size, input_seq_len, d_model)\n \"\"\"Calls the decoder layer.\"\"\"\n\n attn1, attn_weights_block1 = self.mha1(x, x, x, look_ahead_mask)\n # (batch_size, target_seq_len, d_model)\n attn1 = self.dropout1(attn1, training=training)\n out1 = self.layernorm1(attn1 + x)\n\n attn2, attn_weights_block2 = self.mha2(\n enc_output, enc_output, out1, padding_mask)\n # (batch_size, target_seq_len, d_model)\n attn2 = self.dropout2(attn2, training=training)\n out2 = self.layernorm2(attn2 + out1)\n # (batch_size, target_seq_len, d_model)\n\n ffn_output = self.ffn(out2) # (batch_size, target_seq_len, d_model)\n ffn_output = self.dropout3(ffn_output, training=training)\n out3 = self.layernorm3(ffn_output + out2)\n # (batch_size, target_seq_len, d_model)\n\n return out3, attn_weights_block1, attn_weights_block2\n\n\nclass Encoder(tf.keras.layers.Layer):\n \"\"\"Encoder architecture.\"\"\"\n\n def 
__init__(self, num_layers, d_model, num_heads, dff, input_vocab_size, rate=0.1):\n\n super(Encoder, self).__init__()\n\n self.d_model = d_model\n self.num_layers = num_layers\n\n self.embedding = tf.keras.layers.Embedding(input_vocab_size, self.d_model)\n self.pos_encoding = positional_encoding(input_vocab_size, self.d_model)\n\n self.enc_layers = [EncoderLayer(self.d_model, num_heads, dff, rate)\n for _ in range(self.num_layers)]\n\n self.dropout = tf.keras.layers.Dropout(rate)\n\n def call(self, x, training, mask):\n \"\"\"Calls the Encoder.\"\"\"\n\n seq_len = tf.shape(x)[1]\n\n # Adding embedding and position encoding.\n x = self.embedding(x) # (batch_size, input_seq_len, d_model)\n x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n x += self.pos_encoding[:, :seq_len, :]\n\n x = self.dropout(x, training=training)\n\n for i in range(self.num_layers):\n x = self.enc_layers[i](x, training, mask)\n\n return x # (batch_size, input_seq_len, d_model)\n\n\nclass Decoder(tf.keras.layers.Layer):\n \"\"\"Decoder architecture.\"\"\"\n\n def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size, rate=0.1):\n super(Decoder, self).__init__()\n\n self.d_model = d_model\n self.num_layers = num_layers\n\n self.embedding = tf.keras.layers.Embedding(target_vocab_size, self.d_model)\n self.pos_encoding = positional_encoding(target_vocab_size, self.d_model)\n\n self.dec_layers = [DecoderLayer(self.d_model, num_heads, dff, rate)\n for _ in range(self.num_layers)]\n self.dropout = tf.keras.layers.Dropout(rate)\n\n def call(self, x, enc_output, training, look_ahead_mask, padding_mask):\n \"\"\"Calls the decoder.\"\"\"\n\n seq_len = tf.shape(x)[1]\n attention_weights = {}\n\n x = self.embedding(x) # (batch_size, target_seq_len, d_model)\n x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))\n x += self.pos_encoding[:, :seq_len, :]\n\n x = self.dropout(x, training=training)\n\n for i in range(self.num_layers):\n x, block1, block2 = self.dec_layers[i](x, enc_output, training, look_ahead_mask, padding_mask)\n\n attention_weights['decoder_layer{}_block1'.format(i+1)] = block1\n attention_weights['decoder_layer{}_block2'.format(i+1)] = block2\n\n return x, attention_weights\n # x.shape == (batch_size, target_seq_len, d_model)\n\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n \"\"\"Multi-headed attention architecture.\"\"\"\n\n def __init__(self, d_model, num_heads):\n super(MultiHeadAttention, self).__init__()\n self.num_heads = num_heads\n self.d_model = d_model\n\n assert self.d_model % self.num_heads == 0\n\n self.depth = self.d_model // self.num_heads\n\n self.wq = tf.keras.layers.Dense(self.d_model)\n self.wk = tf.keras.layers.Dense(self.d_model)\n self.wv = tf.keras.layers.Dense(self.d_model)\n\n self.dense = tf.keras.layers.Dense(self.d_model)\n\n def split_heads(self, x, batch_size):\n \"\"\"Splits x's last dimension into (num_heads, depth) and transpose.\"\"\"\n\n x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))\n return tf.transpose(x, perm=[0, 2, 1, 3])\n # (batch_size, num_heads, seq_len, depth)\n\n def call(self, v, k, q, mask):\n\n batch_size = tf.shape(q)[0]\n\n q = self.wq(q) # (batch_size, seq_len_q, d_model)\n k = self.wk(k) # (batch_size, seq_len_k, d_model)\n v = self.wv(v) # (batch_size, seq_len_v, d_model)\n\n q = self.split_heads(q, batch_size)\n # (batch_size, num_heads, seq_len_q, depth)\n k = self.split_heads(k, batch_size)\n # (batch_size, num_heads, seq_len_k, depth)\n v = self.split_heads(v, batch_size)\n # (batch_size, num_heads, seq_len_v, 
depth)\n\n scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v, mask)\n # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)\n # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)\n\n scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])\n # (batch_size, seq_len_q, num_heads, depth)\n\n concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.d_model)) # (batch_size, seq_len_q, d_model)\n\n output = self.dense(concat_attention) # (batch_size, seq_len_q, d_model)\n\n return output, attention_weights\n\n\nclass CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n \"\"\"A custom schedule for learning rate annealing.\"\"\"\n\n def __init__(self, d_model, warmup_steps=4000):\n super(CustomSchedule, self).__init__()\n\n self._d_model = d_model\n self._d_model = tf.cast(self._d_model, tf.float32)\n\n self.warmup_steps = warmup_steps\n\n def __call__(self, step):\n arg1 = tf.math.rsqrt(step)\n arg2 = step * (self.warmup_steps ** -1.5)\n\n return tf.math.rsqrt(self._d_model) * tf.math.minimum(arg1, arg2)\n\n\n################################################################################\n### Functions\n################################################################################\n\n\ndef create_masks(src, trg):\n \"\"\"Creates all the masks used by the transformer.\n\n Args:\n src: Batched src.\n trg: Batched trg.\n Returns:\n enc_padding_mask: Negates padded cells in src.\n combined_mask: Negates cells that were padded in src or trg.\n dec_padding_mask: Negates padded cells in trg.\n \"\"\"\n\n # Encoder padding mask.\n enc_padding_mask = create_padding_mask(src)\n\n # Used in the 2nd attention block in the decoder.\n # This padding mask is used to mask the encoder outputs.\n dec_padding_mask = create_padding_mask(src)\n\n # Used in the 1st attention block in the decoder.\n # Used to pad and mask future tokens in input received by decoder.\n look_ahead_mask = create_look_ahead_mask(tf.shape(trg)[1])\n dec_target_padding_mask = create_padding_mask(trg)\n combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)\n\n return enc_padding_mask, combined_mask, dec_padding_mask\n\n\ndef create_padding_mask(seq):\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\n\n # Add extra dimensions to pad the attention logits.\n return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)\n\n\ndef create_look_ahead_mask(size):\n return 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)\n # (seq_len, seq_len)\n\n\ndef scaled_dot_product_attention(q, k, v, mask):\n \"\"\"Calculate the attention weights.\n\n q, k, v must have matching leading dimensions.\n k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.\n The mask has different shapes depending on its type(padding or look ahead)\n but it must be broadcastable for addition.\n\n Args:\n q: query shape == (..., seq_len_q, depth)\n k: key shape == (..., seq_len_k, depth)\n v: value shape == (..., seq_len_v, depth_v)\n mask: Float tensor with shape broadcastable\n to (..., seq_len_q, seq_len_k). 
Defaults to None.\n\n Returns:\n output, attention_weights\n \"\"\"\n\n scaled_attention_logits = tf.matmul(q, k, transpose_b=True)\n # (..., seq_len_q, seq_len_k)\n dk = tf.cast(tf.shape(k)[-1], tf.float32)\n denom = tf.math.sqrt(dk)\n scaled_attention_logits = scaled_attention_logits / denom\n\n # Add the mask to the scaled tensor.\n if mask is not None:\n scaled_attention_logits += (mask * -1e9)\n\n # Softmax is normalized on last axis (seq_len_k) so scores will add to 1.\n attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)\n # (..., seq_len_q, seq_len_k)\n\n output = tf.matmul(attention_weights, v) # (..., seq_len_q, depth_v)\n\n return output, attention_weights\n\n\ndef get_angles(pos, i, d_model):\n angle_rates = 1 / np.power(10000, (2 * (i//2)) / np.float32(d_model))\n return pos * angle_rates\n\n\ndef positional_encoding(position, d_model):\n \"\"\"Encodes sequence elements' positions relative to some current position.\"\"\"\n\n angle_rads = get_angles(np.arange(position)[:, np.newaxis],\n np.arange(d_model)[np.newaxis, :],\n d_model)\n\n # Apply sin to even indices in the array; 2i.\n angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])\n\n # Apply cos to odd indices in the array; 2i+1.\n angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])\n\n pos_encoding = angle_rads[np.newaxis, ...]\n\n return tf.cast(pos_encoding, dtype=tf.float32)\n\n\ndef point_wise_feed_forward_network(d_model, dff):\n return tf.keras.Sequential([\n tf.keras.layers.Dense(dff, activation='relu'),\n tf.keras.layers.Dense(d_model)])\n # first dense layer: (batch_size, seq_len, dff)\n # second dense layer: (batch_size, seq_len, d_model)\n","sub_path":"Scripts/Seq2seq/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":29233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"233252849","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 11 20:35:10 2018\n\n@author: michi\n\"\"\"\n\nimport os\nimport unittest\nfrom astrologging.FileLogger import FileLogger\n\n\n# In[]\nclass FileLoggerTest(unittest.TestCase):\n\n def test_WhenFilePathIsNotString_ThenErrorIsRaised(self):\n with self.assertRaises(TypeError):\n FileLogger(42)\n \n\n def test_WhenFileModeIsInvalid_ThenErrorIsRaised(self):\n with self.assertRaises(ValueError):\n FileLogger(\"test_log.txt\", \"r\") \n \n \n def test_WhenOutputDirectoryDoesNotExist_ThenItWillBeCreated(self):\n filePath = \"/tmp/test_WhenOutputDirectoryDoesNotExist_ThenItWillBeCreated/test_log.txt\"\n if os.path.exists(filePath):\n os.remove(filePath)\n if os.path.exists(os.path.dirname(filePath)):\n os.rmdir(os.path.dirname(filePath))\n self.assertFalse(os.path.exists(os.path.dirname(filePath)))\n FileLogger(filePath)\n self.assertTrue(os.path.exists(os.path.dirname(filePath)))\n os.remove(filePath)\n os.rmdir(os.path.dirname(filePath))\n \n \n def test_WhenFilLoggerIsCreated_ThenLogFileIsCreated(self):\n filePath = \"/tmp/test_WhenFilLoggerIsCreated_ThenLogFileIsCreated/test_log.txt\"\n if os.path.exists(filePath):\n os.remove(filePath)\n if os.path.exists(os.path.dirname(filePath)):\n os.rmdir(os.path.dirname(filePath))\n self.assertFalse(os.path.exists(filePath))\n FileLogger(filePath)\n self.assertTrue(os.path.exists(filePath))\n os.remove(filePath)\n \n \n def test_WhenFilLoggerIsCreatedWithAppending_ThenOldTheLogFileIsPreserved(self):\n filePath = \"/tmp/test_WhenFilLoggerIsCreatedWithAppending_ThenOldTheLogFileIsPreserved/test_log.txt\"\n message = 
\"oldContent\\n\"\n FileLoggerTest._prepareFile(filePath, message)\n FileLogger(filePath, \"a\", True, False)\n fid = open(filePath)\n self.assertEqual(fid.read(), message)\n os.remove(filePath)\n \n \n def test_WhenFilLoggerIsCreatedWithWriting_ThenLogFileIsCleared(self):\n filePath = \"/tmp/test_WhenFilLoggerIsCreatedWithWriting_ThenLogFileIsCleared/test_log.txt\"\n message = \"oldContent\\n\"\n FileLoggerTest._prepareFile(filePath, message)\n FileLogger(filePath, \"w\", True, False)\n fid = open(filePath)\n self.assertEqual(fid.read(), \"\")\n os.remove(filePath)\n \n \n def test_WhenMessageIsLogged_ThenMessageIsWrittenCorrectly(self):\n filePath = \"/tmp/test_WhenMessageIsLogged_ThenMessageIsWrittenCorrectly/test_log.txt\"\n logger = FileLogger(filePath, \"w\")\n logger.log(\"test\")\n fid = open(filePath)\n self.assertTrue(\"] test\" in fid.read())\n \n \n def _prepareFile(filePath, message):\n if not os.path.exists(os.path.dirname(filePath)):\n os.makedirs(os.path.dirname(filePath))\n fid = open(filePath, \"w\")\n fid.write(message)\n fid.close\n \n \n# In[]\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"test/astrologging/FileLoggerTest.py","file_name":"FileLoggerTest.py","file_ext":"py","file_size_in_byte":3088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"160888922","text":"# read data file\nf = open('d16-input.txt', 'r')\ndata = [line.rstrip() for line in f.readlines()]\n# Test data\n# data = [\"class: 1-3 or 5-7\",\"row: 6-11 or 33-44\",\"seat: 13-40 or 45-50\",\"your ticket:\",\"7,1,14\",\"\",\"nearby tickets:\",\"7,3,47\",\"40,4,50\",\"55,2,20\",\"38,6,12\"]\n\n# Split data into areas - fields, my ticket, nearby tickets\nfields, myTicket, nearbyTickets = {}, [], []\n# split fields into dictionary with class type, min and max values\nfor idx, line in enumerate(data):\n if line == \"\": continue\n if line[:11] == \"your ticket\":\n break\n # split the line into text before the colon to form the key, then the value pairs afer the colon seperated by ' or ' to form value pairs\n fields[line.split(\": \")[0]] = (line.split(\": \")[1].split(\" or \")[0], line.split(\": \")[1].split(\" or \")[1])\n #the min of the min value\n min_min = int(fields[line.split(\": \")[0]][0].split(\"-\")[0])\n # the max of the min value\n min_max = int(fields[line.split(\": \")[0]][0].split(\"-\")[1])\n #the min of the max value\n max_min = int(fields[line.split(\": \")[0]][1].split(\"-\")[0])\n #the max of the max value\n max_max = int(fields[line.split(\": \")[0]][1].split(\"-\")[1])\n # redefine the dictionary entry with new min/max values\n fields[line.split(\": \")[0]] = (min_min, min_max, max_min, max_max)\n# split myTicket data into list\nfor line in data[idx+1:]:\n if line == \"\": continue\n if line[:14] == \"nearby tickets\":\n break\n myTicket.append(line)\n# split nearbyTicket data into list\nfor line in data[idx+4:]:\n if line == \"\": continue\n nearbyTickets.append(line)\n# ignore my ticket\n# loop though each field, and compare ticket values to given parameters\n# store sum of invalid parameters\n# can combine all parameters into one set to compare with each line of list\nvalidParams = set() # define empty set\nticketScanningErrorRate = 0\n# add paramteters to set as union of sets of values from fields\nfor keys in fields:\n validParams = validParams.union(set(range(fields[keys][0],fields[keys][1]+1)),set(range(fields[keys][2],fields[keys][3]+1)))\n# loop though nearbyTickets and identify invalid tickets\ninValid = 
set()\nvalidTickets = set()\nfor ticket in nearbyTickets:\n values = ticket.split(\",\")\n for value in values:\n if not(int(value) in validParams):\n inValid.add(ticket)\n# remove inValid tickets and generaate list of validTickets\nvalidTickets = set(nearbyTickets).difference(inValid)\n\n# got some help here:\n# code below adapted from q-viper\n# a dictionary to hold possible fields for each col\nvalid_tickets = list(validTickets)\npossibleFields = {i: set(fields.keys()) for i in range(len(validTickets))}\nfor ticket in validTickets:\n for i, value in enumerate(ticket.split(\",\")):\n for keys in fields:\n #validParaams is intersection of set on lower-range values and set of upper-range values\n validParams = set(range(fields[keys][0],fields[keys][1]+1)) | set(range(fields[keys][2],fields[keys][3]+1))\n possible = False\n if int(value) in validParams:\n possible = True\n if not possible:\n possibleFields[i].discard(keys)\n# remove repeated fields\n## having problem with this segment of code - returns StopIteration error at line 73\n## unable to trace source - something to do witht he way I've setup the lists???\n## used code from https://github.com/q-viper/Adevent-Of-Code-2020/blob/master/Advent%20of%20code.ipynb\n## and https://github.com/jakobsen/advent-of-code-2020/blob/master/16.py\n## to help check and develop my code - still stuck here...\n## answer calculated as 650080463519\nfor i in sorted(possibleFields, key=lambda k: len(possibleFields[k])):\n thisField = next(iter(possibleFields[i]))\n for j in possibleFields:\n if j != i:\n possibleFields[j].discard(thisField)\n \nmyTicket = [int(x) for x in lines[22].split(\",\")]\nans = 1\nfor i in possibleFields:\n if possibleFields[i].pop().startswith(\"departure\"):\n ans *= myTicket[i]\n\nprint(\"Part 2:\", ans)\n\n","sub_path":"d16p2-solution.py","file_name":"d16p2-solution.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"189882866","text":"import discord\nfrom discord.ext import commands\nimport asyncio\nimport os\nimport traceback\n\nclient = commands.Bot(command_prefix='.')\ntoken = os.environ['DISCORD_BOT_TOKEN']\n\n\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n\n@client.command()\nasync def 募集(ctx, about = \"募集\", cnt = 5, settime = 86400.0):\n cnt, settime = int(cnt), float(settime)\n reaction_member = [\"♦参加者一覧♦\"]\n reaction_emoji = \"✔参加/❌参加取消/✋募集停止\"\n test = discord.Embed(title=f\"現在の {about} 募集状況\",colour=0x1e90ff)\n test.add_field(name=f\"あと{cnt}人 募集中\\n\", value=None, inline=True)\n msg = await ctx.send(embed=test)\n msg2 = await ctx.send(reaction_emoji)\n \n #投票の欄\n await msg.add_reaction('✋')\n await msg.add_reaction('❌')\n\n def check(reaction, user):\n emoji = str(reaction.emoji)\n if user.bot == True: # botは無視\n pass\n else:\n return emoji == '✔' or emoji == '❌' or emoji == '✋'\n\n while len(reaction_member)-1 <= cnt:\n try:\n reaction, user = await client.wait_for('reaction_add', timeout=settime, check=check)\n except asyncio.TimeoutError:\n await msg.delete()#メッセージの削除\n await msg2.delete()#メッセージの削除\n await ctx.send('残念、人が足りなかったようだ...')\n break\n else:\n print(str(reaction.emoji))\n if str(reaction.emoji) == '✔':\n reaction_member.append(user.name)\n cnt -= 1\n test = discord.Embed(title=f\"現在の {about} 募集状況\",colour=0x1e90ff)\n test.add_field(name=f\"あと__{cnt}__人 募集中\\n\", value='\\n'.join(reaction_member), inline=True)\n await 
msg.edit(embed=test)\n\n if cnt == 0:\n test = discord.Embed(title=f\"現在 {about} 募集状況\",colour=0x1e90ff)\n test.add_field(name=f\"あと__{cnt}__人 募集中\\n\", value='\\n@'.join(reaction_member), inline=True)\n await msg.edit(embed=test)\n finish = discord.Embed(title=f\"{about} 募集終了(満員御礼)\",colour=0xFF0000)\n finish.add_field(name=\"仲間が集まったようだ。\",value='\\n@'.join(reaction_member), inline=True)\n msg3 = await ctx.send(embed=finish) \n await msg.delete()#メッセージの削除\n await msg2.delete()#メッセージの削除\n #await asyncio.sleep(10)\n #await msg3.delete()#メッセージの削除\n\n elif str(reaction.emoji) == '❌':\n if user.name in reaction_member:\n reaction_member.remove(user.name)\n cnt += 1\n test = discord.Embed(title=f\"現在の {about} 募集状況\",colour=0x1e90ff)\n test.add_field(name=f\"あと__{cnt}__人 募集中\\n@\", value='\\n'.join(reaction_member), inline=True)\n await msg.edit(embed=test)\n else:\n pass\n # リアクション消す。メッセージ管理権限がないとForbidden:エラーが出ます。\n await msg.remove_reaction(str(reaction.emoji), user)\n \n@client.command()\nasync def アンケート(ctx, about = \"question\", *args):\n emojis = [\"1⃣\",\"2⃣\",\"3⃣\",\"4⃣\"]\n\n cnt = len(args)\n message = discord.Embed(title=\":speech_balloon: \"+about,colour=0x1e90ff)\n if cnt <= len(emojis):\n for a in range(cnt):\n message.add_field(name=f'{emojis[a]}{args[a]}', value=\"** **\", inline=False)\n msg = await ctx.send(embed=message)\n #投票の欄\n for i in range(cnt):\n await msg.add_reaction(emojis[i])\n else:\n await ctx.send(\"悪い...項目は4つまでなんだ...\")\n\n\nclient.run(token)\n","sub_path":"discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":4067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"403552559","text":"import numpy as np\nimport numpy.random as npr\nimport matplotlib.pyplot as plt\n\nmarkersize = 4\n\ndef make_pinwheel_data(radial_std, tangential_std, num_classes, num_per_class, rate):\n rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)\n\n features = npr.randn(num_classes*num_per_class, 2) \\\n * np.array([radial_std, tangential_std])\n features[:,0] += 1.\n labels = np.repeat(np.arange(num_classes), num_per_class)\n\n angles = rads[labels] + rate * np.exp(features[:,0])\n rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])\n rotations = np.reshape(rotations.T, (-1, 2, 2))\n\n return 10*npr.permutation(np.einsum('ti,tij->tj', features, rotations))\n\ndef make_figure():\n fig, ax = plt.subplots(figsize=(4, 4))\n ax.axis('off')\n fig.tight_layout()\n fig.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)\n ax.margins(0, 0)\n ax.xaxis.set_major_locator(plt.NullLocator())\n ax.yaxis.set_major_locator(plt.NullLocator())\n return fig, ax\n\ndef plot_data(data):\n fig, ax = make_figure()\n ax.plot(data[:,0], data[:,1], 'k.', markersize=markersize)\n# save_figure(fig, 'figures/mainfig_mix_data')\n# plt.close()","sub_path":"GMM/spiral.py","file_name":"spiral.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"558434217","text":"import jwt\nfrom api.config import config\n\ndef token_creation(user):\n token_content = {\n 'user': user.userName,\n 'status': user.status,\n 'validity': datetime.now\n }\n token = jwt.encode(token_content, config['tokenSecret'], config['tokenAlogrithm'])\n \n return token.decode()\n\ndef refresh_token(token):\n return 
'lol'","sub_path":"api/utils/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"393565098","text":"from django.conf.urls import url,include\n\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nadmin.autodiscover()\n\n\n\n\n\nfrom . import views\n# from views import *\n\nurlpatterns = [\n# ============================ start of login ===============================\n \n url(r'^$', views.index, name='index'),\n url(r'^login/$', views.user_login, name='login'),\n url(r'^auth/', include('social_django.urls', namespace='social')),\n\n# ============================ Courses and bundles =================================\n\n url(r'^courses/$', views.courses, name='courses'),\n url(r'^bundle/(\\d+)/$', views.bundle, name='bundle'),\n url(r'^matchCase/$', views.courseSeacrh, name='courseSeacrh'),\n url(r'^course/(\\d+)/$', views.course, name='course'),\n # url(r'^test/$', views.test, name='test'),\n# ============================ cart =========================================\n \n url(r'^addToCart/(\\w+)/(\\d+)/$', views.add_to_cart, name='add_to_cart'),\n url(r'^cart/$', views.cart, name='cart'),\n url(r'^deleteFromCart/(\\d+)/$', views.cart_item_delete, name='cart_item_delete'),\n url(r'^cartCheckout/(\\w+)/(\\d+)/$', views.cart_checkout, name='cart_checkout'),\n url(r'^cartObjects/$', views.cartObjects, name='cartObjects'),\n\n url(r'^payment/$', views.payment, name='payment'),\n url(r'^registeredCourses/$', views.registeredCourses, name='registeredCourses'),\n\n# ============================ Pretest =========================================\n \n url(r'^pretest_landing/(\\d+)/$', views.pretest_landing, name='pretest_landing'),\n url(r'^pretest_val/$', views.pretest_val, name='pretest_val'),\n \n# ============================ Contents Display =========================================\n\n url(r'^contentsDisplay/(\\w+)/(\\d+)/$', views.contentsDisplay, name='contentsDisplay'),\n\n# ============================ Topics and tests Display =========================================\n\n \n url(r'^Testvalidate/(\\d+)$',views.Testvalidate,name='Testvalidate'),\n url(r'^topicAndTest/(\\d+)/(\\d+)/(\\d+)/$', views.topicAndTest, name='topicAndTest'),\n # url(r'^page/$', views.TestAndTopic, name='page'),\n url(r'^TestOverview/(\\d+)/(\\d+)$', views.TestOverview, name='TestOverview'),\n url(r'^TimeFunctionality/$', views.test, name='test'),\n \n# ============================ Assignments =========================================\n\n url(r'^Assignments/(\\d+)/$', views.Assignments, name='Assignments'),\n url(r'^compiler/$', views.compiler, name='compiler'),\n url(r'^testcase/$', views.testcase, name='testcase'),\n url(r'^saveProblem/$', views.saveProblem, name='saveProblem'),\n url(r'^chapterOverview/(\\d+)/(\\d+)/$', views.chapterOverview, name='chapterOverview'),\n \n# ============================ grouping =========================================\n url(r'^CourseGrouping/$', views.CourseGrouping, name='CourseGrouping'),\n url(r'^groupsDisplay/$', views.groupsDisplay, name='groupsDisplay'),\n \n\n url(r'^test_creation/$', views.test_creation, name='test_creation'),\n url(r'^test_ques/(?P[0-9]+)/$', views.test_ques, name='test_ques'),\n url(r'^coursesList/$', views.coursesList, name='coursesList'),\n url(r'^chapt_topic_ajax/(?P[0-9]+)/$', views.chapt_topic_ajax, 
name='chapt_topic_ajax'),\n \n\n\n \n \n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"LMS/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"519602553","text":"import io\n\n\"\"\"Utility class to build language models, namely character ngrams\"\"\"\n\nclass LanguageModel:\n \n def read_file(self, txt):\n try:\n f = io.open(txt, 'r', encoding='utf-8')\n except FileNotFoundError:\n print('File not found')\n return None\n return f\n\n \"\"\"Tokenizes space delimited sentences of format: Instance Label\n @Returns: a list of tokenized sentences\n @Params: text file in abovementioned format\n \"\"\"\n def sentence_tokenizer(self, txt): \n sentences = [line.split() for line in txt]\n i = 0\n x = []\n y = []\n for i in range(len(sentences)): \n x.append(sentences[i][0])\n y.append(sentences[i][1])\n i += 1\n x = [line.lower() for line in x]\n return x\n\n \"\"\"Generate character ngrams \n @Returns: List of character ngrams\n @Params: list of sentence or word tokens and the ngram size\"\"\"\n def ngram(self, text, grams): \n model=[] \n count=0 \n for token in text[:len(text)-grams+1]: \n model.append(text[count:count+grams]) \n count=count+1 \n return model\n\ndef main(): \n ng = LanguageModel()\n f = ng.read_file(\"Data/Spanglish.txt\")\n sentences = ng.sentence_tokenizer(f)\n #for sent in sentences: \n # print(sent)\n # bigrams = ng.ngram(sentences)\n ngrams_clean = []\n bigrams = []\n for i in range(len(sentences)): \n ngrams_clean.append(ng.ngram(sentences[i], 2))\n st = \" \".join(ngrams_clean[i])\n bigrams.append(st) \n for bg in bigrams: \n print(bg)\n\nif __name__ == \"__main__\":\n main()\n\n \n","sub_path":"Spring2018/SeminarCL/Project/CodeSwitching/languageModel.py","file_name":"languageModel.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"622292587","text":"balls = int(input())\n\ncolors = []\nfor i in range(balls):\n\tcolors.append(input().strip())\n\ndef getMax(c):\n\t\tif \"black\" in c:\n\t\t\tc.remove(\"black\")\n\t\t\treturn 7\n\t\tif \"pink\" in c:\n\t\t\tc.remove(\"pink\")\n\t\t\treturn 6\n\t\tif \"blue\" in c:\n\t\t\tc.remove(\"blue\")\n\t\t\treturn 5\n\t\tif \"brown\" in c:\n\t\t\tc.remove(\"brown\")\n\t\t\treturn 4\n\t\tif \"green\" in c:\n\t\t\tc.remove(\"green\")\n\t\t\treturn 3\n\t\tif \"yellow\" in c:\n\t\t\tc.remove(\"yellow\")\n\t\t\treturn 2\n\ndef ans(c):\n\treds = c.count(\"red\")\n\tif reds == len(c):\n\t\treturn 1\n\tfor i in range(reds):\n\t\tc.remove(\"red\")\n\tmc = getMax(c)\n\ttotal = (reds+1)*mc\n\tl = len(c)\n\tfor i in range(l):\n\t\ttotal += getMax(c)\n\treturn total+reds\n\nprint(ans(colors))","sub_path":"Python/Ad Hoc/ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"420829375","text":"# -*- coding: utf8 -*-\n\nfrom django.template.loader import render_to_string\nfrom django.core.mail import EmailMessage\nfrom django.conf import settings\n\n\ndef enviar_email(from_email, to, name, subject, template, tags):\n \"\"\"Sends the email with the recipient's address,\n subject and message, rendering it\n from a separate template file\n \"\"\"\n\n # Render the message\n message = render_to_string(template, tags)\n\n # Send the message\n email = EmailMessage(settings.EMAIL_SUBJECT_PREFIX + \" \" + subject,\n 
message,\n from_email,\n [to])\n \n email.send()\n","sub_path":"contato/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"545541977","text":"from pygame import sprite\nimport os\nfrom pygame import image as Image\nfrom pygame import error as Error\n\ndef load_image(name, path, color_key=None):\n fullname = os.path.join(path, name)\n try:\n image = Image.load(fullname)\n except Error as message:\n print('Cannot load image:', name)\n raise SystemExit(message)\n image = image.convert_alpha()\n\n if color_key is not None:\n if color_key == -1:\n color_key = image.get_at((0, 0))\n image.set_colorkey(color_key)\n return image\n\n\n\n\nclass Bullet(sprite.Sprite):\n def __init__(self,x,y,group):\n super().__init__(group)\n self.image = load_image('bullet.PNG','images')\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.rect = self.rect.move(self.x,self.y)\n\n def update(self, *args):\n if self.rect.top > 0 :\n self.rect.top -= 20\n","sub_path":"Bullet.py","file_name":"Bullet.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"233690327","text":"\n\n'''\ndelta x = dx\nstarting x = xo\nx at i = xi\ny at i = yi\ny at i+1 = yi2\nEquation to Solve: 3(dy/dx) + 4y = 3x^2\n@ x = 0, y = 10\nx ranges from 0 to 10\ngeneral formula for yi2:\nyi2 = dx*(xi**2) - ((4 * dx)/3)*yi + yi\n'''\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom array import *\n\nglobal xo\nglobal yo\nglobal xf\n\ndef function(dx):\n n = int((xf-xo)/dx)+1\n\n x = np.linspace(0,10,n)\n y = np.zeros([n])\n\n y[0] = 10\n\n for i in range(0,n-1,1):\n a = 3\n b = 4\n f = 3*(x[i]**2)\n y[i+1] = (f-b*y[i])*dx/3. 
+ y[i]\n #y[i+1] = dx*(x[i]**2) - ((4 * dx)/3)*y[i] + y[i]\n return x, y\n\nxo = 0\nyo = 10\nxf = 10\n\nx1, y1 = function(1)\nx2, y2 = function(0.5)\nx3, y3 = function(0.25)\nx4, y4 = function(0.1)\nx5, y5 = function(0.01)\nx6, y6 = function(0.001)\n\nplt.plot(x1, y1, 'b')\nplt.plot(x2, y2, 'g')\nplt.plot(x3, y3, 'r')\nplt.plot(x4, y4, 'c')\nplt.plot(x5, y5, 'm')\nplt.plot(x6, y6, 'y')\n\nplt.show()\n","sub_path":"euler/forwards.py","file_name":"forwards.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"562435559","text":"from shop.views import home_page, GoodListView, AddGoodView, UpdateGoodView, ReturnListView, DeleteReturnView, \\\n AcceptReturnView, PurchaseListView, PurchaseCreateView, ReturnCreateFrom\n\nfrom django.urls import path\n\n\nurlpatterns = [\n path('', GoodListView.as_view(), name='good_list'),\n # path('list/', GoodListView.as_view(), name='good_list'),\n path('add_good/', AddGoodView.as_view(), name='add_good'),\n path('update_good/<int:pk>/', UpdateGoodView.as_view(), name='update_good'),\n path('return_list/', ReturnListView.as_view(), name='return_list'),\n path('delete_return/<int:pk>/', DeleteReturnView.as_view(), name='delete_return'),\n path('accept_return/<int:pk>/', AcceptReturnView.as_view(), name='accept_return'),\n path('purchase_list/', PurchaseListView.as_view(), name='purchase_list'),\n path('purchase_new/<int:pk>/', PurchaseCreateView.as_view(), name='purchase_new'),\n path('return_new/', ReturnCreateFrom.as_view(), name='return_new')\n ]\n","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"446024419","text":"'''\r\nUARTInput.py\r\n@author: River Allen\r\n@date: July 23, 2010\r\n\r\nA parent class for objects that provide input to be written across UART. 
\r\n'''\r\n\r\nimport threading\r\n\r\nclass UARTInput(threading.Thread):\r\n def __init__(self):\r\n threading.Thread.__init__(self)\r\n self._input = []\r\n self._lock = threading.Lock()\r\n \r\n def get_input(self):\r\n '''\r\n Retrieves list of all VALID data that have been processed since last called.\r\n '''\r\n with self._lock:\r\n ret_data = self._input\r\n self._input = []\r\n return ret_data\r\n \r\n def add_input(self, dat):\r\n '''\r\n Locks, writes to input buffer, then unlocks.\r\n '''\r\n with self._lock:\r\n if isinstance(dat, list):\r\n self._input.extend(dat)\r\n else:\r\n self._input.append(dat)","sub_path":"L_PyInterface/UART/UARTInput.py","file_name":"UARTInput.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"232378160","text":"import numpy as np\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.stats import norm\r\nimport pandas as pd\r\nfrom pandas_datareader import data\r\n\r\n\r\nmicrosoft = pd.read_csv('c:/Users/Dozie/Desktop/AMZN.csv',parse_dates=[\"Date\"],index_col=0)\r\n\r\n#calculate the compound annual growth rate which will give us our mean return input(mu)\r\n\r\ndays =(microsoft.index[-1]-microsoft.index[0]).days\r\ncagr = ((((microsoft['Adj Close'][-1]) / microsoft['Adj Close'][1])) ** (365.0/days)) - 1\r\n#print ('CAGR =',str(round(cagr,4)*100)+\"%\")\r\nmu = cagr\r\n\r\n#create a series of percentage returns and calculate\r\n#the annual volatility of returns\r\nmicrosoft['Returns'] = microsoft['Adj Close'].pct_change()\r\nvol = microsoft['Returns'].std()*math.sqrt(252)\r\n#print (\"Annual Volatility =\",str(round(vol,4)*100)+\"%\")\r\n\r\n#set up empty list to hold our ending values for each simulated price series\r\nresult = []\r\n#monte carlo simulation\r\n# Define Variables\r\nS = microsoft['Adj Close'][-1] # starting stock price (i.e. 
last available real stock price)\r\nT = 252 # Number of trading days\r\nmu = 0.1590 # Return\r\nvol = 0.5406 # Volatility\r\n\r\nfor i in range(1000):\r\n # create list of daily returns using random normal distribution\r\n daily_returns = np.random.normal((1+mu)**(1/T),vol/math.sqrt(T),T)\r\n\r\n # set starting price and create price series generated by above random daily returns\r\n price_list = [S]\r\n\r\n for x in daily_returns:\r\n price_list.append(price_list[-1] * x)\r\n\r\n# Generate Plots - price series and histogram of daily returns\r\n plt.plot(price_list)\r\n # append the ending value of each simulated run to the empty list we created at the beginning\r\n result.append(price_list[-1])\r\n\r\nplt.show()\r\nplt.hist(result,bins=50) # Note that we run the line plot and histogram separately, not simultaneously.\r\nplt.show()\r\n\r\nprint(round(np.mean(result),2))\r\nprint(\"5% quantile =\",np.percentile(result,5))\r\nprint(\"95% quantile =\",np.percentile(result,95))\r\n\r\nplt.hist(result,bins=100)\r\nplt.axvline(np.percentile(result,5), color='r', linestyle='dashed', linewidth=2)\r\nplt.axvline(np.percentile(result,95), color='r', linestyle='dashed', linewidth=2)\r\nplt.show()\r\n\r\n","sub_path":"monte carlo.py","file_name":"monte carlo.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"140752129","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: puke/Require.py\n# Compiled at: 2012-10-18 11:09:43\n\"\"\"REquire parser\n\nThis class simply provide an accessor on a dict\n\"\"\"\nimport os, re, yaml, json\nfrom puke.Env import *\nfrom puke.Console import *\nfrom puke.Yak import *\nfrom puke.Utils import *\n\ndef custom_str_constructor(loader, node):\n return loader.construct_scalar(node).encode('utf-8')\n\n\nyaml.add_constructor('tag:yaml.org,2002:str', custom_str_constructor)\n\nclass Load(object):\n\n def __init__(self, filename):\n self.content = None\n try:\n stream = None\n stream = file(filename, 'r')\n payload = stream.read()\n stream.close()\n size = payload.strip()\n ext = os.path.splitext(stream.name)[1]\n if size == 0:\n self.content = {}\n return\n if ext in ('.json', '.js'):\n self.content = json.loads(payload)\n elif ext in ('.yaml', '.yml'):\n self.content = yaml.load(payload)\n except Exception as error:\n raise RequireError('Require load error : %s' % error)\n self.content = None\n\n return\n\n def __str__(self):\n return '%s' % self.content\n\n def __repr__(self):\n return '%s' % self.content\n\n\nclass Require(object):\n __sharedState = {}\n __globalPattern = re.compile('\\\\$\\\\{([^}]+)\\\\}([|])?(.*)')\n\n def __init__(self, filename):\n self.__dict__ = self.__sharedState\n if not self.__sharedState:\n self.__files = [\n filename]\n self.__cfg = Load(filename).content\n self.__makeenvs(self.__cfg)\n\n def merge(self, filename):\n self.__files.append(filename)\n self.__cfg = deepmerge(self.__cfg, Load(filename).content)\n self.__makeenvs(self.__cfg)\n\n def yak(self, selector):\n if not self.get(selector):\n return False\n else:\n for node, value in self.get(selector).items():\n existing = Yak.get(node)\n if existing != None and not isinstance(existing, str):\n value = deepmerge(Yak.get(node), value)\n Yak.set(node, value)\n\n return\n\n def get(self, key):\n if self.__cfg.has_key(key):\n return self.__cfg[key]\n\n def set(self, key, value):\n 
self.__cfg[key] = value\n\n def __getitem__(self, key):\n return self.get(key)\n\n def __setitem__(self, key, value):\n self.set(key, value)\n\n def __contains__(self, key):\n return key in self.__cfg\n\n def __repr__(self):\n return 'Config: %s' % self.__cfg\n\n def __makeenvs(self, data):\n if data == None:\n return False\n else:\n if isinstance(data, list):\n dataIter = enumerate(data)\n else:\n if isinstance(data, dict):\n dataIter = data.items()\n else:\n return\n for node, value in dataIter:\n if not isinstance(value, (str, int)):\n self.__makeenvs(value)\n elif value.startswith('${'):\n m = self.__globalPattern.match(value)\n if not m:\n continue\n name, isDefault, extValue = m.groups()\n if isDefault:\n default = extValue\n else:\n default = ''\n value = Env.get(name, default)\n if not isDefault:\n value += extValue\n data[node] = value\n\n return\n\n def reload(self):\n files = self.__files\n self.__files = []\n self.__cfg = {}\n for filename in files:\n self.include(filename)\n\n\nclass RequireError(Exception):\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return repr(self.value)","sub_path":"pycfiles/puke-1.5.22.tar/Require.py","file_name":"Require.py","file_ext":"py","file_size_in_byte":4183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"372733055","text":"from numpy import*\na=input(\"digite os paises: \").upper().split(',')\ncont=zeros(5,dtype=int)\nfor i in range(size(a)):\n\tif(a[i]==\"CHN\"):\n\t\tcont[0]=cont[0]+1\n\tif(a[i]==\"JPN\"):\n\t\tcont[1]=cont[1]+1\n\tif(a[i]==\"KOR\"):\n\t\tcont[2]=cont[2]+1\n\tif(a[i]==\"MGL\"):\n\t\tcont[3]=cont[3]+1\n\tif(a[i]==\"THA\"):\n\t\tcont[4]=cont[4]+1\nprint(max(cont))\nprint(cont)","sub_path":"5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/224/users/4372/codes/1832_1658.py","file_name":"1832_1658.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"33905392","text":"# coding:utf-8\nfrom django.conf.urls import url\nfrom django.urls import path, include\n\nfrom .views import CourseListView, CourseDetailView, CourseInfoView, CommentView, AddCommentView\n\napp_name = 'course'\n\nurlpatterns = [\n # course list\n url(r'^list/$',CourseListView.as_view(), name='course_list'),\n\n # course detail page\n url(r'^detail/(?P<course_id>\\d+)/$', CourseDetailView.as_view(), name='detail'),\n # course information\n url(r'^info/(?P<course_id>\\d+)/$', CourseInfoView.as_view(), name='info'),\n # view course comments\n url(r'^comment/(?P<course_id>\\d+)/$', CommentView.as_view(), name='comment'),\n # add a course comment\n url(r'^add_comment/$', AddCommentView.as_view(), name='add_comment'),\n\n]\n\n","sub_path":"apps/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"420449503","text":"from tensorflow.keras import models, layers\r\n\r\n\r\ndef cnn_classifier(num_classes, dropout, embedding_dim, num_filters, filter_sizes, sequence_length, fully_connected_dim, vocabulary_inv, combined_loss=False):\r\n \"\"\"\r\n CNN classification model for sentiment analysis based on \"Convolutional Neural Networks for Sentence Classification\"\r\n by Yoon Kim\r\n \"\"\"\r\n\r\n input_shape = (sequence_length,)\r\n model_input = layers.Input(shape=input_shape, name='input')\r\n\r\n x = layers.Embedding(len(vocabulary_inv), embedding_dim, input_length=sequence_length, 
name='embedding')(model_input)\r\n\r\n x = layers.Dropout(dropout)(x)\r\n\r\n # Convolutional block\r\n conv_blocks = []\r\n\r\n for sz in filter_sizes:\r\n # padding='same' is better but we are not sure if the padding is only going to be at the end of the text\r\n conv = layers.Convolution1D(filters=num_filters, kernel_size=sz, padding='valid', activation='relu', strides=1)(x)\r\n conv = layers.MaxPooling1D(pool_size=sequence_length - sz + 1)(conv)\r\n conv = layers.Flatten()(conv)\r\n conv_blocks.append(conv)\r\n\r\n # Concatenate the convolutional layers\r\n x = layers.Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]\r\n\r\n x = layers.Dropout(dropout)(x)\r\n x = layers.Dense(fully_connected_dim, activation='relu')(x)\r\n\r\n logits = layers.Dense(num_classes, activation='linear', name='logits')(x)\r\n model_output = layers.Activation('softmax', name='predictions')(logits)\r\n\r\n model = models.Model(model_input, model_output)\r\n\r\n if combined_loss:\r\n return model, model_input, logits, conv_blocks\r\n\r\n return model\r\n","sub_path":"Classifiers/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"556201869","text":"\nclass Allergies:\n\n def __init__(self, test):\n self._allergens = ['eggs', 'peanuts', 'shellfish', 'strawberries',\n 'tomatoes', 'chocolate', 'pollen', 'cats']\n self.lst = []\n for i, v in enumerate(self._allergens):\n if (test & (1 << i)):\n self.lst.append(v)\n\n def is_allergic_to(self, name):\n return name.lower() in self.lst\n","sub_path":"python/allergies/allergies.py","file_name":"allergies.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"222197059","text":"from time import time\n\n# Requires requests library. 
Install using pip\nimport requests\n\n\nclass EapiRequests(object):\n \"\"\"Allows to make requests to EAPI on given setup.\"\"\"\n\n def __init__(self, eapi_url, rui_url, user_name, user_password):\n \"\"\"\n :param eapi_url: Eapi address on tested setup in following format: ^http[s]?:\\/\\/[a-z0-9\\.-]+\\.com$\n :type eapi_url: str\n :param rui_url: Rui address on tested setup in following format: ^http[s]?:\\/\\/[a-z0-9\\.-]+\\.com$\n :type rui_url: str\n :type user_name: str\n :type user_password: str\n \"\"\"\n self.eapi_url = eapi_url + '/rest/'\n self.rui_url = rui_url + '/oauth/token'\n self.user_name = user_name\n self.user_password = user_password\n self.token = self._generate_token()\n\n def request(self, method, command, **kwargs):\n \"\"\"Creates request to EAPI.\n See http://docs.python-requests.org/en/master/api/ for documentation on arguments.\n \"\"\"\n response = requests.request(method.upper(), self.eapi_url + command, headers={\n 'Authorization': self.token, 'Content-Type': 'application/json'}, verify=False,\n **kwargs)\n return response\n\n def _generate_token(self):\n \"\"\"Returns token used for authentication\"\"\"\n return 'bearer ' + requests.post(url=self.rui_url,\n data={'grant_type': 'password',\n 'password': self.user_password,\n 'response_type': 'token',\n 'username': self.user_name},\n verify=False\n ).json()['access_token']\n\n\n# Example usage:\n\n\n# Create EapiRequests object using given setup/user parameters\neapi_requests = EapiRequests(eapi_url='https://eapigeic-qa2.proximetry.com',\n rui_url='https://geic-qa2.proximetry.com',\n user_name='user05',\n user_password='P@ssw0rd')\n# Send request to retrieve details about first device from systems\nsystems_response = eapi_requests.request('GET', '1.9/systems', params={'limit': 1})\n# Retrieve asdid from response\nasdid = systems_response.json()[0]['asdid']\n# Set alarm for device identified by asdid taken from system\nset_alarm_response = eapi_requests.request('PATCH',\n '2.0/devices/{}/alarms'.format(asdid),\n json={\n \"data\": [\n {\n \"alarm_id\": \"test_alarm_1\",\n \"action\": \"SET\",\n \"timestamp\": int(time()) * 1000,\n \"severity\": \"EMERGENCY\",\n \"description\": \"Device overheating\",\n \"details\": \"Temperature is above safe levels\",\n \"optional\": {}\n }\n ]\n }\n )\n\nactivation_codes = eapi_requests.request('GET', '2.0/activation_codes')\n#print(activation_codes.json())\n#print(systems_response.json())\n#print(asdid)\n\n\ndef test_case_no1_get_all_devices(method, endpoint):\n response = eapi_requests.request(method, endpoint)\n if response.status_code != 200:\n print('Error')\n return response.json()\n\n\n#print(test_case_no1_get_all_devices('GET', '2.0/devices'))\n\ndef test_case_no2_get_device_alarms_by_id(method, endpoint):\n response = eapi_requests.request(method, endpoint)\n if response.status_code != 200:\n return 'Error'\n return response.json()\n\n#print(test_case_no2_get_device_alarms_by_id('GET', '2.0/devices/1482B5C22571/alarms'))\n\ndef test_case_no3_set_alarm_severity_to_emergency(method, endpoint):\n response = eapi_requests.request(method, endpoint, json={\n \"data\": [\n {\n \"alarm_id\": \"string\",\n \"action\": \"SET\",\n \"timestamp\": 1518556647391,\n \"severity\": \"EMERGENCY\",\n \"description\": \"string\",\n \"details\": \"string\",\n \"optional\": {}\n }\n ]\n }\n )\n if response.status_code != 200:\n return 'Error'\n return response.json()\n\n\n#print(test_case_no3_set_alarm_severity_to_emergency('PATCH', 
'2.0/devices/1482B5C22571/alarms'.format(asdid)))\n\n\ndef test_case_no4_set_alarm_severity_to_fatal(method, endpoint):\n response = eapi_requests.request(method, endpoint, json={\n \"data\": [\n {\n \"alarm_id\": \"string\",\n \"action\": \"SET\",\n \"timestamp\": 1518556647391,\n \"severity\": \"FATAL\",\n \"description\": \"string\",\n \"details\": \"string\",\n \"optional\": {}\n }\n ]\n }\n )\n if response.status_code != 200:\n return 'Error'\n return response.json()\n\n\n#print(test_case_no4_set_alarm_severity_to_fatal('PATCH', '2.0/devices/1482B5C22571/alarms'.format(asdid)))\n\n\ndef test_case_no5_set_alarm_state_to_clear(method, endpoint):\n response = eapi_requests.request(method, endpoint, json={\n \"data\": [\n {\n \"alarm_id\": \"string\",\n \"action\": \"CLEAR\",\n \"timestamp\": 1518556647392,\n \"severity\": \"FATAL\",\n \"description\": \"string\",\n \"details\": \"string\",\n \"optional\": {}\n }\n ]\n }\n )\n if response.status_code != 200:\n return 'Error'\n return response.json()\n\n#print(test_case_no5_set_alarm_state_to_clear('PATCH', '2.0/devices/1482B5C22571/alarms'.format(asdid)))\n\n\ndef test_case_no7_delete_alarm_of_device_by_id(method, endpoint): # unfinished :(\n response = eapi_requests.request(method, endpoint)\n if response.status_code != 204:\n return 'Error'\n return response.status_code\n\n# print(test_case_no7_delete_alarm_of_device_by_id('DELETE', '2.0/devices/088FA67C5907/alarms'.format(asdid))) # unfinished, doesn't work\n\n\n","sub_path":"tester_school_requests.py","file_name":"tester_school_requests.py","file_ext":"py","file_size_in_byte":8086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"280030770","text":"# Copyright (C) 2013 AcoMo Technology.\n# All rights reserved.\n#\n# Authored by Jyun-Yu Huang \n#\n# Pipeline is a gateway that transmits data between facebook and django\n\nimport json\nimport urllib2\n\ndef load_data_new_user(backend, response, user, *args, **kwargs):\n\t#print \"Response:\", response\n\t#print \"Details:\", kwargs.get('details')\n\tif user is None:\n\t\tif backend.name == \"facebook\":\n\t\t\ttry:\n\t\t\t\turl = \"http://graph.facebook.com/%s/picture?width=200&height=200&redirect=false\" % response['id']\n\t\t\t\tdata = json.loads(urllib2.urlopen(url).read())['data']\n\t\t\t\treturn {'avatar': data}\n\t\t\texcept StandardError:\n\t\t\t\treturn {'avatar': None}\n\t\telse:\n\t\t\traise ValueError()\n","sub_path":"account/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"537276158","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport sys\nimport os\nimport logging\nimport numpy as np\nimport math\nimport matplotlib\nimport glob\nimport yaml\nimport h5py\n\n\nfrom madminer.limits import AsymptoticLimits\nfrom madminer import sampling\nfrom madminer.sampling import SampleAugmenter\nfrom madminer.ml import ParameterizedRatioEstimator, ScoreEstimator, Ensemble, DoubleParameterizedRatioEstimator, LikelihoodEstimator\nimport matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom scipy.optimize import curve_fit\n\nfrom madminer.fisherinformation import FisherInformation\nfrom madminer.fisherinformation import project_information,profile_information\n\nfrom madminer.plotting import plot_fisher_information_contours_2d\nfrom madminer.plotting import plot_distributions\n\nfrom madminer.sampling import 
benchmark, benchmarks, random_morphing_points, morphing_point\n\nfrom madminer.analysis import DataAnalyzer\nfrom madminer.utils.interfaces.madminer_hdf5 import madminer_event_loader\nfrom madminer.utils.interfaces.madminer_hdf5 import save_preformatted_events_to_madminer_file\nfrom madminer.utils.various import create_missing_folders, shuffle\n\n\n\n# logging\nlogging.basicConfig(\n format='%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s',\n datefmt='%H:%M',\n level=logging.INFO\n)\nfor key in logging.Logger.manager.loggerDict:\n if \"madminer\" not in key:\n logging.getLogger(key).setLevel(logging.WARNING)\n\n\ndef func(x, sigmaSM, sigma0, sigma1, sigma00, sigma11, sigma01):\n return sigmaSM + x[0]*sigma0 + x[1]*sigma1 + x[0]**2*sigma00+ x[1]**2*sigma11 + x[0]*x[1]*sigma01\n\ndef funcrel(x, sigmaSM, sigma0, sigma1, sigma00, sigma11, sigma01):\n if x[0]==0 and x[1]==0:\n return 0\n else:\n return np.abs( ( x[0]**2*sigma00+ x[1]**2*sigma11 + x[0]*x[1]*sigma01)/(sigmaSM+x[0]*sigma0 + x[1]*sigma1 + x[0]**2*sigma00+ x[1]**2*sigma11 + x[0]*x[1]*sigma01 ) )\n \ndef funcrel2(x, sigmaSM, sigma0, sigma1, sigma00, sigma11, sigma01):\n if x[0]==0 and x[1]==0:\n return 0\n else:\n return ( abs(x[0]**2*sigma00)+ abs(x[1]**2*sigma11) + abs(x[0]*x[1]*sigma01)) / ( (sigmaSM+x[0]*sigma0) + abs(x[1]*sigma1 ) )\n\ndef generate_test_data_ratio(method):\n # get number of paramenters\n hf = h5py.File(h5_file, 'r')\n parameters = len(hf['parameters']['names'])\n sa = SampleAugmenter(h5_file, include_nuisance_parameters=False)\n\n\n if( len(inputs['evaluation'][str(method)])==1 ): #only one theta\n\n theta_sampling = inputs['evaluation'][str(method)]['theta']['sampling_method']\n theta = inputs['evaluation'][str(method)]['theta']\n if(theta_sampling != 'random_morphing_points'):\n\n x, theta, y, r_xz, t_xz, n_effective = sa.sample_test(\n theta=eval(theta_sampling)(theta['argument']),\n n_samples=inputs['n_samples']['test'],\n folder='/madminer/test/'+method+'/',\n filename='test',\n switch_train_test_events=True\n )\n\n else:\n\n prior = []\n for p in range(parameters):\n this_tuple = theta['prior']['parameter_'+str(p)]\n prior.append( (str(this_tuple['prior_shape']), float(this_tuple['prior_param_0']), float(this_tuple['prior_param_1'])) )\n\n x, theta, y, r_xz, t_xz, n_effective = sa.sample_test(\n theta=eval(theta_sampling)(theta_['n_thetas'], prior),\n n_samples=inputs['n_samples']['test'],\n folder='/madminer/test/'+method+'/',\n filename='test',\n switch_train_test_events=True,\n )\n\n\n elif( len(inputs['evaluation'][str(method)])==2 ): #two thetas\n\n theta0_sampling = inputs['evaluation'][str(method)]['theta_0']['sampling_method'] #sampling method for theta0\n theta1_sampling = inputs['evaluation'][str(method)]['theta_1']['sampling_method'] #sampling method for theta1\n theta_0 = inputs['evaluation'][str(method)]['theta_0'] #parameters for theta0 sampling\n theta_1 = inputs['evaluation'][str(method)]['theta_1'] #parameters for theta0 sampling\n\n if (theta0_sampling == 'random_morphing_points' and theta1_sampling != 'random_morphing_points' ): \n \n prior = []\n for p in range(parameters):\n this_tuple = theta_0['prior']['parameter_'+str(p)]\n prior.append( (str(this_tuple['prior_shape']), float(this_tuple['prior_param_0']), float(this_tuple['prior_param_1'])) )\n\n\n x,th0,th1,y,r_xz,t_xz = sa.sample_train_ratio(\n theta0=eval(theta0_sampling)(theta_0['n_thetas'], prior),\n theta1=eval(theta1_sampling)(theta_1['argument']),\n n_samples=inputs['n_samples']['test'],\n 
folder='/madminer/test/'+method+'/',\n filename='test',\n switch_train_test_events=True,\n ) \n \n\n elif (theta1_sampling == 'random_morphing_points' and theta0_sampling != 'random_morphing_points'): \n tuple_0 = theta_1['prior']['parameter_0'] #tuple for parameter 0\n tuple_1 = theta_1['prior']['parameter_1'] #tuple for parameter 1\n prior = [ (str(tuple_0['prior_shape']), float(tuple_0['prior_param_0']), float(tuple_0['prior_param_1'])), \\\n (str(tuple_1['prior_shape']), float(tuple_1['prior_param_0']), float(tuple_1['prior_param_1'])) ]\n\n x, theta0, theta1, y, r_xz, t_xz = sa.sample_train_ratio(\n theta0=eval(theta0_sampling)(theta_0['argument']),\n theta1=eval(theta1_sampling)(theta_1['n_thetas'], prior),\n n_samples=inputs['n_samples']['test'],\n folder='/madminer/test/'+method+'/',\n filename='test',\n switch_train_test_events=True, \n )\n\n elif (theta0_sampling == 'random_morphing_points' and theta1_sampling == 'random_morphing_points'): \n tuple0_0 = theta_0['prior']['parameter_0'] #tuple for parameter 0\n tuple0_1 = theta_0['prior']['parameter_1'] #tuple for parameter 1\n prior0 = [ (str(tuple0_0['prior_shape']), float(tuple0_0['prior_param_0']), float(tuple0_0['prior_param_1'])), \\\n (str(tuple0_1['prior_shape']), float(tuple0_1['prior_param_0']), float(tuple0_1['prior_param_1'])) ]\n \n tuple1_0 = theta_1[method]['prior']['parameter_0'] #tuple for parameter 0\n tuple1_1 = theta_1[method]['prior']['parameter_1'] #tuple for parameter 1\n prior1 = [ (str(tuple1_0['prior_shape']), float(tuple1_0['prior_param_0']), float(tuple1_0['prior_param_1'])), \\\n (str(tuple1_1['prior_shape']), float(tuple1_1['prior_param_0']), float(tuple1_1['prior_param_1'])) ]\n\n x, theta0, theta1, y, r_xz, t_xz = sa.sample_train_ratio(\n theta0=eval(theta0_sampling)(theta_0['n_thetas'], prior0),\n theta1=eval(theta1_sampling)(theta_1['n_thetas'], prior1),\n n_samples=inputs['n_samples']['test'],\n folder='/madminer/test/'+method+'/',\n filename='test',\n switch_train_test_events=True, \n )\n\n\n else:\n x, theta0, theta1, y, r_xz, t_xz, n_effective= sa.sample_train_ratio(\n theta0=eval(theta0_sampling)(theta_0['argument']),\n theta1=eval(theta1_sampling)(theta_1['argument']),\n n_samples=inputs['n_samples']['test'],\n folder='/madminer/test/'+method+'/',\n filename='test',\n switch_train_test_events=True\n )\n\ndef generate_test_data_score(method):\n # get number of paramenters\n hf = h5py.File(h5_file, 'r')\n parameters = len(hf['parameters']['names'])\n sa = SampleAugmenter(h5_file, include_nuisance_parameters=False)\n\n theta_input = inputs[str(method)]['theta']\n theta_sampling = theta_input['sampling_method']\n \n if (theta_sampling == 'random_morphing_points'): \n \n prior = []\n for p in range(parameters):\n this_tuple = theta_input['prior']['parameter_'+str(p)]\n prior.append( (str(this_tuple['prior_shape']), float(this_tuple['prior_param_0']), float(this_tuple['prior_param_1'])) )\n\n\n x, theta0, theta1, y, r_xz, t_xz = sample_train_local(\n theta=eval(theta_sampling)(theta_input['n_thetas'], prior),\n n_samples=inputs['n_samples']['test'],\n folder='/madminer/test/'+method+'/',\n filename='test',\n switch_train_test_events=False, \n )\n\n if (theta_sampling == 'benchmark'): \n _ = sa.sample_train_local(\n theta=eval(theta_sampling)(theta_input['argument']),\n n_samples=inputs['n_samples']['test'],\n folder='/madminer/test/'+method+'/',\n filename='test',\n switch_train_test_events=False,\n )\n\n\n\n#tunning for evaluation\ninputs_file = sys.argv[1] \nwith open(inputs_file) as f:\n inputs 
= yaml.safe_load(f)\n\n#folder with trained files\neval_folder_path = str(sys.argv[2]) \n\n#configurate file \nh5_file = sys.argv[3]\n\n# get variables from inputs \nuselumi=float(inputs['uselumi'])\n\npath_split = os.path.split(os.path.abspath(eval_folder_path))\n\nmethod = str(path_split[1]) \n\n\n\n# ASYMPTOTIC LIMIT \nif(inputs['asymptotic_limits']['bool']):\n\n asymptotic = inputs['asymptotic_limits']\n\n theta_ranges=[]\n for this_theta in asymptotic['region']:\n theta_min, theta_max = asymptotic['region'][str(this_theta)]\n theta_ranges.append((theta_min,theta_max))\n \n print('theta range...', theta_ranges)\n \n resolutions = asymptotic['resolutions']\n print('resolutions...',resolutions)\n n_samples_theta = int(asymptotic['n_samples_per_theta'])\n xsec = asymptotic['include_xsec']\n theta_true = asymptotic['theta_true']\n\n\n limits = AsymptoticLimits(h5_file)\n \n #################### RATES & GRID\n\n theta_grid, p_values_expected_xsec, best_fit_expected_xsec = limits.expected_limits(\n theta_true=theta_true,\n theta_ranges=theta_ranges,\n mode=\"rate\",\n include_xsec=True,\n resolutions=resolutions,\n luminosity=uselumi)\n \n np.save('/madminer/rates/grid.npy',theta_grid)\n np.save('/madminer/rates/rate.npy',[p_values_expected_xsec, best_fit_expected_xsec])\n \n \n sa_rates = SampleAugmenter(h5_file, include_nuisance_parameters=False)\n xs_grid=[]\n neff_grid=[]\n n_test=10000\n\n for theta_element in theta_grid:\n _,xs,_=sa_rates.cross_sections(theta=sampling.morphing_point(theta_element))\n _,_,neff=sa_rates.sample_train_plain(theta=sampling.morphing_point(theta_element),n_samples=n_test)\n xs_grid.append(xs)\n neff_grid.append(neff/float(n_test))\n neff_grid=np.array(neff_grid)\n xsgrid=np.array(xs_grid)\n\n np.save('/madminer/rates/neff_grid.npy',neff_grid)\n np.save('/madminer/rates/xs_grid.npy',xs_grid)\n\n\n for bool_xsec in xsec:\n # histogram + save\n _ , p_values_expected_histo, best_fit_expected_histo = limits.expected_limits(\n theta_true=theta_true,\n theta_ranges=theta_ranges,\n mode=\"histo\",\n hist_vars=[ unicode(str(asymptotic['hist_vars'])) ],\n include_xsec=bool_xsec,\n resolutions=resolutions,\n luminosity=uselumi)\n\n if (bool_xsec==True):\n np.save('/madminer/rates/histo.npy',[p_values_expected_histo, best_fit_expected_histo])\n else:\n np.save('/madminer/rates/histo_kin.npy',[p_values_expected_histo, best_fit_expected_histo])\n\n\n\n\n for bool_xsec in xsec:\n\n # method ML +save\n if( method in ['alice','alices','cascal','carl','rolr', 'rascal'] ):\n theta_grid, p_values_expected_method, best_fit_expected_method = limits.expected_limits(\n theta_true=theta_true,\n theta_ranges=theta_ranges,\n mode=\"ml\",\n model_file=eval_folder_path+'/'+method,\n include_xsec=bool_xsec,\n resolutions=resolutions,\n luminosity=uselumi)\n resultsdir = '/madminer/results/'+method+'/ml'\n if not os.path.isdir(resultsdir):\n os.makedirs(resultsdir)\n if (bool_xsec==True):\n np.save(resultsdir+'/'+method+'.npy',[p_values_expected_method, best_fit_expected_method])\n else:\n np.save(resultsdir+'/'+method+'_kin.npy',[p_values_expected_method, best_fit_expected_method])\n\n \n\n \n\n #histo method + save\n if( method in ['sally', 'sallino'] ):\n theta_grid , p_values_expected_method, best_fit_expected_method = limits.expected_limits(\n theta_true=theta_true,\n theta_ranges=theta_ranges,\n mode=\"histo\",\n model_file= eval_folder_path+'/'+method, \n include_xsec=bool_xsec,\n resolutions=resolutions,\n luminosity=uselumi)\n\n resultsdir = '/madminer/results/'+method+'/histo'\n 
if not os.path.isdir(resultsdir):\n os.makedirs(resultsdir)\n if (bool_xsec==True):\n np.save(resultsdir+'/'+method+'.npy',[p_values_expected_method, best_fit_expected_method])\n else:\n np.save(resultsdir+'/'+method+'_kin.npy',[p_values_expected_method, best_fit_expected_method])\n \n\n\n# FISHER INFO\nif( bool(inputs['fisher_information']['bool']) and (method in ['sally']) ):\n \n uselumi = float(inputs['uselumi'])\n fisher_input = inputs['fisher_information']\n fisher = FisherInformation(h5_file, include_nuisance_parameters=False)\n theta_true = [ float(fisher_input['theta_true'][0]),\\\n float(fisher_input['theta_true'][1]), \\\n float(fisher_input['theta_true'][2]) ]\n\n fisher_information, _ = fisher.calculate_fisher_information_full_detector(\n theta=[0.,0.,0.],\n model_file='/madminer/models/sally/sally',\n luminosity=30000.)\n \n # contourplot.savefig('/madminer/plots/plot_fisher.png')\n\n\n\n\n# EVALUATE: TRAIN VS TEST\n\n#perform the test + evaluation score acconding to method\nif(method in ['alice','alices','cascal','carl','rolr', 'rascal']):\n\n #generate test data\n generate_test_data_ratio(method)\n\n forge = ParameterizedRatioEstimator() \n forge.load(eval_folder_path+'/'+method) #'methods/alices'\n\n theta_grid=np.load('/madminer/rates/grid.npy')\n xs_grid=np.load('/madminer/rates/xs_grid.npy')\n redo_limits=False\n\n \n # From Asymptotic Limits: _calculate_xsecs\n limits = AsymptoticLimits(h5_file)\n xs_limits = limits._calculate_xsecs([theta_true],test_split=float(inputs['test_split']))[0]\n print (\"AsymptoticLimits (_calculate_xsecs): \", xs_limits)\n\n # From Sample Augmenter cross_sections\n sa = SampleAugmenter(h5_file, include_nuisance_parameters=False)\n _,xs_sa,_=sa.cross_sections(theta=sampling.morphing_point(theta_true))\n print (\"SampleAugmenter (cross_sections) : \", xs_sa[0])\n\n # From Sample Augmenter: weighted_events\n _,w=sa.weighted_events(theta='sm')\n xs_we=sum(w)\n print (\"SampleAugmenter (weighted_events): \", xs_we)\n\n # From Sample Augmenter: xsecs\n xs_xsecs,_=sa.xsecs(thetas=[theta_true], events='train', test_split=float(inputs['test_split']))\n print (\"SampleAugmenter (xsecs) : \", xs_xsecs[0])\n\n \n #n_test = int(inputs['n_samples']['test'])\n #data,_,_=sa.sample_train_plain(theta=sampling.morphing_point(theta_true),n_samples=n_test) \n \n #Calc expected number of events \n _,xs,_=sa.cross_sections(theta=sampling.morphing_point(theta_true))\n nevents = uselumi*xs[0]\n\n\n #Get LLR\n out_llr=[]\n out_llr_raw=[]\n out_llr_rescaled=[]\n out_llr_substracted=[]\n out_pval=[]\n out_theta=[]\n\n for i,theta_element in enumerate(theta_grid):\n \n llr,score=forge.evaluate_log_likelihood_ratio(\n theta=np.array([theta_element]),\n x='/madminer/test/'+method+'/x_test.npy',\n evaluate_score=True,\n test_all_combinations=True,\n )\n \n llr_raw= sum(llr[0])/n_test\n llr_rescaled= nevents*llr_raw \n \n out_llr.append(llr)\n out_llr_raw.append(llr_raw)\n out_llr_rescaled.append(llr_rescaled)\n out_theta.append(theta_element)\n\n llrmin = np.argmin(out_llr_rescaled)\n out_llr_substracted,_=limits._subtract_ml(out_llr_rescaled) \n out_pval=limits.asymptotic_p_value(out_llr_substracted) \n \n #save to files\n print('Saving Raw mean -2 log r to file: ', '/madminer/results/'+method+'/llr_raw.npy')\n np.save('/madminer/results/'+method+'/llr_raw.npy', out_llr_raw)\n\n print('Saving Rescaled -2 log r to file: /madminer/results/'+method+'/llr_rescaled.npy')\n np.save('/madminer/results/'+method+'/llr_rescaled.npy', out_llr_rescaled)\n\n print('Saving Raw mean 
Min-subtracted -2 log r to file: ', '/madminer/results/'+method+'/llr_substracted.npy')\n np.save('/madminer/results/'+method+'/llr_substracted.npy', out_llr_substracted)\n\n print('Saving p-values to file: ', '/madminer/results/'+method+'/p_values.npy')\n np.save('/madminer/results/'+method+'/p_values.npy', out_pval)\n\n print('Saving score to file: ', '/madminer/results/'+method+'/score.npy')\n np.save('/madminer/results/'+method+'/score.npy', score)\n\n\nif(method in ['sally','sallino']):\n\n #generate test data\n generate_test_data_score(method)\n\n forge = LikelihoodEstimator()\n forge.load(eval_folder_path+'/'+method) #'methods/alices'\n\n theta_grid=np.load('/madminer/rates/grid.npy')\n xs_grid=np.load('/madminer/rates/xs_grid.npy')\n redo_limits=False\n\n \n # From Asymptotic Limits: _calculate_xsecs\n limits = AsymptoticLimits(h5_file)\n xs_limits = limits._calculate_xsecs([theta_true],test_split=float(inputs['test_split']))[0]\n print (\"AsymptoticLimits (_calculate_xsecs): \", xs_limits)\n\n # From Sample Augmenter cross_sections\n sa = SampleAugmenter(h5_file, include_nuisance_parameters=False)\n _,xs_sa,_=sa.cross_sections(theta=sampling.morphing_point(theta_true))\n print (\"SampleAugmenter (cross_sections) : \", xs_sa[0])\n\n # From Sample Augmenter: weighted_events\n _,w=sa.weighted_events(theta='sm')\n xs_we=sum(w)\n print (\"SampleAugmenter (weighted_events): \", xs_we)\n\n # From Sample Augmenter: xsecs\n xs_xsecs,_=sa.xsecs(thetas=[theta_true], events='train', test_split=float(inputs['test_split']))\n print (\"SampleAugmenter (xsecs) : \", xs_xsecs[0])\n\n \n #n_test = int(inputs['n_samples']['test'])\n #data,_,_=sa.sample_train_plain(theta=sampling.morphing_point(theta_true),n_samples=n_test) \n \n #Calc expected number of events \n _,xs,_=sa.cross_sections(theta=sampling.morphing_point(theta_true))\n nevents = uselumi*xs[0]\n\n\n #Get LLR\n out_llr=[]\n out_llr_raw=[]\n out_llr_rescaled=[]\n out_llr_substracted=[]\n out_pval=[]\n out_theta=[]\n\n for i,theta_element in enumerate(theta_grid):\n \n llr,_=forge.evaluate_score(\n theta=np.array([theta_element]),\n x='/madminer/test/'+method+'/x_test.npy'\n )\n \n llr_raw= sum(llr[0])/n_test\n llr_rescaled= nevents*llr_raw \n \n out_llr.append(llr)\n out_llr_raw.append(llr_raw)\n out_llr_rescaled.append(llr_rescaled)\n out_theta.append(theta_element)\n\n llrmin = np.argmin(out_llr_rescaled)\n out_llr_substracted,_=limits._subtract_ml(out_llr_rescaled) \n out_pval=limits.asymptotic_p_value(out_llr_substracted) \n \n #save to files\n print('Saving Raw mean -2 log r to file: ', '/madminer/results/'+method+'/llr_raw.npy')\n np.save('/madminer/results/'+method+'/llr_raw.npy', out_llr_raw)\n\n print('Saving Rescaled -2 log r to file: /madminer/results/'+method+'/llr_rescaled.npy')\n np.save('/madminer/results/'+method+'/llr_rescaled.npy', out_llr_rescaled)\n\n print('Saving Raw mean Min-subtracted -2 log r to file: ', '/madminer/results/'+method+'/llr_substracted.npy')\n np.save('/madminer/results/'+method+'/llr_substracted.npy', out_llr_substracted)\n\n print('Saving p-values to file: ', '/madminer/results/'+method+'/p_values.npy')\n np.save('/madminer/results/'+method+'/p_values.npy', out_pval)\n\n print('Saving score to file: ', '/madminer/results/'+method+'/score.npy')\n np.save('/madminer/results/'+method+'/score.npy', score)\n\n\n\n\n\n\n\n\n\n # evaluation = inputs['evaluation']['theta_each']\n # theta_each = np.linspace( float(evaluation['start']), float(evaluation['stop']), int(evaluation['num']) ) \n # theta0, theta1 = 
np.meshgrid(theta_each, theta_each)\n # theta_grid = np.vstack((theta0.flatten(), theta1.flatten())).T #numtidim\n # np.save('/madminer/data/test/theta_grid.npy', theta_grid)\n\n # log_r_hat, score_theta0, _ = forge.evaluate(\n # theta0_filename='/madminer/data/test/theta_grid.npy',\n # x='/madminer/data/test/x_test.npy',\n # evaluate_score=inputs['evaluation']['evaluate_score']\n\n\nif(method in ['alice2','alices2','cascal2','carl2','rolr2', 'rascal2']):\n forge = DoubleParameterizedRatioEstimator() \n forge.load(eval_folder_path+'/'+method) #'methods/alices'\n\n\n# #?\n# theta_denom = np.array([[0.,0.]])\n# np.save('/madminer/data/test/theta_ref.npy', theta_denom)\n\n# print('you need to update v0.3.0 to forge.evaluate_likelihood() or similar ')\n\n# if( method in ['alice', 'alices', 'carl', 'nde', 'rascal', 'rolr', 'scandal'] ):\n \n# #make and save grid\n# \n# )\n# with open('/madminer/data/test/log_r_hat_'+method+'.npy', \"w+\") as f: #create file\n# np.save(file='/madminer/data/test/log_r_hat_'+method, arr=log_r_hat)\n \n# with open('/madminer/data/test/score_theta0_'+method+'.npy', \"w+\") as g: #create file\n# np.save(file='/madminer/data/test/score_theta0_'+method, arr=score_theta0)\n \n\n# #plots\n# if( bool(inputs['plots']['activate']) ):\n \n# bin_size = theta_each[1] - theta_each[0]\n# edges = np.linspace(theta_each[0] - bin_size/2, theta_each[-1] + bin_size/2, len(theta_each)+1)\n\n# fig = plt.figure(figsize=(6,5))\n# ax = plt.gca()\n\n# expected_llr = np.mean(log_r_hat,axis=1)\n# best_fit = theta_grid[np.argmin(-2.*expected_llr)]\n\n# cmin, cmax = np.min(-2*expected_llr), np.max(-2*expected_llr)\n \n# pcm = ax.pcolormesh(edges, edges, -2. * expected_llr.reshape((21,21)),\n# norm=matplotlib.colors.Normalize(vmin=cmin, vmax=cmax),\n# cmap='viridis_r')\n# cbar = fig.colorbar(pcm, ax=ax, extend='both')\n\n# plt.scatter(best_fit[0], best_fit[1], s=80., color='black', marker='*')\n\n# plt.xlabel(r'$\\theta_0$')\n# plt.ylabel(r'$\\theta_1$')\n# cbar.set_label(r'$\\mathbb{E}_x [ -2\\, \\log \\,\\hat{r}(x | \\theta, \\theta_{SM}) ]$')\n\n# plt.tight_layout()\n# plt.savefig('/madminer/plots/expected_llr_'+method+'.png') \n\n\n# if( method in ['alice2', 'alices2', 'carl2', 'rascal2', 'rolr2' ] ):\n# print('evaluation for this method is not yet implemented')\n# pass \n\n# #make and save grid\n# evaluation = inputs['evaluation']['theta_each']\n# theta_each = np.linspace( float(evaluation['start']), float(evaluation['stop']), int(evaluation['num']) ) \n# theta0, theta1 = np.meshgrid(theta_each, theta_each)\n# theta_grid = np.vstack((theta0.flatten(), theta1.flatten())).T #numtidim\n# np.save('/madminer/data/test/theta_grid.npy', theta_grid)\n\n# log_r_hat, score_theta0, score_theta1 = forge.evaluate(\n# theta0_filename='/madminer/data/test/theta_grid.npy',\n# theta1_filename='/madminer/data/test/theta_grid.npy', #TO DO !\n# x='/madminer/data/test/x_test.npy',\n# evaluate_score=False\n# )\n# with open('/madminer/data/test/log_r_hat_'+method+'.npy', \"w+\") as f: #create file\n# np.save(file='/madminer/data/test/log_r_hat_'+method, arr=log_r_hat) \n\n\n# if( method in ['sally', 'sallino'] ):\n# t_hat = forge.evaluate(\n# x='/madminer/data/samples/x_test.npy'\n# )\n# with open('/madminer/data/test/t_hat_'+method+'.npy', \"w+\") as f: #create file\n# np.save(file='/madminer/data/test/t_hat_'+method, arr=t_hat)\n\n# #plots\n# if( bool(inputs['plots']['activate']) ):\n# x = np.load('data/samples/x_test.npy')\n# fig = plt.figure(figsize=(10,4))\n\n# for i in range(2):\n# ax = 
plt.subplot(1,2,i+1)\n# sc = plt.scatter(x[::10,0], x[::10,1], c=t_hat[::10,i], s=10., cmap='viridis', vmin=-0.8, vmax=0.4)\n# cbar = plt.colorbar(sc)\n# cbar.set_label(r'$\\hat{t}_' + str(i) + r'(x | \\theta_{ref})$')\n# plt.xlabel(r'$p_{T,j1}$ [GeV]')\n# plt.ylabel(r'$\\Delta \\phi_{jj}$')\n# plt.xlim(10.,400.)\n# plt.ylim(-3.15,3.15)\n# plt.tight_layout()\n# plt.savefig('/madminer/plots/t_hat_'+method+'.png') \n\n\n","sub_path":"docker-images/docker-madminer-ml/code/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":24438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"578616336","text":"from django.contrib import admin\nfrom blog.models import Post\nfrom django_summernote.admin import SummernoteModelAdmin\n\n\n# Register your models here.\n# admin.site.register(Post)\n@admin.register(Post)\nclass PostAdmin(SummernoteModelAdmin):\n fields = (\n \"title\",\n (\"header_image\", \"cover_image\"),\n \"description\",\n \"content\",\n \"date\",\n )\n summernote_fields = (\"content\",)\n","sub_path":"blog/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"352470003","text":"from vortilib.elements.SourceEllipsoid import *\nimport numpy as np\nimport matplotlib.pyplot as plt\ntry:\n from pybra.curves import streamQuiver\n from pybra.tictoc import Timer\n from pybra.figure import * \n setFigureFont(15)\n setFigurePath('./')\nexcept:\n def streamQuiver(*args,**kwargs):\n pass\n\n# --- Parameters\nminSpeed=0\nmaxSpeed=1.20\n\n\nnx=200\nny=nx+1\n# Ellipse parameters\na = 1\nb = 0.5*a\nU0 = 10\n\n\n# --- Velocity field on grid\nvx = np.linspace(-2*a,2*a,nx)\nvy = np.linspace(-4*b ,4*b,ny)\nX,Y = np.meshgrid(vx, vy)\n# with Timer('Numerical'):\nU1,V1 = ser_u_numerical(X,Y,vx,vy,U0,a,b)\n# with Timer('Analytical'):\nU,V = ser_u(X,Y,U0,a,b)\n\n# PSI = ser_psi_elliptic(MU,ZETA,U0,a,e)\n# PSI = ser_psi(X,Y,X*0,U0,a,b)\n# PHI = ser_phi_elliptic(MU,ZETA,U0,a,e)\n\n\n# --- Plot\nUtot = U+U0\nSpeed = np.sqrt((Utot**2+V**2))/U0\nbInEllipse=(X**2/a**2+Y**2/b**2)<1\nSpeed[bInEllipse]=np.nan\n\nxe,ye=ellipse_coord(a,b)\nfig,ax = plt.subplots(1,1)\n# im = ax.pcolormesh(X, Y, Speed, vmin=minSpeed, vmax=maxSpeed)\nim = ax.contourf(X, Y, Speed, levels=np.linspace(minSpeed,maxSpeed,25), vmin=minSpeed, vmax=maxSpeed)\ncb=fig.colorbar(im)\nyseed=np.linspace(np.min(vy)*0.85,np.max(vy)*0.85,8)\nstart=np.array([yseed*0-2*a*0.9,yseed])\nsp=ax.streamplot(vx,vy,Utot,V,color='k',start_points=start.T,linewidth=0.7,density=30,arrowstyle='-')\nqv=streamQuiver(ax,sp,n=7,scale=40,angles='xy')\nax.plot(xe,ye,'k',lw=3)\nax.set_ylim([-4*b,4*b])\nax.set_xlim([-2*a,2*a])\nax.set_xlabel('x/a [-]')\nax.set_ylabel('r/a [-]')\nax.set_aspect('equal','box')\nax.set_title('Source Ellipsoid Streamlines')\n\n\ntry:\n export2pdf()\nexcept:\n pass\n\nplt.show()\n\n\n\n\n","sub_path":"vortilib/elements/_examples/SourceEllipsoid_Plots.py","file_name":"SourceEllipsoid_Plots.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"474553341","text":"# {\n# \"all_wheels_on_track\": Boolean, # flag to indicate if the vehicle is on the track\n# \"x\": float, # vehicle's x-coordinate in meters\n# \"y\": float, # vehicle's y-coordinate in meters\n# \"distance_from_center\": float, # distance in meters from the track center\n# \"is_left_of_center\": Boolean, 
# Flag to indicate if the vehicle is on the left side to the track center or not.\n# \"heading\": float, # vehicle's yaw in degrees\n# \"progress\": float, # percentage of track completed\n# \"steps\": int, # number steps completed\n# \"speed\": float, # vehicle's speed in meters per second (m/s)\n# \"steering_angle\": float, # vehicle's steering angle in degrees\n# \"track_width\": float, # width of the track\n# \"waypoints\": [[float, float], … ], # list of [x,y] as milestones along the track center\n# \"closest_waypoints\": [int, int] # indices of the two nearest waypoints.\n# }\n\nimport math\n\ndef reward_function(params):\n\n # Read input variables\n\tall_wheels_on_track = params['all_wheels_on_track']\n\tspeed = params['speed']\n\twaypoints = params['waypoints']\n\tclosest_waypoints = params['closest_waypoints']\n\theading = params['heading']\n\ttrack_width = params['track_width']\n\tdistance_from_center = params['distance_from_center']\n\tsteering = abs(params['steering_angle'])\n\tsteps = params['steps']\n\tprogress = params['progress']\n\n\t# Initialize the reward with typical value\n\treward = 1.0\n\n\tif progress == 100:\n\t\treward += 100\n\telse:\n\n\t\t#############################################################################\n\t\t'''\n\t\tExample of using all_wheels_on_track and speed\n\t\t'''\n\t\t# Set the speed threshold based your action space\n\t\tSPEED_THRESHOLD_3 = 3.0\n\t\tSPEED_THRESHOLD_4 = 4.0\n\t\tSPEED_THRESHOLD_5 = 5.0\n\t\tSPEED_THRESHOLD_6 = 6.0\n\t\tSPEED_THRESHOLD_7 = 7.0\n\t\tSPEED_WEIGHT = 5\n\n\t\tif not all_wheels_on_track:\n\t\t\t# Penalize if the car goes off track\n\t\t\treward -= 100\n\t\telif speed > SPEED_THRESHOLD_7:\n\t\t\treward += SPEED_THRESHOLD_7 * SPEED_WEIGHT\n\t\telif speed > SPEED_THRESHOLD_6:\n\t\t\treward += SPEED_THRESHOLD_6 * SPEED_WEIGHT\n\t\telif speed > SPEED_THRESHOLD_5:\n\t\t\treward += SPEED_THRESHOLD_5 * SPEED_WEIGHT\n\t\telif speed > SPEED_THRESHOLD_4:\n\t\t\treward += SPEED_THRESHOLD_4 * SPEED_WEIGHT\n\t\telif speed < SPEED_THRESHOLD_3:\n\t\t\t# Penalize if the car goes too slow\n\t\t\treward -= 5\n\t\telse:\n\t\t\t# High reward if the car stays on track and goes fast\n\t\t\treward += 10\n\n\t\t###############################################################################\n\t\t# '''\n\t\t# Example of using waypoints and heading to make the car in the right direction\n\t\t# '''\n\t\t# # Calculate the direction of the center line based on the closest waypoints\n\t\t# next_point = waypoints[closest_waypoints[1]]\n\t\t# prev_point = waypoints[closest_waypoints[0]]\n\t\t#\n\t\t# # Calculate the direction in radius, arctan2(dy, dx), the result is (-pi, pi) in radians\n\t\t# track_direction = math.atan2(next_point[1] - prev_point[1], next_point[0] - prev_point[0])\n\t\t# # Convert to degree\n\t\t# track_direction = math.degrees(track_direction)\n\t\t#\n\t\t# # Calculate the difference between the track direction and the heading direction\n\t\t# direction_diff = abs(track_direction - heading)\n\t\t#\n\t\t# # Penalize the reward if the difference is too large\n\t\t# DIRECTION_THRESHOLD = 10.0\n\t\t# if direction_diff > DIRECTION_THRESHOLD:\n\t\t# \treward -= 10\n\n\t\t#################################################################################\n\t\t'''\n\t\tExample of using distance from the center\n\t\t'''\n\t\t# Penalize if the car is too far away from the center\n\t\tmarker_1 = 0.1 * track_width\n\t\tmarker_2 = 0.5 * track_width\n\n\t\tif distance_from_center <= marker_1:\n\t\t reward += 10.0\n\t\telif 
distance_from_center <= marker_2:\n\t\t reward += 5\n\t\telse:\n\t\t reward -= 2 # likely crashed/ close to off track\n\n\t\t##################################################################################\n\t\t# #Example of using steering angle\n\t\t# # Penalize if car steer too much to prevent zigzag\n\t\t# STEERING_THRESHOLD = 20.0\n\t\t# if steering > STEERING_THRESHOLD:\n\t\t# reward -= 20\n\n\t\t#############################################################################\n\t\t# '''\n\t\t# #Example of using steps and progress\n\t\t# '''\n\t\t# # Total num of steps we want the car to finish the lap, it will vary depends on the track length\n\t\t# TOTAL_NUM_STEPS = 300\n\t\t#\n\t\t# # Give additional reward if the car pass every 100 steps faster than expected\n\t\t# if (steps % 100) == 0 and progress > (steps / TOTAL_NUM_STEPS) :\n\t\t# \treward += 10.0\n\n\t\t#############################################################################\n\t\t# '''\n\t\t# #Example of using track width\n\t\t# '''\n\t\t# # Calculate the distance from each border\n\t\t# distance_from_border = 0.5 * track_width - distance_from_center\n\t\t#\n\t\t# # Reward higher if the car stays inside the track borders\n\t\t# if distance_from_border >= 0.05:\n\t\t# \treward += 10.0\n\t\t# else:\n\t\t# \treward += 1e-3 # Low reward if too close to the border or goes off the track\n\n\t\t############################################################################\n\t\t'''\n\t\t# Kevin's code\n\t\t'''\n\t\t# Reward finish the lap\n\t\tif progress >= 1:\n\t\t reward += progress * 10\n\n\n\treturn reward\n","sub_path":"0621/aws_deepracer_clone0620_fast.py","file_name":"aws_deepracer_clone0620_fast.py","file_ext":"py","file_size_in_byte":5278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"43417658","text":"# write_FBA_USGS_WU_Coef.py (flowsa)\n# !/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"\nAnimal Water Use coefficients data obtained from: USGS Publication (Lovelace, 2005)\n\nData output saved as csv, retaining assigned file name \"USGS_WU_Coef_Raw.csv\"\n\"\"\"\n\nfrom flowsa.common import *\nimport pandas as pd\nfrom flowsa.flowbyactivity import store_flowbyactivity\nfrom flowsa.flowbyfunctions import add_missing_flow_by_fields\n\n\n# 2012--2018 fisheries data at state level\ncsv_load = datapath + \"USGS_WU_Coef_Raw.csv\"\n\n\nif __name__ == '__main__':\n # Read directly into a pandas df\n df_raw = pd.read_csv(csv_load)\n\n # rename columns to match flowbyactivity format\n df = df_raw.copy()\n df = df.rename(columns={\"Animal Type\": \"ActivityConsumedBy\",\n \"WUC_Median\": \"FlowAmount\",\n \"WUC_Minimum\": \"Min\",\n \"WUC_Maximum\": \"Max\"\n })\n\n # drop columns\n df = df.drop(columns=[\"WUC_25th_Percentile\", \"WUC_75th_Percentile\"])\n\n # hardcode data\n df[\"Class\"] = \"Water\"\n df[\"SourceName\"] = \"USGS_WU_Coef\"\n df[\"Location\"] = US_FIPS\n df['LocationSystem'] = \"FIPS_2015\" # state FIPS codes have not changed over last decade\n df['Year'] = 2005\n df[\"Unit\"] = \"gallons/animal/day\"\n\n # add missing dataframe fields (also converts columns to desired datatype)\n flow_df = add_missing_flow_by_fields(df, flow_by_activity_fields)\n parquet_name = 'USGS_WU_Coef_2005'\n store_flowbyactivity(flow_df, parquet_name)\n","sub_path":"scripts/write_FBA_USGS_WU_Coef.py","file_name":"write_FBA_USGS_WU_Coef.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"378989297","text":"from base64 import b64encode\nfrom collections import OrderedDict\nfrom hashlib import sha256\nfrom hmac import HMAC\nfrom urllib.parse import urlparse, parse_qsl, urlencode\n\nclient_secret = \"SOVnLdXcGDaVAZDMm594\" # \"2oLcfunEVTYukWTmitZK\" # Защищённый ключ\n\n\ndef valid_user_id(url) -> dict:\n\t\"\"\"\n\tПроверить валидность УРЛь и выдать пользовательский ID\n\t:param url: Урль\n\t:return: словарь с ключами valid и user_id\n\t\"\"\"\n\tglobal client_secret\n\tquery = dict(parse_qsl(urlparse(url).query, keep_blank_values=True))\n\tsecret = client_secret\n\tvalid, user_id = False, -1\n\ttry:\n\t\tvk_subset = OrderedDict(sorted(x for x in query.items() if x[0][:3] == \"vk_\"))\n\t\thash_code = b64encode(HMAC(secret.encode(), urlencode(vk_subset, doseq=True).encode(), sha256).digest())\n\t\tdecoded_hash_code = hash_code.decode('utf-8')[:-1].replace('+', '-').replace('/', '_')\n\t\tvalid = query[\"sign\"] == decoded_hash_code\n\t\tuser_id = query[\"vk_user_id\"]\n\texcept:\n\t\tpass\n\n\treturn {\"valid\": valid, \"user_id\": user_id}\n","sub_path":"sign_valid.py","file_name":"sign_valid.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"129948003","text":"from flask_restful import Resource, reqparse\nfrom flask_jwt_extended import (\n jwt_required, \n get_jwt_claims, \n jwt_optional, \n get_jwt_identity,\n fresh_jwt_required\n)\nfrom models.categorymodel import CategoryModel\n\n#create a Product model class to represent a Product and its operations\nclass Category(Resource):\n\n @jwt_required\n def get(self, name):\n category = CategoryModel.getCategoryByName(name)\n if category:\n return category.json()\n return {\"message\" : \"Category by the name {name} not found!!!\".format(name=name)}, 404\n\n @fresh_jwt_required\n def post(self, name):\n # check if the category exists\n if CategoryModel.getCategoryByName(name):\n return {\"message\" : \"Category by the name {name} already exists, select a new name!!!\".format(name=name)}, 400\n\n # otherwise insert the new category\n category = CategoryModel(name)\n try:\n category.Save()\n returnMessage = \"Congrats, Category by the name : {name} has been ADDED.\".format(name=name)\n return {\"message\" : returnMessage }\n except:\n return {\"message\" : \"Sorry!!!, The Category by the name {name} could not be sucessfully ADDED!!!\".format(name=name)}, 500 \n # return category.json(), 201\n\n @jwt_required\n def put(self, name):\n category = CategoryModel.getCategoryByName(name)\n\n if category:\n try:\n category.name = name\n category.Save()\n returnMessage = \"Congrats, Category by the name : {name} has been UPDATED.\".format(name=name)\n return {\"message\" : returnMessage }\n except:\n return {\"message\" : \"Sorry!!!, The Category by the name {name} could not be sucessfully UPDATED!!!\".format(name=name)}, 500 \n else:\n try:\n category = CategoryModel(name)\n category.Save()\n returnMessage = \"Congrats, Category by the name : {name} has been ADDED.\".format(name=name)\n return {\"message\" : returnMessage }\n except:\n return {\"message\" : \"Sorry!!!, The Category by the name {name} could not be sucessfully ADDED!!!\".format(name=name)}, 500 \n \n # return category.json()\n \n @jwt_required\n def delete(self, name):\n claims = get_jwt_claims()\n if not claims['isadmin']:\n return {'message' : 'Sorry, you need an administrator priviledge to delete a category.'}\n \n category = 
CategoryModel.getCategoryByName(name)\n if category:\n category.Delete()\n return {\"message\" : \"Category by the name {name} DELETED!!!\".format(name=name)}\n return {\"message\" : \"Category by the name {name} can not be found!!!\".format(name=name)}, 404\n\n\n\n#create a Categories model class to represent list of Categories and its operations\nclass Categories(Resource):\n @jwt_optional\n def get(self):\n userid = get_jwt_identity()\n # using a map function with lambda\n # categories = list(map(lambda category: category.json(), CategoryModel.query.all()))\n categories = list([x.json() for x in CategoryModel.getAll()])\n if userid:\n return {\n \"categories\" : categories\n }, 200\n categories = list([x.name for x in CategoryModel.getAll()])\n return {\n \"categories\" : categories,\n \"message\" : \"Detailed info only after you login.\"\n }, 200\n\n\n\n \n ","sub_path":"Python 201/014-RESTfulAPISecurityExtended/resources/categoryresources.py","file_name":"categoryresources.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"41204495","text":"#! /usr/bin/env python\r\nfrom __future__ import print_function\r\nfrom sphinxcontrib import mat_documenters as doc\r\nimport json\r\nimport os\r\nimport sys\r\n\r\nDIRNAME = doc.MatObject.basedir = os.path.abspath(os.path.dirname(__file__))\r\n\r\ndef test_matlabify_class():\r\n \"\"\"\r\n test matlabify classes\r\n \"\"\"\r\n # test module\r\n m = doc.MatObject.matlabify('test_data')\r\n assert isinstance(m, doc.MatModule)\r\n assert m.getter('__name__') == 'test_data'\r\n assert m.getter('__path__')[0] == os.path.join(DIRNAME, 'test_data')\r\n assert m.getter('__file__') == os.path.join(DIRNAME, 'test_data')\r\n assert m.getter('__package__') == 'test_data'\r\n assert not m.getter('__doc__')\r\n assert m.getter('__name__') in sys.modules\r\n # test superclass\r\n my_cls = m.getter('MyHandleClass')\r\n assert isinstance(my_cls, doc.MatClass)\r\n assert my_cls.getter('__name__') == 'MyHandleClass'\r\n assert my_cls.getter('__module__') == 'test_data'\r\n assert my_cls.bases == ['handle', 'my.super.Class']\r\n assert my_cls.attrs == {}\r\n assert my_cls.properties == {'x': {'attrs': {}, 'default': None, 'docstring': ' a property'}}\r\n assert my_cls.getter('__doc__') == ' a handle class\\n\\n :param x: a variable\\n'\r\n x = my_cls.getter('x')\r\n # test cls attr\r\n my_abc = m.getter('MyAbstractClass')\r\n assert isinstance(my_abc, doc.MatClass)\r\n assert my_abc.getter('__name__') == 'MyAbstractClass'\r\n assert my_abc.getter('__module__') == 'test_data'\r\n assert my_abc.bases == ['MyHandleClass', 'MyClass']\r\n assert my_abc.attrs == {'Abstract': True, 'Sealed': True}\r\n assert my_abc.properties == {'y': {'default': None,\r\n 'docstring': ' y variable',\r\n 'attrs': {'GetAccess': 'private', 'SetAccess': 'private'}},\r\n 'version': {'default': \"'0.1.1-beta'\",\r\n 'docstring': ' version',\r\n 'attrs': {'Constant': True}}}\r\n assert my_abc.getter('__doc__') == ' an abstract class\\n\\n :param y: a variable\\n :type y: double\\n'\r\n y = my_abc.getter('y')\r\n version = my_abc.getter('version')\r\n return m, my_cls, x, my_abc, y, version\r\n\r\ndef test_function():\r\n \"\"\"\r\n test matlabify function\r\n \"\"\"\r\n # test function\r\n m = doc.MatObject.matlabify('test_data')\r\n assert isinstance(m, doc.MatModule)\r\n myfun = m.getter('myfun')\r\n assert isinstance(myfun, doc.MatFunction)\r\n assert myfun.getter('__name__') == 'myfun'\r\n 
assert myfun.retv == ['o1', 'o2', 'o3']\r\n assert myfun.args == ['a1', 'a2']\r\n assert myfun.docstring == \" a fun function\\n\\n :param a1: the first input\\n :param a2: another input\\n :returns: ``[o1, o2, o3]`` some outputs\\n\"\r\n return myfun\r\n\r\n\r\ndef test_method():\r\n \"\"\"\r\n test matlabify methods\r\n \"\"\"\r\n # test function\r\n m = doc.MatObject.matlabify('test_data')\r\n assert isinstance(m, doc.MatModule)\r\n my_cls_meth = m.getter('MyClass')\r\n assert isinstance(my_cls_meth, doc.MatClass)\r\n assert my_cls_meth.getter('__name__') == 'MyClass'\r\n assert my_cls_meth.docstring == \" test class methods\\n\\n :param a: the input to :class:`MyClass`\\n\"\r\n constructor = my_cls_meth.getter('MyClass')\r\n assert isinstance(constructor, doc.MatMethod)\r\n assert constructor.getter('__name__') == 'MyClass'\r\n mymethod = my_cls_meth.getter('mymethod')\r\n assert isinstance(mymethod, doc.MatMethod)\r\n assert mymethod.getter('__name__') == 'mymethod'\r\n assert mymethod.args == ['obj', 'b']\r\n assert mymethod.retv == ['c']\r\n assert mymethod.docstring == \" a method in :class:`MyClass`\\n\\n :param b: an input to :meth:`mymethod`\\n\"\r\n return my_cls_meth, constructor, mymethod\r\n\r\nif __name__ == '__main__':\r\n m, my_cls, x, my_abc, y, version = test_matlabify_class()\r\n\r\n print('\\nmodule: %s' % m)\r\n print('docstring:\\n%s' % m.getter('__doc__'))\r\n\r\n print('\\nclass: %s' % my_cls)\r\n print('bases: %s' % my_cls.bases)\r\n print('class attributes: %s' % my_cls.attrs)\r\n print('properties:\\n')\r\n print(json.dumps(my_cls.properties, indent=2, sort_keys=True))\r\n print('docstring:\\n%s' % my_cls.getter('__doc__'))\r\n\r\n print('\\nx property: %s' % x)\r\n print('x default: %s' % x.default)\r\n print('x docstring: %s' % x.__doc__)\r\n print('x attrs: %s' % x.attrs)\r\n\r\n print('\\nclass: %s' % my_abc)\r\n print('bases: %s' % my_abc.bases)\r\n print('class attributes: %s' % my_abc.attrs)\r\n print('properties:\\n')\r\n print(json.dumps(my_abc.properties, indent=2, sort_keys=True))\r\n print('docstring:\\n%s' % my_abc.docstring)\r\n\r\n print('\\ny property: %s' % y)\r\n print('y docstring: %s' % y.__doc__)\r\n print('y default: %s' % y.default)\r\n print('y attrs: %s' % y.attrs)\r\n print('version property: %s' % version)\r\n print('version default: %s' % version.default)\r\n print('version docstring: %s' % version.__doc__)\r\n print('version attrs: %s' % version.attrs)\r\n print('\\n')\r\n\r\n myfun = test_function()\r\n print('function: %s' % myfun)\r\n print('returns: %s' % myfun.retv)\r\n print('name: %s' % myfun.getter('__name__'))\r\n print('args: %s' % myfun.args)\r\n print('docstring:\\n%s' % myfun.getter('__doc__'))\r\n\r\n my_cls_meth, constructor, mymethod = test_method()\r\n print(my_cls_meth)\r\n print(constructor)\r\n print(mymethod)\r\n","sub_path":"tests/test_matlabify.py","file_name":"test_matlabify.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"81570972","text":"#\n# File config.py\n#\n#\tInitializes application\n#\n\nimport datetime\n\nWTF_CSRF_ENABLED = True\nSECRET_KEY = 'you-will-never-guess'\n\n# create log file for each execution of application\nnow = datetime.datetime.now()\ntimeString = now.strftime(\"%Y-%m-%d_%H:%M\")\nlog_filename = \"log/\" + timeString + \".log\"\nlog_file = open(log_filename, 'w')\nheader = \"Output logging started: \" + timeString + 
\"\\n\\n\"\nlog_file.write(header)\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"629113406","text":"#!/usr/bin/env python -u\n# -*- coding: utf-8 -*-\n\"\"\"\nParse MediaWiki XML Dump\n\"\"\"\n\nfrom __future__ import print_function\n\nimport argparse\nimport traceback\n\nfrom epedia import EpediaDumpParser\n\n\ndef report(dp):\n print(\"error at bzip2 byte: %d\" % dp.bytes_read)\n print(traceback.format_exc())\n\n\ndef main(fname, dest, expat, interval, limit, offset, titles):\n dp = EpediaDumpParser(fname, dest, expat, interval, limit, offset, titles)\n try:\n dp.parse_bzip()\n except KeyboardInterrupt:\n print(\" ABORT\")\n except Exception:\n report(dp)\n\nif __name__ == \"__main__\":\n desc = \"Parse MediaWiki XML Dump\"\n argp = argparse.ArgumentParser(description=desc)\n argp.add_argument(\"fname\", help=\"MediaWiki XML Dump (bzip2)\")\n argp.add_argument(\"-d\", \"-dest\", help=\"write (gzip) parts to dest\")\n argp.add_argument(\"-e\", \"-expat\", action='store_true', help=\"use expat\")\n argp.add_argument(\"-i\", \"-interval\", type=int,\n default=EpediaDumpParser.DEFAULT_INTERVAL,\n help=\"page report interval\")\n argp.add_argument(\"-l\", \"-limit\", type=int,\n default=EpediaDumpParser.DEFAULT_LIMIT,\n help=\"page limit\")\n argp.add_argument(\"-o\", \"-offset\", type=int,\n default=EpediaDumpParser.DEFAULT_OFFSET,\n help=\"bzip2 byte offset\")\n argp.add_argument(\"-t\", \"-tfile\", help=\"titles file\")\n args = argp.parse_args()\n\n main(args.fname, args.d, args.e, args.i, args.l, args.o, args.t)\n","sub_path":"dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"240081902","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\naddress = \"http://py4e-data.dr-chuck.net/known_by_Kyra.html\"\nfor i in range(7):\n html = urlopen(address, context=ctx).read()\n soup = BeautifulSoup(html, \"html.parser\")\n\n # Retrieve all of the anchor tags\n tags = soup('a')\n address = soup('a')[17].get('href',None)\nprint(tags[17].contents[0])","sub_path":"Using Python to Access Web Data/week3/week3.py","file_name":"week3.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"343679428","text":"## Script (Python) \"resposta_executivo_pysc\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=cod_materia_respondida\n##title=\n##\nimport simplejson as json\n\nREQUEST = context.REQUEST\nRESPONSE = REQUEST.RESPONSE\nsession = REQUEST.SESSION\n\nusuario = REQUEST['AUTHENTICATED_USER'].getUserName()\n\n\ndef obter_materia(cod_materia_respondida):\n for materia in context.zsql.materia_obter_zsql(cod_materia=cod_materia_respondida):\n for autor in context.zsql.autoria_obter_zsql(cod_materia=materia.cod_materia, ind_excluido=0):\n autoria = autor.nom_autor_join \n id_materia = 'Resposta - ' + materia.sgl_tipo_materia + ' ' + str(materia.num_ident_basica) + '/' + str(materia.ano_ident_basica) +' - '+ autoria \n\n return id_materia.encode('utf-8')\n\ndef numero_protocolo():\n if 
context.sapl_documentos.props_sagl.numero_protocolo_anual == 1:\n for numero in context.zsql.protocolo_numero_obter_zsql(ano_protocolo = DateTime().strftime('%Y')):\n hdn_num_protocolo = int(numero.novo_numero)\n else:\n for numero in zsql.protocolo_codigo_obter_zsql():\n hdn_num_protocolo = int(numero.novo_codigo)\n\n return hdn_num_protocolo\n\n\ndef criar_protocolo(cod_materia_respondida):\n login_usuario = ''\n cod_usuario = ''\n for usuario in context.zsql.usuario_obter_zsql(col_username=REQUEST['AUTHENTICATED_USER'].getUserName()):\n if usuario.cod_usuario:\n login_usuario = usuario.col_username\n cod_usuario = usuario.cod_usuario\n\n autor = ''\n nome_autor = ''\n for autor in context.zsql.autor_obter_zsql(col_username=REQUEST['AUTHENTICATED_USER'].getUserName()):\n autor = autor.cod_autor\n nome_autor = autor.nom_autor_join\n\n for tipo_doc in context.zsql.tipo_documento_obter_zsql(ind_excluido=0):\n if 'Ofício / Resposta' == tipo_doc.des_tipo_documento:\n tip_doc = int(tipo_doc.tip_documento)\n else:\n tip_doc = 9 \n\n txt_ementa = obter_materia(cod_materia_respondida)\n\n context.zsql.protocolo_legislativo_incluir_zsql(num_protocolo=numero_protocolo(), tip_protocolo=0, tip_processo=1, tip_natureza_materia=3, tip_materia = tip_doc, cod_materia_principal = cod_materia_respondida, txt_assunto_ementa = txt_ementa, cod_autor=autor, txt_user_protocolo=login_usuario, txt_observacao=context.pysc.get_ip())\n \n for codigo in context.zsql.protocolo_incluido_codigo_obter_zsql():\n cod_protocolo = int(codigo.cod_protocolo)\n id_documento = str(cod_protocolo)+'_protocolo.pdf'\n\n for protocolo in context.zsql.protocolo_obter_zsql(cod_protocolo=cod_protocolo): \n num_protocolo = str(protocolo.num_protocolo) +'/'+ str(protocolo.ano_protocolo)\n #context.sapl_documentos.protocolo.manage_addFile(id=id_documento,file=REQUEST.form['file'])\n\n return criar_documento(tip_doc, txt_ementa, nome_autor, cod_materia_respondida, cod_protocolo, num_protocolo, cod_usuario)\n\n\ndef criar_documento(tip_doc, txt_ementa, nom_autor, cod_materia_respondida, cod_protocolo, num_protocolo, cod_usuario):\n\n context.zsql.documento_acessorio_incluir_zsql(tip_documento = tip_doc, nom_documento = txt_ementa, nom_autor_documento = nome_autor, cod_materia = cod_materia_respondida, num_protocolo = num_protocolo, dat_documento = DateTime().strftime('%d/%m/%Y %H:%M:%S'), ind_publico=1)\n \n for codigo in context.zsql.documento_acessorio_incluido_codigo_obter_zsql():\n cod_documento = int(codigo.cod_documento)\n id_documento = str(cod_documento)+'.pdf'\n\n for documento in context.zsql.documento_acessorio_obter_zsql(cod_documento=cod_documento):\n context.sapl_documentos.materia.manage_addFile(id=id_documento,file=REQUEST.form['file'])\n\n if context.dbcon_logs:\n context.zsql.logs_registrar_zsql(usuario = REQUEST['AUTHENTICATED_USER'].getUserName(), data = DateTime().strftime('%Y-%m-%d %H:%M:%S'), modulo = 'documento_acessorio_materia', metodo = 'resposta_executivo_pysc', cod_registro = cod_documento, IP = context.pysc.get_ip())\n\n return tramitar_materia(cod_documento, cod_materia, cod_protocolo, num_protocolo, cod_usuario)\n\n\ndef tramitar_materia(cod_documento, cod_materia, cod_protocolo, num_protocolo, cod_usuario):\n\n for tramitacao in context.zsql.tramitacao_obter_zsql(cod_materia=cod_materia, ind_ult_tramitacao=1, ind_excluido=0):\n context.zsql.tramitacao_ind_ultima_atualizar_zsql(cod_materia = cod_materia, cod_tramitacao = tramitacao.cod_tramitacao, ind_ult_tramitacao = 0) \n 
context.zsql.tramitacao_registrar_recebimento_zsql(cod_tramitacao = tramitacao.cod_tramitacao, cod_usuario_corrente = cod_usuario)\n context.pysc.atualiza_indicador_tramitacao_materia_pysc(cod_materia = tramitacao.cod_materia, cod_status = 1056) \n\n hr_tramitacao = DateTime().strftime('%d/%m/%Y %H:%M:%S')\n txt_tramitacao = 'Resposta eletrônica recebida em ' + hr_tramitacao + ' sob protocolo nº ' + str(hdn_num_protocolo) + '/' + DateTime().strftime(\"%Y\") +'
'\n context.zsql.tramitacao_incluir_zsql(cod_materia=cod_materia, dat_tramitacao=DateTime().strftime('%Y-%m-%d %H:%M:%S'), cod_unid_tram_local=4, cod_usuario_local=cod_usuario, cod_unid_tram_dest=19, dat_encaminha=DateTime().strftime('%Y-%m-%d %H:%M:%S'), cod_status=1056, ind_urgencia=0, txt_tramitacao = txt_tramitacao, ind_ult_tramitacao=1)\n\n for tramitacao in context.zsql.tramitacao_obter_zsql(cod_materia=cod_materia, ind_ult_tramitacao=1, ind_excluido=0):\n context.pysc.envia_tramitacao_autor_pysc(cod_materia = materia.cod_materia)\n context.pysc.envia_acomp_materia_pysc(cod_materia = materia.cod_materia) \n hdn_url = context.portal_url() + ''\n context.relatorios.pdf_tramitacao_preparar_pysc(hdn_cod_tramitacao = tramitacao.cod_tramitacao, hdn_url = hdn_url)\n if context.dbcon_logs:\n context.zsql.logs_registrar_zsql(usuario = REQUEST['AUTHENTICATED_USER'].getUserName(), data = DateTime().strftime('%Y-%m-%d %H:%M:%S'), modulo = 'tramitacao_materia', metodo = 'resposta_executivo_pysc', cod_registro = tramitacao.cod_tramitacao, IP = context.pysc.get_ip())\n\n return criar_reposta(cod_protocolo, num_protocolo)\n\n\ndef criar_reposta(cod_protocolo, num_protocolo):\n resposta = []\n dic_resposta = {}\n dic_resposta['status'] = 'SUCESSO'\n dic_resposta['usuario'] = usuario\n dic_resposta['numero_protocolo'] = str(num_protocolo)\n dic_resposta['codigo'] = str(cod_protocolo)\n dic_resposta['data_protocolo'] = DateTime().strftime('%d/%m/%Y %H:%M:%S')\n dic_resposta['ip_origem'] = context.pysc.get_ip()\n resposta.append(dic_resposta)\n retorno = json.dumps(resposta) \n return json.loads(retorno)\n\nreturn criar_protocolo(cod_materia_respondida)\n","sub_path":"branches/4.0_buildout/openlegis/sagl/skins/cadastros/protocolo/resposta_executivo_pysc.py","file_name":"resposta_executivo_pysc.py","file_ext":"py","file_size_in_byte":6819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"610734136","text":"import pandas as pd\nimport pretty_midi\nfrom midiutil import MIDIFile\n\ndef read_notes():\n notes = pd.read_csv('notes.csv', encoding='utf-8')\n note_list = notes[\"note\"].values.tolist()\n stringToAdd = \"4\"\n note_list_appended = [x + stringToAdd for x in note_list]\n print(note_list_appended)\n return note_list_appended\n\ndef convert_notes():\n # Iterate over note names, which will be converted to note number later\n note_list = []\n for note_name in read_notes():\n # Retrieve the MIDI note number for this note name\n note_number = pretty_midi.note_name_to_number(note_name)\n note_list.append(note_number)\n return note_list\n\n\ndef convert_midi():\n note_list = convert_notes()\n track = 0\n channel = 0\n time = 0 # In beats\n duration = 0.0625 # In beats\n tempo = 60 # In BPM\n volume = 100 # 0-127, as per the MIDI standard\n gap = 1\n\n MyMIDI = MIDIFile(1) # One track, defaults to format 1 (tempo track\n # automatically created)\n MyMIDI.addTempo(track, time, tempo)\n\n for pitch in note_list:\n MyMIDI.addNote(track, channel, pitch, time, duration, volume)\n time = time + gap\n\n with open(\"major-scale.mid\", \"wb\") as output_file:\n MyMIDI.writeFile(output_file)\n\n\n\nconvert_midi()","sub_path":"notes-to-midi.py","file_name":"notes-to-midi.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"474777043","text":"__author__ = 'danielebrandimarte'\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom 
django.core.exceptions import PermissionDenied\nimport json\nfrom notes.utils import get_parent\nfrom notes.forms import NoteForm\nfrom notes.models import Note, Section\nfrom front_end.decorators import frontend_login_required\n\n@frontend_login_required\ndef add_note(request, type, section, object_id, fund_index_pk, is_investd=0):\n if not request.user.is_staff:\n raise PermissionDenied('Only staff users can perform this action')\n section = get_object_or_404(Section, code=section)\n obj = get_parent(type, object_id)\n\n if request.method == 'POST':\n form = NoteForm(request.POST)\n if form.is_valid():\n note = form.save(commit=False)\n note.content_object = obj\n note.section = section\n if form.cleaned_data['assessment_type'] == 'None':\n form.cleaned_data['assessment_type'] == ''\n note.save()\n response = {'response': 'success', 'data': {'note_id': note.id}, 'errors': {}}\n return HttpResponse(json.dumps(response), content_type='application/json')\n response = {'response': 'failure', 'data': {}, 'errors': {}}\n return HttpResponse(json.dumps(response), content_type='application/json')\n\n@frontend_login_required\ndef del_note(request, note_id, fund_index_pk):\n if not request.user.is_staff:\n raise PermissionDenied('Only staff users can perform this action')\n note = get_object_or_404(Note, pk=note_id)\n\n if request.method == 'POST':\n note.delete()\n response = {'response': 'success', 'data': {}, 'errors': {}}\n return HttpResponse(json.dumps(response), content_type='application/json')\n response = {'response': 'failure', 'data': {}, 'errors': {}}\n return HttpResponse(json.dumps(response), content_type='application/json')\n\n@frontend_login_required\ndef update_note(request, note_id, fund_index_pk):\n if not request.user.is_staff:\n raise PermissionDenied('Only staff users can perform this action')\n\n note = get_object_or_404(Note, pk=note_id)\n obj = note.content_object\n\n if request.method == 'POST':\n form = NoteForm(request.POST, instance=note)\n\n if form.is_valid():\n if form.cleaned_data['review_lvl'] is not 'None':\n form.save()\n response = {'response': 'success', 'data': {}, 'errors': {}}\n return HttpResponse(json.dumps(response), content_type='application/json')\n\n response = {'response': 'failure', 'data': {}, 'errors': {}}\n return HttpResponse(json.dumps(response), content_type='application/json')","sub_path":"notes/ajax_views.py","file_name":"ajax_views.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"433013296","text":"from flask import request\nfrom db.connection import db_session\nimport json\nfrom os import listdir\nfrom os.path import isfile, join\nfrom random import *\nfrom func.wikipedia_crawler import *\nfrom db.models import Question\nimport requests\nfrom bs4 import BeautifulSoup\nfrom sqlalchemy import or_, desc, asc\nimport time\nimport pandas as pd\n\ndoc_path = './static/docs/'\nwikipedia_url = 'https://fa.wikipedia.org/wiki/'\n\nsentences = []\n\n\ndef preprocess():\n global sentences\n sentences = []\n for i in range(10):\n title = random_title()\n page_url = wikipedia_url + title.replace(' ', '_')\n sumarry = get_page_sumarries(title)[0][1]\n s = sumarry.replace('\\n', '.').split('.')\n s = list(filter(None, sentences))\n sentences.append([sample(s, 1)[0], page_url])\n\n\ndef paragraph():\n files = [f for f in listdir(doc_path) if isfile(join(doc_path, f))]\n # shuffle(files)\n with open(doc_path + sample(files, 1)[0]) as f:\n ps = f.read()\n 
return json.dumps({\n 'success': True,\n 'result': sample(ps.split('\\n\\n'), 1)[0]\n })\n\n\ndef sentence():\n filenames = ['./data/sentences_peykaregan_train.csv', './data/sentences_UPDT_train.csv']\n df = pd.read_csv(choice(filenames), lineterminator='\\n')\n s = df.sample(n=1)['Sentence'].values[0].replace('\\u200c', ' ')\n # title = random_title()\n # page_url = wikipedia_url + title.replace(' ', '_')\n # sumarry = get_page_sumarries(title)[0][1]\n # sentences = sumarry.replace('\\n', '.').split('.')\n # sentences = list(filter(None, sentences))\n # s = sample(sentences, 1)[0]\n return json.dumps({\n 'success': True,\n 'result': {\n 'page_url': '',\n 'sentence': s\n }\n })\n\n\ndef add_question(uid):\n if 'sentence' in request.json and 'questions' in request.json:\n for q in request.json['questions']:\n if 'text' not in q:\n return json.dumps({\n 'success': False,\n 'error': 'BAD_REQUEST'\n })\n questions = []\n result = []\n for q in request.json['questions']:\n question = Question()\n question.sentence = request.json['sentence']\n question.text = q['text']\n if 'answer' in q:\n question.answer = q['answer']\n question.user_id = uid\n questions.append(question)\n db_session.add(question)\n db_session.commit()\n for question in questions:\n result.append(question.toJSON())\n return json.dumps({\n 'success': True,\n 'result': result\n })\n return json.dumps({\n 'success': False,\n 'error': 'BAD_REQUEST'\n })\n\n\ndef search():\n if 'limit' not in request.args or 'offset' not in request.args:\n return json.dumps({\n 'success': False,\n 'error': 'BAD_REQUEST'\n })\n questions = Question.query.filter().order_by(desc(Question.id))\\\n .limit(request.args['limit']).offset(request.args['offset']).all()\n return json.dumps({\n 'success': True,\n 'result': [question.toJSON() for question in questions]\n })\n\n","sub_path":"func/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"103253753","text":"# from analysis.data import root_path\n# from docx import Document\n# from docx.shared import Pt, Cm, Inches\n\n# def set_col_widths(t):\n# widths = (Inches(1), Inches(1), Inches(0.2), Inches(1), Inches(1))\n# for row in t.rows:\n# for idx, width in enumerate(widths):\n# row.cells[idx].width = width\n#\n# def table():\n#\n# doc = Document()\n#\n# font = doc.styles['Normal'].font\n# font.name = 'Arial'\n# font.size = Pt(10)\n#\n# heading = str('How do you change column widths?')\n# doc.add_paragraph().add_run(str(heading)).bold = True\n#\n# table = doc.add_table(rows=1, cols=5)\n# hdr_cells = table.rows[0].cells\n# hdr_cells[0].text = 'Milestone'\n# hdr_cells[1].text = 'Date'\n# hdr_cells[2].text = 'Change from Lst Qrt'\n# hdr_cells[3].text = 'Change from BL'\n# hdr_cells[4].text = 'Notes'\n#\n# # TODO specify column widths\n#\n# for i in range(0,10):\n# row_cells = table.add_row().cells\n# row_cells[0].text = 'h'\n# row_cells[1].text = 'e'\n# row_cells[2].text = 'l'\n# row_cells[3].text = 'l'\n# row_cells[4].text = 'oooo'\n#\n# #change table column width\n# # for cell in table.columns[2].cells:\n# # cell.width = Cm(4) #not working\n#\n# set_col_widths(table) #not working\n#\n# #make_rows_bold(table.rows[0]) # makes top of table bold. 
Found function on stack overflow.\n#\n# doc.save(root_path / 'output/table.docx')\n#\n# def make_rows_bold(*rows):\n# '''Makes text bold in specified row'''\n# for row in rows:\n# for cell in row.cells:\n# for paragraph in cell.paragraphs:\n# for run in paragraph.runs:\n# run.font.bold = True\n\nfrom docx import Document\nfrom docx.shared import Pt, Inches\n\n\ndef set_col_widths(t):\n widths = (Inches(1), Inches(1), Inches(0.2), Inches(1), Inches(1))\n for row in t.rows:\n for idx, width in enumerate(widths):\n row.cells[idx].width = width\n\n\ndef table():\n doc = Document()\n\n font = doc.styles[\"Normal\"].font\n font.name = \"Arial\"\n font.size = Pt(10)\n\n heading = \"How do you change widths?\"\n doc.add_paragraph().add_run(heading).bold = True\n\n table = doc.add_table(rows=1, cols=5)\n hdr_cells = table.rows[0].cells\n hdr_cells[0].text = 'Milestone'\n hdr_cells[1].text = 'Date'\n hdr_cells[2].text = 'Change from Lst Qrt'\n hdr_cells[3].text = 'Change from BL'\n hdr_cells[4].text = 'Notes'\n\n for i in range(0, 10):\n row_cells = table.add_row().cells\n row_cells[0].text = 'h'\n row_cells[1].text = 'e'\n row_cells[2].text = 'l'\n row_cells[3].text = 'l'\n row_cells[4].text = 'oooo'\n\n set_col_widths(table)\n\n doc.save(\"/home/will/Documents/analysis_engine/output/table.docx\")\n\n\ntable()\n\ntable()\n\n\n\n","sub_path":"project_summaries/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"137758308","text":"# -*- coding: utf-8 -*-\n# vi:si:et:sw=4:sts=4:ts=4\n\nimport asyncio\nimport logging\n\nfrom .exceptions import RejectMessage, IgnoreMessage\n\nlogger = logging.getLogger(__name__)\n\n\nclass LoaferDispatcher(object):\n def __init__(self, routes, consumers, loop, max_jobs=10):\n self.routes = routes\n self.consumers = consumers or []\n self._loop = loop\n self._semaphore = asyncio.Semaphore(max_jobs)\n self._stop_consumers = True\n\n self.consumers_sources = dict((x.source, x) for x in self.consumers)\n\n def get_consumer(self, route):\n consumer = self.consumers_sources.get(route.source)\n return consumer\n\n def translate_message(self, message, route):\n try:\n content = route.message_translator.translate(message)['content']\n except Exception as exc:\n logger.exception(exc)\n logger.error('Error translating message content')\n return None\n\n return content\n\n async def dispatch_message(self, message, route):\n logger.info('Dispatching message to route={}'.format(route))\n\n content = self.translate_message(message, route)\n if content is None:\n logger.warning('Message will be ignored:\\n{}\\n'.format(message))\n return False\n\n # Since we don't know what will happen on message handler, use semaphore\n # to protect scheduling or executing too many coroutines/threads\n with await self._semaphore:\n try:\n await route.deliver(content)\n except RejectMessage as exc:\n logger.exception(exc)\n logger.warning('Explicit message rejection:\\n{}\\n'.format(message))\n # eg, we will return True at the end\n except IgnoreMessage as exc:\n logger.exception(exc)\n logger.warning('Explicit message ignore:\\n{}\\n'.format(message))\n return False\n except asyncio.CancelledError as exc:\n msg = '\"{}\" was cancelled, the message will be ignored:\\n{}\\n'\n logger.warning(msg.format(route.message_handler, message))\n return False\n except Exception as exc:\n logger.exception(exc)\n return route.error_handler(content, exc)\n\n return True\n\n async def 
consume_route(self, route):\n consumer = self.get_consumer(route)\n messages = await consumer.consume()\n for message in messages:\n confirmation = await self.dispatch_message(message, route)\n if confirmation:\n await consumer.confirm_message(message)\n\n async def dispatch_consumers(self, sentinel=None):\n if sentinel is None or not callable(sentinel):\n self._stop_consumers = False\n stopper = self._default_sentinel\n else:\n stopper = sentinel\n\n while not stopper():\n tasks = [self._loop.create_task(self.consume_route(route)) for route in self.routes]\n await asyncio.wait(tasks, loop=self._loop)\n\n def _default_sentinel(self):\n return self._stop_consumers\n\n def stop_consumers(self):\n logger.info('Stopping consumers')\n self._stop_consumers = True\n","sub_path":"loafer/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"641564922","text":"# -*- coding: utf-8 -*-\n\"\"\"https://jsonlines.readthedocs.io/en/latest/\n\"\"\"\nimport jsonlines\nimport pandas as pd\nimport hubspot\nimport datetime as dt\nfrom tabulate import tabulate\n\n\ndef main():\n INP_FILE = '/home/alxfed/archive/last_deals_inspections.jl'\n OUT_FILE = '/home/alxfed/archive/inspections_notes_created.jl'\n reference_file_path = '/home/alxfed/archive/deals_downloaded.csv'\n previously_created = '/home/alxfed/archive/inspections_notes_created.jl'\n\n all_deals = pd.read_csv(reference_file_path, dtype=object)\n created_notes = pd.read_json(previously_created, lines=True, dtype=object)\n # permit, dealId, (note) id\n\n ownerId = 40202623 # Data Robot\n\n permit_inspections = ['PERMIT INSPECTION', 'BLDG_PERM IRON PERMIT INSP', 'VENT/HEAT PERMIT INSPECTION',\n 'WATER DEPT PERMIT INSPECTION', 'ELECTRICAL PERMIT INSPECTION', 'CONSTRUCTION EQUIPMENT PERMIT',\n 'PORCH/DECK PERMIT INSPECTION', 'BLDG_PERM IRON PERMIT INSP', 'BOILER PERMIT INSPECTION',\n 'DOB NEW CONSTRUCTION INSP', 'DOB PLUMBING INSPECTION', 'DOB VENT/FURNACE INSPECTION',\n 'DOB REFRIGERATION INSPECTION', 'DOB GARAGE INSPECTION',\n 'EQUIPMENT INSPECTION']\n\n with jsonlines.open(INP_FILE, mode='r') as reader:\n with jsonlines.open(OUT_FILE, mode='a') as writer:\n for line in reader:\n has_data = True\n permit = line['permit']\n created_notes_for_permits = created_notes['permit'].to_list()\n if permit in created_notes_for_permits:\n has_data = False\n print('Already created a note for permit # ', permit)\n else: # get deal parameter from the reference\n deal_line = all_deals[all_deals['permit_'] == permit]\n if deal_line.empty:\n print('No deal for permit # ', permit)\n has_data = False\n else:\n dealId = deal_line['dealId'].values[0] # 1143450728\n date = pd.to_datetime(deal_line['permit_issue_date'], infer_datetime_format=True).values[0]\n insp_table = pd.DataFrame.from_records(line['insp_table'])\n if insp_table.empty:\n print('No data about inspections for deal', dealId)\n has_data = False\n elif 'insp_date' in insp_table.keys():\n insp_table['insp_date'] = pd.to_datetime(insp_table['insp_date'], infer_datetime_format=True)\n post_permit = insp_table[insp_table['insp_date'] >= date]\n if not post_permit.empty:\n last_inspection = post_permit.iloc[0]\n last_inspection_datetime = last_inspection['insp_date']\n last_inspection_number = last_inspection['insp_n']\n last_inspection_type = last_inspection['type_desc']\n else:\n print('No inspections after permit for deal', dealId)\n has_data = False\n else:\n post_permit = 
insp_table\n last_inspection = post_permit.iloc[0]\n last_inspection_type = last_inspection['type_desc']\n last_inspection_number = last_inspection['insp_n']\n last_inspection_datetime = dt.datetime(year=2019, month=7, day=12, hour=0, minute=0, second=0)\n if has_data:\n hubspot_timestamp = int(last_inspection_datetime.timestamp() * 1000)\n # update the deal parameters last_inspection and last_inspection_date here\n result = hubspot.deals.update_a_deal_oauth(dealId, {'last_inspection': last_inspection_type.title(),\n 'last_inspection_date': hubspot_timestamp})\n if result:\n print('Updated a deal: ', dealId)\n else:\n print('Did not update the deal: ', dealId)\n post_permit['insp_date'] = post_permit['insp_date'].dt.strftime('%Y-%m-%d')\n note_text = post_permit.to_html(header=False, index=False)\n params = {'ownerId': ownerId, 'timestamp': hubspot_timestamp, 'dealId': dealId,\n 'note': note_text}\n created_note = hubspot.engagements.create_engagement_note(params)\n engagement = created_note['engagement']\n engagement.update({'permit': permit, 'dealId': dealId,\n 'insp_n': last_inspection_number, 'insp_date': hubspot_timestamp,\n 'insp_type': last_inspection_type})\n # transform\n writer.write(engagement)\n writer.close()\n reader.close()\n return\n\n'''\nperm_table = pd.DataFrame.from_records(line['perm_table'])\nperm_table['perm_date'] = pd.to_datetime(perm_table['perm_date'], infer_datetime_format=True)\npermeat = perm_table.loc[perm_table['permit_n'] == permit]\nif permeat.empty:\n print('No such permit on page ', permit)\n break\ndate = permeat['perm_date'].values[0] \n'''\n\nif __name__ == '__main__':\n main()\n print('main - done')","sub_path":"create_inspection_notes_from_jl.py","file_name":"create_inspection_notes_from_jl.py","file_ext":"py","file_size_in_byte":5790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"506526178","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nfrom absl import logging\nfrom absl import app\n\nimport os\nimport torch\nimport argparse\nimport nfsp_arm\nimport numpy as np\nfrom open_spiel.python import policy\nfrom open_spiel.python import rl_environment\nfrom open_spiel.python.algorithms import exploitability\nfrom utils.exper_logger import Logger\n\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass NFSPPolicies(policy.Policy):\n \"\"\"Joint policy to be evaluated.\"\"\"\n\n def __init__(self, env, nfsp_policies, mode):\n game = env.game\n player_ids = [0, 1]\n super(NFSPPolicies, self).__init__(game, player_ids)\n self._policies = nfsp_policies\n self._mode = mode\n self._obs = {\"info_state\": [None, None], \"legal_actions\": [None, None]}\n\n def action_probabilities(self, state, player_id=None):\n cur_player = state.current_player()\n legal_actions = state.legal_actions(cur_player)\n\n self._obs[\"current_player\"] = cur_player\n self._obs[\"info_state\"][cur_player] = (\n state.information_state_tensor(cur_player))\n self._obs[\"legal_actions\"][cur_player] = legal_actions\n\n info_state = rl_environment.TimeStep(\n observations=self._obs, rewards=None, discounts=None, step_type=None)\n\n with self._policies[cur_player].temp_mode_as(self._mode):\n p = self._policies[cur_player].step(info_state, is_evaluation=True).probs\n prob_dict = {action: p[action] for action in legal_actions}\n return prob_dict\n\ndef main():\n parser = argparse.ArgumentParser(description=\"NFSP LONR in kuhn 
args.\")\n parser = argparse.ArgumentParser(\"NFSP LONR in kuhn args.\")\n parser.add_argument('--seed', type=int, default=int(0), help=\"random seed\")\n parser.add_argument('--results_dir', type=str, default=\"learn_every_128\", help=\"log direction of nfsp-lonr experiments\")\n parser.add_argument('--num_train_episodes', type=int, default=int(1e7), help=\"Number of training episodes.\")\n parser.add_argument('--eval_every', type=int, default=int(10000), help=\"Episode frequency at which agents are evaluated.\")\n parser.add_argument('--hidden_layers_sizes', type=list, default=[128, ], help= \"Number of hidden units in the avg-net and Q-net.\")\n parser.add_argument('--anticipatory_param',type=float, default=0.1, help= \"Prob of using the rl best response as episode policy.\")\n parser.add_argument('--batch_size', type=int,default=int(128), help= \"Number of transitions to sample at each learning step.\" )\n parser.add_argument('--learn_every', type=int,default=int(128), help=\"Number of steps between learning updates.\")\n\n parser.add_argument('--replay_buffer_capacity', type=int, default=int(2e5), help=\"replay_buffer_capacity\")\n parser.add_argument('--reservoir_buffer_capacity', type=int, default=int(2e6), help= \"Size of the reservoir buffer.\")\n parser.add_argument('--sl_learning_rate', type=float,default=0.001, help=\"Learning rate for avg-policy sl network.\")\n parser.add_argument('--rl_q_learning_rate', type=float,default=1e-3, help=\"Learning rate for inner rl q network learning rate.\")\n parser.add_argument('--rl_v_learning_rate', type=float,default=1e-3, help=\"Learning rate for inner rl pi network learning rate.\")\n parser.add_argument('--discount_factor', type=float, default=1.0, help=\"Discount factor for future rewards.\")\n \n parser.add_argument('--arm_target_step_size',type=float, default=0.01, help= \"Target value function parameters are updated via moving average with this rate.\")\n parser.add_argument('--critic_update_num', default=int(2), help=\"Number of every collected data being trained\")\n parser.add_argument('--min_buffer_size_to_learn', default=int(128), help=\"Number of samples in buffer before learning begins.\")\n parser.add_argument('--train_batch_size', type=int,default=int(64), help=\"Number of steps between learning updates.\")\n\n parser.add_argument('--optimizer_str', default=\"adam\", help=\"choose from 'adam' and 'sgd'.\")\n parser.add_argument('--use_checkpoints', default=True, help=\"Save/load neural network weights.\")\n parser.add_argument('--loss_str', default=\"mse\", help=\"choose from 'mse' and 'huber'.\")\n args = parser.parse_args()\n\n game = \"kuhn_poker\"\n num_players = 2\n\n env_configs = {\"players\": num_players}\n env = rl_environment.Environment(game, **env_configs)\n info_state_size = env.observation_spec()[\"info_state\"][0]\n num_actions = env.action_spec()[\"num_actions\"]\n\n absolute_dir = \"./kuhn_nfsp_arm\"\n # final_dir = os.path.join(absolute_dir, args.optimizer_str, args.loss_str) # 正常实验的保存路径\n final_dir = os.path.join(absolute_dir, args.results_dir) # 只有arm的保存路径\n\n logger = Logger(final_dir)\n\n checkpoint_dir=os.path.join(absolute_dir, args.results_dir, \"tmp\")\n\n env.seed(args.seed)\n torch.manual_seed(args.seed)\n torch.cuda.manual_seed_all(args.seed)\n torch.backends.cudnn.deterministic = True\n np.random.seed(args.seed)\n\n hidden_layers_sizes = [int(l) for l in args.hidden_layers_sizes]\n\n agents = [\n nfsp_arm.NFSP_ARM(device, idx, info_state_size, num_actions, hidden_layers_sizes, checkpoint_dir, 
args) \n for idx in range(num_players)\n ]\n expl_policies_avg = NFSPPolicies(env, agents, nfsp_arm.MODE.average_policy)\n for ep in range(args.num_train_episodes):\n if (ep + 1) % args.eval_every == 0:\n losses = [agent.loss for agent in agents]\n # print(\"Losses: \" , losses)\n expl = exploitability.exploitability(env.game, expl_policies_avg)\n print(\"Episode:\", ep + 1, \"Exploitability AVG\", expl, \"losses:\", losses)\n print(\"_____________________________________\")\n\n # logging.info(\"Losses: %s\", losses)\n # expl = exploitability.exploitability(env.game, expl_policies_avg)\n # logging.info(\"[%s] Exploitability AVG %s\", ep + 1, expl)\n # logging.info(\"_____________________________________________\")\n logger.log_performance(ep + 1, expl)\n\n time_step = env.reset()\n while not time_step.last():\n player_id = time_step.observations[\"current_player\"]\n agent_output = agents[player_id].step(time_step)\n action_list = [agent_output.action]\n time_step = env.step(action_list)\n\n for agent in agents:\n agent.step(time_step)\n logger.close_files()\n logger.plot('kuhn_nfsp_arm')\n\nif __name__ == \"__main__\":\n main()","sub_path":"nfsp_arm_example/kuhn_nfsp_arm.py","file_name":"kuhn_nfsp_arm.py","file_ext":"py","file_size_in_byte":6547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"235834852","text":"from django.test import TestCase\n\nfrom graphql_extensions.views import GraphQLView\n\n\nclass ViewsTests(TestCase):\n\n def test_format_error(self):\n error = ValueError()\n formatted_error = GraphQLView.format_error(error)\n self.assertEqual(formatted_error['code'], 'error')\n","sub_path":"tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"217719631","text":"# -*- coding utf-8 -*-\n#\n# create_dfits_dict.py: Create the Dictionary for DFITS\n#\n# Author : Tetsutaro Ueda\n# Created: 2017/11/02\n#-------------------------------- IMPORT MODULES\n#-------- Standard Modules\nfrom pathlib import Path\nfrom collections import OrderedDict\n#-------- Dependent Packages\nimport yaml\n\n#-------------------------------- DEFINE CONSTANTS\nPATH_DFITSDICT = Path('~/DESHIMA/devtools/merge_sample/dfits_dict.yaml').expanduser()\n\n#-------------------------------- CREATE DICTIONARIES\n#---------------- OBSINFO\ndef obsinfo_dict():\n hdr_vals = OrderedDict([\n ('EXTNAME', 'OBSINFO'), ('FITSTYPE', 'DESHIMAv0'), ('DDBID', None),\n ('TELESCOP', None), ('SITELON', None), ('SITELAT', None),\n ('DATE-OBS', None), ('OBSERVER', None), ('OBJECT', None),\n ('RA', None), ('DEC', None), ('EQUINOX', None), ('KIDTYPE0', 'wideband'),\n ('KIDTYPE1', 'filter'), ('KIDTYPE2', 'blind')\n ])\n hdr_coms = {\n 'EXTNAME': 'name of binary table',\n 'FITSTYPE': 'declares DESHIMA FITS',\n 'DDBID': 'ID of DESHIMA database',\n 'TELESCOP': 'name of used telescope',\n 'SITELON': 'site longitude in units of deg',\n 'SITELAT': 'site latitude in units of deg',\n 'DATE-OBS': 'YYYY-mm-ddTHH:MM:SS',\n 'OBSERVER': 'name of observer',\n 'OBJECT': 'name of observed object',\n 'RA': 'right ascension of the object in units of dec',\n 'DEC': 'declination of the object in units of deg',\n 'EQUINOX': 'equinox of coordinates',\n 'KIDTYPE0': '0 is the kid of \"wideband\"',\n 'KIDTYPE1': '1 is the kid of \"filter\"',\n 'KIDTYPE2': '2 is the kid of \"blind\"',\n 'TTYPE1': 'label for field 1',\n 'TFORM1': 'data format of field 1',\n 'TTYPE2': 'label 
for field 2',\n 'TFORM2': 'data format of field 2',\n 'TUNIT2': 'data unit of field 2',\n 'TTYPE3': 'label for field 3',\n 'TFORM3': 'data format of field 3',\n 'TUNIT3': 'data unit of field 3',\n 'TTYPE4': 'label for field 4',\n 'TFORM4': 'data format of field 4',\n 'TUNIT4': 'data unit of field 4',\n 'TTYPE5': 'label for field 5',\n 'TFORM5': 'data format of field 5',\n 'TUNIT5': 'data unit of field 5',\n 'TTYPE6': 'label for field 6',\n 'TFORM6': 'data format of field 6',\n 'TUNIT6': 'data unit of field 6',\n 'TTYPE7': 'label for field 7',\n 'TFORM7': 'data format of field 7',\n 'TUNIT7': 'data unit of field 7',\n 'TTYPE8': 'label for field 8',\n 'TFORM8': 'data format of field 8',\n 'TTYPE9': 'label for field 9',\n 'TFORM9': 'data format of field 9',\n 'TTYPE10': 'label for field 10',\n 'TFORM10': 'data format of field 10',\n 'TTYPE11': 'label for field 11',\n 'TFORM11': 'data format of field 11',\n 'TUNIT11': 'data unit of field 11'\n }\n col_vals = OrderedDict([\n ('pixelid', None), ('offsetaz', None), ('offsetel', None),\n ('interval', None), ('integtime', None), ('beamsize', None),\n ('gain', None), ('masterids', None), ('kidids', None),\n ('kidtypes', None), ('kidfreqs', None)\n ])\n col_form = OrderedDict([\n ('pixelid', 'K'), ('offsetaz', 'D'), ('offsetel', 'D'),\n ('interval', 'D'), ('integtime', 'D'), ('beamsize', 'D'),\n ('gain', 'D'), ('masterids', '63K'), ('kidids', '63K'),\n ('kidtypes', '63K'), ('kidfreqs', '63D')\n ])\n col_unit = OrderedDict([\n ('pixelid', None), ('offsetaz', 'deg'), ('offsetel', 'deg'),\n ('interval', 's'), ('integtime', 's'), ('beamsize', 'deg'),\n ('gain', '1'), ('masterids', None), ('kidids', None),\n ('kidtypes', None), ('kidfreqs', 'GHz')\n ])\n\n obsinfo_dict = {\n 'hdr_vals': hdr_vals, 'hdr_coms': hdr_coms, 'col_vals': col_vals,\n 'col_form': col_form, 'col_unit': col_unit\n }\n return obsinfo_dict\n\n#---------------- ANTENNA\ndef antenna_dict():\n hdr_vals = OrderedDict([('EXTNAME', 'ANTENNA'), ('FILENAME', None)])\n hdr_coms = {\n 'EXTNAME': 'name of binary table',\n 'FILENAME': 'filename of \"Antenna Log\"',\n 'TTYPE1': 'label for field 1',\n 'TFORM1': 'data format of field 1',\n 'TTYPE2': 'label for field 2',\n 'TFORM2': 'data format of field 2',\n 'TTYPE3': 'label for field 3',\n 'TFORM3': 'data format of field 3',\n 'TUNIT3': 'data unit of field 3',\n 'TTYPE4': 'label for field 4',\n 'TFORM4': 'data format of field 4',\n 'TUNIT4': 'data unit of field 4',\n 'TTYPE5': 'label for field 5',\n 'TFORM5': 'data format of field 5',\n 'TUNIT5': 'data unit of field 5',\n 'TTYPE6': 'label for field 6',\n 'TFORM6': 'data format of field 6',\n 'TUNIT6': 'data unit of field 6',\n 'TTYPE7': 'label for field 7',\n 'TFORM7': 'data format of field 7',\n 'TUNIT7': 'data unit of field 7',\n 'TTYPE8': 'label for field 8',\n 'TFORM8': 'data format of field 8',\n 'TUNIT8': 'data unit of field 8'\n }\n col_vals = OrderedDict([\n ('time', None), ('scantype', None), ('az', None), ('el', None),\n ('ra', None), ('dec', None), ('az_center', None), ('el_center', None)\n ])\n col_form = OrderedDict([\n ('time', '26A'), ('scantype', '4A'), ('az', 'D'), ('el', 'D'),\n ('ra', 'D'), ('dec', 'D'), ('az_center', 'D'), ('el_center', 'D')\n ])\n col_unit = OrderedDict([\n ('time', None), ('scantype', None), ('az', 'deg'), ('el', 'deg'),\n ('ra', 'deg'), ('dec', 'deg'), ('az_center', 'deg'), ('el_center', 'deg')\n ])\n\n antenna_dict = {\n 'hdr_vals': hdr_vals, 'hdr_coms': hdr_coms, 'col_vals': col_vals,\n 'col_form': col_form, 'col_unit': col_unit\n }\n return 
antenna_dict\n\n#---------------- READOUT\ndef readout_dict():\n hdr_vals = OrderedDict([('EXTNAME', 'READOUT'), ('FILENAME', None)])\n hdr_coms = {\n 'EXTNAME': 'name of binary table',\n 'FILENAME': 'filename which is readed for READOUT',\n 'TTYPE1': 'label for field 1',\n 'TFORM1': 'data format of field 1',\n 'TTYPE2': 'label for field 2',\n 'TFORM2': 'data format of field 2',\n 'TTYPE3': 'label for field 3',\n 'TFORM3': 'data format of field 3',\n 'TUNIT3': 'data unit of field 3',\n 'TTYPE4': 'label for field 4',\n 'TFORM4': 'data format of field 4',\n 'TUNIT4': 'data unit of field 4',\n 'TTYPE5': 'label for field 5',\n 'TFORM5': 'data format of field 5',\n 'TUNIT5': 'data unit of field 5',\n 'TTYPE6': 'label for field 6',\n 'TFORM6': 'data format of field 6',\n 'TUNIT6': 'data unit of field 6',\n 'TTYPE7': 'label for field 7',\n 'TFORM7': 'data format of field 7',\n 'TUNIT7': 'data unit of field 7'\n }\n col_vals = OrderedDict([\n ('starttime', None), ('pixelid', None), ('amplitude', None), ('phase', None),\n ('line_phase', None), ('Tsignal', None), ('Psignal', None)\n ])\n col_form = OrderedDict([\n ('starttime', '26A'), ('pixelid', 'K'), ('Amplitude', '63D'), ('Phase', '63D'),\n ('Line_Phase', '63D'), ('Tsignal', '63D'), ('Psignal', '63D')\n ])\n col_unit = OrderedDict([\n ('starttime', None), ('pixelid', None), ('Amplitude', '??'), ('Phase', '??'),\n ('Line_Phase', '??'), ('Tsignal', 'K'), ('Psignal', 'W')\n ])\n\n readout_dict = {\n 'hdr_vals': hdr_vals, 'hdr_coms': hdr_coms, 'col_vals': col_vals,\n 'col_form': col_form, 'col_unit': col_unit\n }\n return readout_dict\n\n#---------------- WEATHER\ndef weather_dict():\n hdr_vals = OrderedDict([('EXTNAME', 'WEATHER'), ('FILENAME', None)])\n hdr_coms = {\n 'EXTNAME': 'name of binary table',\n 'FILENAME': 'filename of \"Weather Log\"',\n 'TTYPE1': 'label for field 1',\n 'TFORM1': 'data format of field 1',\n 'TTYPE2': 'label for field 2',\n 'TFORM2': 'data format of field 2',\n 'TUNIT2': 'data unit of field 2',\n 'TTYPE3': 'label for field 3',\n 'TFORM3': 'data format of field 3',\n 'TUNIT3': 'data unit of field 3',\n 'TTYPE4': 'label for field 4',\n 'TFORM4': 'data format of field 4',\n 'TUNIT4': 'data unit of field 4',\n 'TTYPE5': 'label for field 5',\n 'TFORM5': 'data format of field 5',\n 'TUNIT5': 'data unit of field 5',\n 'TTYPE6': 'label for field 6',\n 'TFORM6': 'data format of field 6',\n 'TUNIT6': 'data unit of field 6'\n }\n col_vals = OrderedDict([\n ('time', None), ('temperature', None), ('pressure', None),\n ('vapor-pressure', None), ('windspd', None), ('winddir', None)\n ])\n col_form = OrderedDict([\n ('time', '19A'), ('temperature', 'D'), ('pressure', 'D'),\n ('vapor-pressure', 'D'), ('windspd', 'D'), ('winddir', 'D')\n ])\n col_unit = OrderedDict([\n ('time', None), ('temperature', 'deg_C'), ('pressure', 'hPa'),\n ('vapor-pressure', 'hPa'), ('windspd', 'm/s'), ('winddir', 'deg')\n ])\n\n weather_dict = {\n 'hdr_vals': hdr_vals, 'hdr_coms': hdr_coms, 'col_vals': col_vals,\n 'col_form': col_form, 'col_unit': col_unit\n }\n return weather_dict\n\n#-------------------------------- MAIN\nif __name__ == '__main__':\n#---------------- Create the dictionary for DFITS\n dfits_dict = {\n 'obsinfo_dict': obsinfo_dict(), 'antenna_dict': antenna_dict(),\n 'readout_dict': readout_dict(), 'weather_dict': weather_dict()\n }\n\n#---------------- Write DFITS to the file (yaml)\n with PATH_DFITSDICT.open('w') as f:\n f.write(yaml.dump(dfits_dict, 
default_flow_style=False))\n","sub_path":"merge_sample/create_dfits_dict.py","file_name":"create_dfits_dict.py","file_ext":"py","file_size_in_byte":9664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"492090200","text":"from flask import Blueprint, request, jsonify\nfrom movie_lovers.helpers import token_required\nfrom movie_lovers.models import User, Movie, db, movie_schema, movies_schema\n\n\napi = Blueprint('api', __name__, url_prefix='/api')\n\n@api.route('/getdata')\ndef getdata():\n return {'item_1': 'hello', 'item_2': 'worlds'}\n\n\n# CRUD functionality below\n\n@api.route('/movies', methods=['POST'])\n@token_required\ndef create_movie(current_user_token):\n name = request.json['name']\n genre = request.json['genre']\n year= request.json['year']\n token = current_user_token.token\n\n\n movie = Movie(name,genre,year,user_token = token)\n\n db.session.add(movie)\n db.session.commit()\n\n response = movie_schema.dump(movie)\n return jsonify(response)\n\n\n\n@api.route('/movies', methods = ['GET'])\n@token_required\ndef get_movies(current_user_token):\n owner = current_user_token.token\n movies = Movie.query.filter_by(user_token = owner).all()\n response = movies_schema.dump(movies)\n return jsonify(response)\n\n\n@api.route('/movies/', methods= ['GET'])\n@token_required\ndef get_movie(current_user_token, id):\n movie = Movie.query.get(id)\n if movie:\n response = movie_schema.dump(movie)\n return jsonify(response)\n else:\n return jsonify({'Error': 'That movie does not exist!'})\n\n\n\n@api.route('/movies/', methods= ['POST', 'PUT'])\n@token_required\ndef update_movie(current_user_token, id):\n movie = Movie.query.get(id)\n print(movie)\n if movie:\n movie.name = request.json['name']\n movie.genre = request.json['genre']\n movie.year = request.json['year']\n movie.token_token = current_user_token.token\n db.session.commit()\n\n response = movie_schema.dump(movie)\n return jsonify(response)\n else:\n return jsonify({'Error': 'That movie does not exist!'})\n\n# DELETE ROUTE\n@api.route('/movies/', methods= ['DELETE'])\n@token_required\ndef delete_movies(urrent_user_token, id):\n movie = Movie.query.get(id)\n if movie:\n db.session.delete(movie)\n db.session.commit()\n return jsonify({'Success': f'Movie ID #{movie.id} has been deleted'})\n else:\n return jsonify({'Error': 'That movie does not exist!'})\n","sub_path":"movie_lovers/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"640469233","text":"####################################################\n# Author: Tyler Daddio #\n# Date: July 25, 2017 #\n# File: eval-clones.py #\n# About: Cross-references two CLONES files and #\n# identifies the similar and dissimilar #\n# clones. 
#\n####################################################\n\nfrom sys import argv\nimport os\nimport tcrtypes\n\n# Verify we have enough arguments\nif len(argv) < 1+3:\n\tprint(\"arguments: <reference clones> <candidate clones> <candidate duals>\")\n\texit()\n\n# Grab program arguments\nscript = argv[0]\nrefclonespath = argv[1]\ncandclonespath = argv[2]\ncanddualspath = argv[3]\n\n# Print script arguments\nprint(\"script: \", script)\nprint(\"reference clones: \", refclonespath)\nprint(\"candidate clones: \", candclonespath)\nprint(\"candidate duals: \", canddualspath)\n\n\n####################\n# Read input files #\n####################\n\nwith open(refclonespath, \"r\") as f:\n\trefclones = set([tuple([c.snk1, c.snk2, c.src1, c.src2]) for c in tcrtypes.CLONES.from_file(f).clones])\nwith open(candclonespath, \"r\") as f:\n\tcandclones = set([tuple([c.snk1, c.snk2, c.src1, c.src2]) for c in tcrtypes.CLONES.from_file(f).clones])\ncandsrcduals = set()\ncandsnkduals = set()\nif os.path.isfile(canddualspath):\n\twith open(canddualspath, \"r\") as f:\n\t\tcandsrcduals = set()\n\t\tcandsnkduals = set()\n\t\tfor ln in f:\n\t\t\tcols = ln.rstrip().split(\",\")\n\t\t\ti = int(cols[1])\n\t\t\tj = int(cols[2])\n\t\t\tif i > j:\n\t\t\t\ttemp = i\n\t\t\t\ti = j\n\t\t\t\tj = temp\n\t\t\tif cols[0] == \"S\":\n\t\t\t\tcandsrcduals.add(tuple([i, j]))\n\t\t\telif cols[0] == \"T\":\n\t\t\t\tcandsnkduals.add(tuple([i, j]))\n\n\n###############################\n# Identify duals in reference #\n###############################\n\nrefsrcduals = set()\nrefsnkduals = set()\nfor c in refclones:\n\tsnk1, snk2, src1, src2 = c\n\tif src1 != src2:\n\t\ti = src1\n\t\tj = src2\n\t\tif i > j:\n\t\t\ttemp = i\n\t\t\ti = j\n\t\t\tj = temp\n\t\trefsrcduals.add(tuple([i, j]))\n\tif snk1 != snk2:\n\t\ti = snk1\n\t\tj = snk2\n\t\tif i > j:\n\t\t\ttemp = i\n\t\t\ti = j\n\t\t\tj = temp\n\t\trefsnkduals.add(tuple([i, j]))\n\n\n##################\n# Compare clones #\n##################\n\ndef compare_clones (refclones, candclones):\n\t\"\"\"\n\tIdentifies the number of shared clones between\n\teach given set of clones.\n\t\"\"\"\n\tnumshared = 0\n\t\n\tmissedsingles = 0\n\tmisseddualbetas = 0\n\tmisseddualalphas = 0\n\tmisseddualduals = 0\n\t\n\tcorrectsingles = 0\n\tcorrectdualbetas = 0\n\tcorrectdualalphas = 0\n\tcorrectdualduals = 0\n\t\n\tincorrectsingles = 0\n\tincorrectdualbetas = 0\n\tincorrectdualalphas = 0\n\tincorrectdualduals = 0\n\tfor c in candclones:\n\t\tb1, b2, a1, a2 = c\n\t\tif c in refclones:\n\t\t\tnumshared += 1\n\t\t\tif b1 == b2 and a1 == a2:\n\t\t\t\tcorrectsingles += 1\n\t\t\telif b1 != b2 and a1 != a2:\n\t\t\t\tcorrectdualduals += 1\n\t\t\telif b1 != b2:\n\t\t\t\tcorrectdualbetas += 1\n\t\t\telif a1 != a2:\n\t\t\t\tcorrectdualalphas += 1\n\t\telse:\n\t\t\tif b1 == b2 and a1 == a2:\n\t\t\t\tincorrectsingles += 1\n\t\t\telif b1 != b2 and a1 != a2:\n\t\t\t\tincorrectdualduals += 1\n\t\t\telif b1 != b2:\n\t\t\t\tincorrectdualbetas += 1\n\t\t\telif a1 != a2:\n\t\t\t\tincorrectdualalphas += 1\n\tfor c in refclones:\n\t\tb1, b2, a1, a2 = c\n\t\tif not(c in candclones):\n\t\t\tif b1 == b2 and a1 == a2:\n\t\t\t\tmissedsingles += 1\n\t\t\telif b1 != b2 and a1 != a2:\n\t\t\t\tmisseddualduals += 1\n\t\t\telif b1 != b2:\n\t\t\t\tmisseddualbetas += 1\n\t\t\telif a1 != a2:\n\t\t\t\tmisseddualalphas += 1\n\treturn [numshared, missedsingles, misseddualalphas, misseddualbetas, misseddualduals, \n\t\t\tcorrectsingles, correctdualalphas, correctdualbetas, correctdualduals, \n\t\t\tincorrectsingles, incorrectdualalphas, incorrectdualbetas, incorrectdualduals]\n\ndef count_clones 
(clones):\n\t\"\"\"\n\tCounts the number of each type of clone in the\n\tgiven set of clones.\n\t\"\"\"\n\tnumsingles = 0\n\tnumdualduals = 0\n\tnumdualsnks = 0\n\tnumdualsrcs = 0\n\tfor c in clones:\n\t\tsnk1, snk2, src1, src2 = c\n\t\tif snk1 == snk2 and src1 == src2:\n\t\t\tnumsingles += 1\n\t\telif snk1 != snk2 and src1 != src2:\n\t\t\tnumdualduals += 1\n\t\telif snk1 != snk2:\n\t\t\tnumdualsnks += 1\n\t\telif src1 != src2:\n\t\t\tnumdualsrcs += 1\n\treturn [numsingles, numdualduals, numdualsnks, numdualsrcs]\n\ndef build_pairs (clones):\n\t\"\"\"\n\tConstructs the TCR pairs from the given list of clones.\n\t\"\"\"\n\tpairs = set()\n\tfor c in clones:\n\t\tsnk1, snk2, src1, src2 = c\n\t\tpairs.add(tuple([snk1, src1]))\n\t\tpairs.add(tuple([snk2, src1]))\n\t\tpairs.add(tuple([snk1, src2]))\n\t\tpairs.add(tuple([snk2, src2]))\n\treturn pairs\n\ndef identify_sharing (clones):\n\t\"\"\"\n\tIdentifies clones with shared chains.\n\t\"\"\"\n\tseensrc = set()\n\tseensnk = set()\n\tsharedsrc = set()\n\tsharedsnk = set()\n\tfor c in clones:\n\t\tsnk1, snk2, src1, src2 = c\n\t\tif snk1 in seensnk:\n\t\t\tsharedsnk.add(snk1)\n\t\tif snk2 in seensnk and snk2 != snk1:\n\t\t\tsharedsnk.add(snk2)\n\t\tif src1 in seensrc:\n\t\t\tsharedsrc.add(src1)\n\t\tif src2 in seensrc and src2 != src1:\n\t\t\tsharedsrc.add(src2)\n\t\tseensnk.add(snk1)\n\t\tseensnk.add(snk2)\n\t\tseensrc.add(src1)\n\t\tseensrc.add(src2)\n\t\n\tsharingclones = set()\n\tfor c in clones:\n\t\tsnk1, snk2, src1, src2 = c\n\t\tif snk1 in sharedsnk or snk2 in sharedsnk or src1 in sharedsrc or src2 in sharedsrc:\n\t\t\tsharingclones.add(tuple([snk1, snk2, src1, src2]))\n\t\n\treturn sharingclones\n\ndef compare_sharing (refclones, candclones, sharingrefclones, sharingcandclones):\n\t\"\"\"\n\tCompares the given sets of clones and their chain-sharing\n\tclones to determine the amount of overlap between them.\n\t\"\"\"\n\tnumcorrect = 0\n\tnumincorrect = 0\n\tnumrecovered = 0\n\tnummissing = 0\n\tfor c in sharingcandclones:\n\t\tif c in refclones:\n\t\t\tnumcorrect += 1\n\t\telse:\n\t\t\tnumincorrect += 1\n\tfor c in sharingrefclones:\n\t\tif c in candclones:\n\t\t\tnumrecovered += 1\n\t\telse:\n\t\t\tnummissing += 1\n\treturn [numcorrect, numincorrect, numrecovered, nummissing]\n\n######################\n# Compile statistics #\n######################\n\n# Get counts of each type of clone\nnumrefsingles, numrefdualduals, numrefdualbetas, numrefdualalphas = count_clones(refclones)\nnumcandsingles, numcanddualduals, numcanddualbetas, numcanddualalphas = count_clones(candclones)\n\n# Get pair comparison statistics\nrefpairs = build_pairs(refclones)\ncandpairs = build_pairs(candclones)\n\nnumrefpairs = len(refpairs)\nnumcandpairs = len(candpairs)\nnumrefcorrect = len(refpairs) - len(refpairs - candpairs)\nnumcandcorrect = len(candpairs) - len(candpairs - refpairs)\n\n# Get clone comparison statistics\nnumshared, missedsingles, misseddualalphas, misseddualbetas, misseddualduals, correctsingles, correctdualalphas, correctdualbetas, correctdualduals, incorrectsingles, incorrectdualalphas, incorrectdualbetas, incorrectdualduals = compare_clones(refclones, candclones)\nnumreferences = len(refclones)\nif numreferences != 0:\n\tpropreferences = numshared / numreferences\nelse:\n\tpropreferences = 0\nnumcandidates = len(candclones)\nif numcandidates != 0:\n\tpropcandidates = numshared / numcandidates\nelse:\n\tpropcandidates = 0\n\n# Evaluate raw candidate duals\nsrcdualscorrect = refsrcduals & candsrcduals\nsnkdualscorrect = refsnkduals & 
candsnkduals\nsrcdualsincorrect = candsrcduals - srcdualscorrect\nsnkdualsincorrect = candsnkduals - snkdualscorrect\nsrcdualsrecovered = refsrcduals & candsrcduals\nsnkdualsrecovered = refsnkduals & candsnkduals\nsrcdualsmissed = refsrcduals - srcdualsrecovered\nsnkdualsmissed = refsnkduals - snkdualsrecovered\n\nnumsrcdualscorrect = len(srcdualscorrect)\nnumsnkdualscorrect = len(snkdualscorrect)\nnumsrcdualsincorrect = len(srcdualsincorrect)\nnumsnkdualsincorrect = len(snkdualsincorrect)\nnumsrcdualsrecovered = len(srcdualsrecovered)\nnumsnkdualsrecovered = len(snkdualsrecovered)\nnumsrcdualsmissed = len(srcdualsmissed)\nnumsnkdualsmissed = len(snkdualsmissed)\nnumrefsrcduals = len(refsrcduals)\nnumrefsnkduals = len(refsnkduals)\nnumcandsrcduals = len(candsrcduals)\nnumcandsnkduals = len(candsnkduals)\n\n# Evaluate pair sharing\nsharingrefclones = identify_sharing(refclones)\nsharingcandclones = identify_sharing(candclones)\nsharingrefpairs = build_pairs(sharingrefclones)\nsharingcandpairs = build_pairs(sharingcandclones)\nnumrefsharepairs = len(sharingrefpairs)\nnumcandsharepairs = len(sharingcandpairs)\nnumsharepairscorrect = len(candpairs & sharingcandpairs)\nnumsharepairsincorrect = numcandsharepairs - numsharepairscorrect\nnumsharepairsmissed = len(sharingrefpairs - sharingcandpairs)\nnumsharepairsrecovered = numrefsharepairs - numsharepairsmissed\n\n# Evaluate clone sharing\nnumsharingrefclones = len(sharingrefclones)\nnumsharingcandclones = len(sharingcandclones)\nnumshareclonescorrect, numshareclonesincorrect, numshareclonesrecovered, numshareclonesmissing = compare_sharing(refclones, \n\t\tcandclones, sharingrefclones, sharingcandclones)\n\n\n#########################\n# Output all statistics #\n#########################\nprint(\"\")\n\n# Dual statistics\nprint(\">dual alpha condition={}\".format(numrefsrcduals))\nprint(\">dual alpha predicted={}\".format(numcandsrcduals))\nprint(\">dual alpha shared={}\".format(numsrcdualscorrect))\nif numcandsrcduals == 0:\n\tnumcandsrcduals = 1\nprint(\">dual alpha correct={}\".format(numsrcdualscorrect))\nprint(\">dual alpha incorrect={}\".format(numsrcdualsincorrect))\nprint(\">dual alpha recovered={}\".format(numsrcdualsrecovered))\nprint(\">dual alpha missed={}\".format(numsrcdualsmissed))\nprint(\">dual alpha PPV={}\".format(numsrcdualscorrect / numcandsrcduals))\nif numrefsrcduals == 0:\n\tnumrefsrcduals = 1\nprint(\">dual alpha TPR={}\".format(numsrcdualscorrect / numrefsrcduals))\nprint(\"\")\nprint(\">dual beta condition={}\".format(numrefsnkduals))\nprint(\">dual beta predicted={}\".format(numcandsnkduals))\nprint(\">dual beta shared={}\".format(numsnkdualscorrect))\nprint(\">dual beta correct={}\".format(numsnkdualscorrect))\nprint(\">dual beta incorrect={}\".format(numsnkdualsincorrect))\nprint(\">dual beta recovered={}\".format(numsnkdualsrecovered))\nprint(\">dual beta missed={}\".format(numsnkdualsmissed))\nif numcandsnkduals == 0:\n\tnumcandsnkduals = 1\nprint(\">dual beta PPV={}\".format(numsnkdualscorrect / numcandsnkduals))\nif numrefsnkduals == 0:\n\tnumrefsnkduals = 1\nprint(\">dual beta TPR={}\".format(numsnkdualscorrect / numrefsnkduals))\n\n# Sharing pairs statistics\nprint(\"\")\nprint(\">pair sharing condition={}\".format(numrefsharepairs))\nprint(\">pair sharing predicted={}\".format(numcandsharepairs))\nprint(\">pair sharing correct={}\".format(numsharepairscorrect))\nprint(\">pair sharing incorrect={}\".format(numsharepairsincorrect))\nprint(\">pair sharing 
recovered={}\".format(numsharepairsrecovered))\nprint(\">pair sharing missed={}\".format(numsharepairsmissed))\nif numcandsharepairs == 0:\n\tnumcandsharepairs = 1\nprint(\">pair sharing PPV={}\".format(numsharepairscorrect / numcandsharepairs))\nif numrefsharepairs == 0:\n\tnumrefsharepairs = 1\nprint(\">pair sharing TPR={}\".format(numsharepairsrecovered / numrefsharepairs))\n\n# Sharing clones statistics\nprint(\"\")\nprint(\">clones sharing condition={}\".format(numsharingrefclones))\nprint(\">clones sharing predicted={}\".format(numsharingcandclones))\nprint(\">clones sharing correct={}\".format(numshareclonescorrect))\nprint(\">clones sharing incorrect={}\".format(numshareclonesincorrect))\nprint(\">clones sharing recovered={}\".format(numshareclonesrecovered))\nprint(\">clones sharing missed={}\".format(numshareclonesmissing))\nif numsharingcandclones == 0:\n\tnumsharingcandclones = 1\nprint(\">clones sharing PPV={}\".format(numshareclonescorrect / numsharingcandclones))\nif numsharingrefclones == 0:\n\tnumsharingrefclones = 1\nprint(\">clones sharing TPR={}\".format(numshareclonesrecovered / numsharingrefclones))\n\n# Pair statistics\nprint(\"\")\nprint(\">pair condition={}\".format(numrefpairs))\nprint(\">pair predicted={}\".format(numcandpairs))\nif numrefpairs != 0:\n\tprint(\">pair TPR={}\".format(numrefcorrect / numrefpairs))\nelse:\n\tprint(\">pair TPR=0\")\nif numcandpairs != 0:\n\tprint(\">pair PPV={}\".format(numcandcorrect / numcandpairs))\nelse:\n\tprint(\">pair PPV=0\")\n\n# Whole-clone statistics\nprint(\"\")\nprint(\">clones condition={}\".format(numreferences))\nprint(\">clones predicted={}\".format(numcandidates))\nprint(\">clones correct={}\".format(numshared))\nprint(\">clones TPR={}\".format(propreferences))\nprint(\">clones PPV={}\".format(propcandidates))\nprint(\"\")\nif numcandsingles == 0:\n\tprint(\">clones single rate=0\")\nelse:\n\tprint(\">clones single rate={}\".format(correctsingles / numcandsingles))\nprint(\">clones correct singles={}\".format(correctsingles))\nprint(\">clones incorrect singles={}\".format(incorrectsingles))\nprint(\">clones missed singles={}\".format(missedsingles))\nprint(\"\")\nif numcanddualalphas == 0:\n\tprint(\">clones dual alpha rate=0\")\nelse:\n\tprint(\">clones dual alpha rate={}\".format(correctdualalphas / numcanddualalphas))\nprint(\">clones missed dual alphas={}\".format(misseddualalphas))\nif numcanddualalphas == 0:\n\tnumcanddualalphas = 1\nprint(\">clones correct dual alphas={}\".format(correctdualalphas, correctdualalphas / numcanddualalphas))\nprint(\">clones incorrect dual alphas={}\".format(incorrectdualalphas))\nprint(\"\")\nif numcanddualbetas == 0:\n\tprint(\">clones dual beta rate=0\")\nelse:\n\tprint(\">clones dual beta rate={}\".format(correctdualbetas / numcanddualbetas))\nif numcanddualbetas == 0:\n\tnumcanddualbetas = 1\nprint(\">clones missed dual betas={}\".format(misseddualbetas))\nprint(\">clones correct dual betas={}\".format(correctdualbetas))\nprint(\">clones incorrect dual betas={}\".format(incorrectdualbetas))\nprint(\"\")\nif numcanddualduals == 0:\n\tprint(\">clones dual dual rate=0\")\nelse:\n\tprint(\">clones dual dual rate={}\".format(correctdualduals / numcanddualduals))\nif numcanddualduals == 0:\n\tnumcanddualduals = 1\nprint(\">clones missed dual duals={}\".format(misseddualduals))\nprint(\">clones correct dual duals={}\".format(correctdualduals))\nprint(\">clones incorrect dual 
duals={}\".format(incorrectdualduals))\nprint(\"\")\n","sub_path":"analysis/eval-clones.py","file_name":"eval-clones.py","file_ext":"py","file_size_in_byte":13530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"577657596","text":"#!/usr/bin/env python3\n\nimport codecs\nfrom aes import *\nimport os\n\ndata = codecs.decode(open(\"10.txt\", \"rb\").read(), \"base64\")\n\niv=b\"\\x00\"*16\nprint(cbc_decrypt(b\"YELLOW SUBMARINE\", iv, data).decode(\"ascii\"))\n\nkey=os.urandom(16)\niv=os.urandom(16)\n\nmsg = b\"hello there world\"\nassert msg == cbc_decrypt(key, iv, cbc_encrypt(key, iv, msg))\n","sub_path":"c10.py","file_name":"c10.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"251949907","text":"import subprocess\nimport psutil\nfrom yt_crawler.backend.various_m import config\nfrom yt_crawler.backend.various_m.classes import Section\nfrom yt_crawler.backend.various_m.utils import LOG, get_local_ip\n\n__author__ = 'Sheldan'\n\n\ndef toggle_transmission(slow):\n \"\"\"\n Throttles/reenables the transmission instance running on the configured ip to an absolute minimum\n :param slow:\n :return:\n \"\"\"\n if config.get_value_from_config('throttletransmission', Section.THROTTELING) == 'true':\n transmission_pid = 0\n for process in psutil.process_iter():\n if process.name == 'transmission-daemon':\n transmission_pid = process.pid\n if transmission_pid != 0:\n if slow:\n LOG.debug('Slowing down transmission.')\n parameter = ' -as -asd 1'\n else:\n LOG.debug('Resetting transmission to the normal speed.')\n parameter = ' -AS'\n subprocess.Popen(\n ['transmission-remote ' + get_local_ip() + parameter],\n shell=True)","sub_path":"yt_crawler/backend/various_m/transmission.py","file_name":"transmission.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"136031893","text":"import py\nfrom genscript.pkgpacker import find_toplevel, pkgname, pkg_to_mapping\nfrom genscript import pkgpacker\n\npy_pkg = py.path.local(py.__file__).pypkgpath()\n\n@py.test.mark.skipif(py_pkg is None, reason='magical pylib import')\ndef test_find_toplevel():\n pylib = find_toplevel('py')\n assert pylib == py_pkg\n\n\ndef test_pkgname(tmpdir):\n result = pkgname('test', tmpdir, tmpdir/'foo.py')\n assert result == 'test.foo'\n\n\ndef test_pkg_to_mapping(tmpdir, monkeypatch):\n monkeypatch.setattr(pkgpacker, 'find_toplevel', lambda x:tmpdir)\n tmpdir.join('test.py').write('#test\\n')\n tmpdir.join('__init__.py').write('#!/bin/python')\n\n mapping = pkg_to_mapping('test')\n expected = {\n 'test.__init__': '#!/bin/python',\n 'test.test': '#test\\n',\n }\n assert mapping == expected\n\ndef test_pkg_to_mapping_for_module(tmpdir, monkeypatch):\n monkeypatch.setattr(pkgpacker, 'find_toplevel', lambda x:test)\n test = tmpdir.join('test.py')\n test.write('#test\\n')\n\n mapping = pkg_to_mapping('test')\n assert mapping == { 'test': '#test\\n' }\n\n\n","sub_path":"testing/test_walkpkg.py","file_name":"test_walkpkg.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"620550405","text":"#!/usr/bin/env python3\nimport sys\nfrom threading import Timer\nfrom omxplayer.player import OMXPlayer\nfrom pathlib import Path\nfrom pynput import keyboard\nimport psutil\nimport json\n\n# Kill all running 
omxplayer instances\ndef kill_all():\n    for proc in psutil.process_iter():\n        if \"omxplayer\" in proc.name():\n            try:\n                proc.kill()\n            except:\n                print(\"Couldn't kill omxplayer\")\n\nkill_all()\n\nargs_basic = ['--no-osd', '--no-keys', '--aspect-mode', 'fill']\n# Window mode for testing\nargs_basic = args_basic + ['--win', '0 0 200 200']\nargs_static = args_basic + ['--layer', '3', '--alpha', '127', '--loop']\nargs = args_basic + ['--layer', '2']\nargs_loop = args_basic + ['--layer', '1', '--loop']\nprint(args_loop)\n\nfile = open('video.json', 'r')\ndata = json.load(file)\nprint(data)\n\ndef stop_static():\n    try:\n        static.pause()\n        static.hide_video()\n    except:\n        print(\"failed to stop static\")\n\ndef static_transition(player, exit_status):\n    try:\n        static.show_video()\n        if player == looper:\n            static.set_alpha(255)\n        else:\n            static.set_alpha(127)\n        static.play()\n        t = Timer(1.0, stop_static)\n        t.start()\n    except:\n        print(\"static failed\")\n        static.hide_video()\n\nstatic_path = Path(\"static.mp4\")\nbackground_path = Path(\"logo.mp4\")\nlooper = OMXPlayer(background_path, args=args_loop, dbus_name=\"omxplayer.player0\")\nplayer = OMXPlayer(background_path, args=args, dbus_name=\"omxplayer.player1\")\nstatic = OMXPlayer(static_path, args=args_static, dbus_name=\"omxplayer.player2\")\nstatic.hide_video()\nstatic.pause()\nstatic.set_alpha(127)\nplayer.hide_video()\nplayer.pause()\n#static.hide_video()\nlooper.exitEvent += static_transition\nplayer.exitEvent += static_transition\n\n\ndef on_press(key):\n    if key == keyboard.Key.esc:\n        try:\n            kill_all()\n        except:\n            print(\"couldn't kill all\")\n        sys.exit()\n    \n    else:\n        try:\n            key = key.char\n        except AttributeError:\n            key = str(key)\n        video = data.get(key, False)\n        if video:\n            if video['loop'] == 1:\n                try:\n                    print(\"looping\")\n                    looper.load(Path(video[\"url\"]))\n                    player.hide_video()\n                except:\n                    print(\"failed to start loop\")\n            else:\n                try:\n                    player.load(Path(video[\"url\"]))\n                    player.show_video()\n                except:\n                    print(\"failed to start video\")\n\n            print(video)\n\n\nwith keyboard.Listener(on_press=on_press) as listener:\n    listener.join()\n","sub_path":"video/video_switcher.py","file_name":"video_switcher.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"273030777","text":"# -*- coding:utf-8 -*-\r\n# Author: washing\r\n# DateTime: 2021/6/11 11:20\r\n# File: 0279.py\r\n# Desc: \r\n\r\nimport math\r\n\r\nclass Solution:\r\n    def numSquares(self, n: int) -> int:\r\n        pfs = set()\r\n        for i in range(1, 101): pfs.add(i**2)\r\n\r\n        def ck4(num):\r\n            while num % 4 == 0: num //= 4\r\n            return num % 8 == 7\r\n\r\n        if n in pfs: return 1\r\n        if ck4(n): return 4\r\n        for i in range(1, int(math.sqrt(n))+1):\r\n            j = n - i ** 2\r\n            if j in pfs: return 2\r\n        return 3\r\n","sub_path":"Solutions/0279/0279.py","file_name":"0279.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"360084725","text":"\"\"\"\nImplementation of linear algorithm to determine shape elasticity as described by Bernal et al.\n\"\"\"\nimport numpy as np\nfrom numba import jit, types, float64, int16\nimport shapedist.elastic_n_2\nfrom math import floor, pi\nnp.set_printoptions(precision=25)\n\n@jit([float64(float64, float64[:], float64[:], int16, int16)], cache=True, nopython=True)\ndef interp(t, x, y, lower, upper):\n    \"\"\"\n    Linear interpolation function. 
Uses binary search to find which values of x to interpolate over.\n Does not work if interpolation is out of bounds\n\n Parameters\n ----------\n t : float\n The input of the function\n x : numpy array of floats\n The domain of the function to be interpolated\n y : numpy array of floats\n The range of the function to be interpolated\n\n Returns\n -------\n float\n The calculated value\n \"\"\"\n i = 0\n while lower < upper:\n i = lower + (upper - lower) // 2\n val = x[i]\n if t == val:\n break\n elif t > val:\n if lower == i:\n break\n lower = i\n elif t < val:\n upper = i\n\n if i == x.size-1:\n temp = y[i]\n else:\n temp = (t - x[i]) * (y[i + 1] - y[i]) / (x[i + 1] - x[i]) + y[i]\n return temp\n\n\n@jit([float64(float64, float64[:], float64[:], int16, int16)], cache=True, nopython=True)\ndef interp_uniform(t, x, y, lower, upper):\n interval = x[1] - x[0]\n i = floor(t / interval)\n if i == floor(1/interval):\n return y[i]\n else:\n #print((t - x[i]) * (y[i + 1] - y[i]) / interval + y[i])\n return (t - x[i]) * (y[i + 1] - y[i]) / interval + y[i]\n\n\n@jit([float64(float64, float64[:], float64, int16, int16)], cache=True, nopython=True)\ndef interp_range_only(t, y, n, lower, upper):\n \"\"\"\n Linear interpolation function. Uses binary search to find which values of x to interpolate over.\n Does not work if interpolation is out of bounds\n\n Parameters\n ----------\n t : float\n The input of the function\n x : numpy array of floats\n The domain of the function to be interpolated\n y : numpy array of floats\n The range of the function to be interpolated\n\n Returns\n -------\n float\n The calculated value\n \"\"\"\n i = 0\n while lower < upper:\n i = lower + (upper - lower) // 2\n val = (1/n) * i\n if t == val:\n break\n elif t > val:\n if lower == i:\n break\n lower = i\n elif t < val:\n upper = i\n return (t - (1/n) * i) * (y[i + 1] - y[i]) / ((1/n) * (i + 1) - (1/n) * i) + y[i]\n\n\n@jit([float64(float64[:], float64[:], float64[:], float64[:], int16, int16, int16, int16, float64)], cache=True, nopython=True)\ndef integrate(tp, tq, py, qy, k, i, l, j, gamma_interval):\n e = 0\n a = k\n while a < i:\n gammak_1 = gamma_interval * l + (tp[a] - tp[k]) * \\\n (gamma_interval * j - gamma_interval * l) / (tp[i] - tp[k])\n gammak_2 = gamma_interval * l + (tp[(a+1)] - tp[k]) * (gamma_interval * j - gamma_interval * l) \\\n / (tp[i] - tp[k])\n e = e + (0.5 * (py[a] - interp_uniform(gammak_1, tq, qy, 0, tq.size)) ** 2\n + 0.5 * (py[(a+1)] - interp_uniform(gammak_2, tq, qy, 0, tq.size)) ** 2) * \\\n (tp[(a+1)] - tp[a]) * 0.5\n a = a + 1\n return e\n\n\n@jit([float64(float64[:], float64[:], float64[:], float64[:], int16, int16, int16, int16, float64, int16[:])], cache=True, nopython=True)\ndef integrate_efficient(tp, tq, py, qy, k, i, l, j, gamma_interval, rough_path):\n e = 0\n a = k\n index_k = rough_path[k]\n index_i = rough_path[i]\n index_a = rough_path[a]\n while a < i:\n index_a = rough_path[a]\n index_a_1 = rough_path[a+1]\n gammak_1 = gamma_interval * l + (tp[index_a] - tp[index_k]) * \\\n (gamma_interval * j - gamma_interval * l) / (tp[index_i] - tp[index_k])\n gammak_2 = gamma_interval * l + (tp[index_a_1] - tp[index_k]) * (gamma_interval * j - gamma_interval * l) \\\n / (tp[index_i] - tp[index_k])\n e = e + (0.5 * (py[index_a] - interp_uniform(gammak_1, tq, qy, 0, tq.size)) ** 2\n + 0.5 * (py[index_a_1] - interp_uniform(gammak_2, tq, qy, 0, tq.size)) ** 2) * \\\n (tp[index_a_1] - tp[index_a]) * 0.5\n a = a + 1\n return e\n\n\n@jit([types.Tuple((float64[:], float64[:], float64))(float64[:, 
:], float64[:, :], int16, int16, int16)], cache=True,\n nopython=True)\ndef find_gamma(p, q, neighborhood, strip_height, max_iteration):\n \"\"\"\n Finds the discretized function gamma, and the minimum energy.\n\n Parameters\n ----------\n\n p : array of two arrays of floats\n The first input curve, an array with 2 elements: the domain of p as an array and the range of\n p as an array in that order. Both arrays must be the same size as q\n q : array of two arrays of floats\n The first second curve, an array with 2 elements: the domain of q as an array and the range of\n q as an array in that order. Both arrays must be the same length as p\n neighborhood : int\n The height of the adapting strip. Generally advised to be 1/3 or 1/4 of 2**max_iteration. -1 uses the value\n max_iteration * 30.\n max_iteration : int\n The resolution of the algorithm. Actual resolution is 2**max_iteration. Default value is 10. -1 uses that value\n\n Returns\n -------\n array of floats, array of floats, float\n The domain of gamma as an array of floats, the range of gamma as an array, of floats,\n and the minimum energy calculated as a float.\n\n \"\"\"\n current_iteration = 0\n path_length = 32\n i = 0\n max_path_length = 32\n while i < max_iteration:\n max_path_length = max_path_length * 2 + 1\n i = i + 1\n\n tp, tq, py, qy = p[0], q[0], p[1], q[1]\n\n n = tp.size\n path = np.zeros(n + 1, dtype=np.float64)\n i = 0\n # !!\n tg = np.linspace(0., 1., path_length).astype(np.float64)\n g = np.linspace(0., 1., path_length).astype(np.float64)\n domain_interval = 0\n domain_interval = tp.size // path_length\n temp1 = np.zeros((2, path_length), dtype=np.float64)\n temp2 = np.zeros((2, path_length), dtype=np.float64)\n temp3 = np.zeros((2, path_length), dtype=np.float64)\n\n while i < path_length-1:\n temp1[0][i] = tp[i * domain_interval]\n temp1[1][i] = py[i * domain_interval]\n temp2[0][i] = tq[i * domain_interval]\n temp2[1][i] = qy[i * domain_interval]\n temp3[0][i] = tg[i]\n temp3[1][i] = g[i]\n i = i + 1\n temp1[0][path_length-1] = 1\n temp1[1][path_length - 1] = py[py.size-1]\n temp2[0][path_length - 1] = 1\n temp2[1][path_length - 1] = qy[qy.size-1]\n temp3[0][path_length-1] = 1\n temp3[1][path_length - 1] = 1\n tg, gamma_range, val = shapedist.elastic_n_2.find_gamma(temp1, temp2, temp3, 5, 5)\n\n i = 0\n while i < gamma_range.size:\n path[i] = gamma_range[i]\n i = i + 1\n current_iteration = current_iteration + 1\n # !!\n i = 0\n min_energy_values = np.zeros((n, max_path_length), dtype=np.float64)\n path_nodes = np.zeros((n, max_path_length, 2), dtype=np.int16)\n previous_n = gamma_range.size\n rough_path = np.zeros(tp.size, dtype=np.int16)\n\n while current_iteration < max_iteration:\n path_length = path_length * 2 + 1\n\n m = path_length\n n = path_length\n if n > tp.size:\n n = tp.size\n domain_interval = tp.size // n\n n = tp.size // domain_interval\n if domain_interval == 0:\n domain_interval = 1\n i = 0\n\n while i < n:\n rough_path[i] = domain_interval * i\n i = i + 1\n\n if n < rough_path.size - 1:\n rough_path[n] = tp.size-1\n n = n + 1\n gamma_interval = 1 / (m-1)\n min_energy_values[0][0] = (0.5 * (py[0] - interp_uniform(0, tq, qy, 0, tq.size)) ** 2\n + 0.5 * (py[1] - interp_uniform(gamma_interval, tq, qy, 0, tq.size)) ** 2) *\\\n (tp[1] - tp[0]) * 0.5\n path_nodes[1][1][0] = 0\n path_nodes[1][1][1] = 0\n i, j, k, l = 1, 1, 1, 1\n val = 0\n val2 = 0\n while i < n-1:\n val = interp_range_only(tp[rough_path[i]], path, previous_n, 0, previous_n)\n j = floor(val / gamma_interval) - strip_height\n if j <= 
0:\n j = 1\n while j < m-1 and j * gamma_interval < val + strip_height * gamma_interval:\n min_energy_values[i][j] = integrate_efficient(tp, tq, py, qy, 0, i, 0, j, gamma_interval, rough_path)\n\n k = i - neighborhood\n if k <= 0:\n k = 1\n minimum = min_energy_values[i][j]\n while k < i:\n val2 = interp_range_only(tp[rough_path[k]], path, previous_n, 0,\n previous_n)\n l = j - neighborhood\n if l <= floor(val2 / gamma_interval) - strip_height:\n l = floor(val2 / gamma_interval) - strip_height\n if l <= 0:\n l = 1\n while l < j and l * gamma_interval < val2 + strip_height * gamma_interval:\n e = min_energy_values[k, l] + integrate_efficient(tp, tq, py, qy, k, i, l, j,\n gamma_interval, rough_path)\n if e < minimum:\n minimum = e\n path_nodes[i][j][0] = k\n path_nodes[i][j][1] = l\n l = l + 1\n k = k + 1\n min_energy_values[i][j] = minimum\n j = j + 1\n i = i + 1\n\n # !!\n i = n-1\n j = m-1\n min_energy_values[i][j] = integrate_efficient(tp, tq, py, qy, 0, i, 0, j, gamma_interval, rough_path)\n\n k = i - neighborhood\n if k <= 0:\n k = 1\n minimum = min_energy_values[i][j]\n while k < i:\n val2 = interp_range_only(tp[rough_path[k]], path, previous_n, 0,\n previous_n)\n l = j - neighborhood\n if l <= floor(val2 / gamma_interval) - strip_height:\n l = floor(val2 / gamma_interval) - strip_height\n if l <= 0:\n l = 1\n while l < j and l * gamma_interval < val2 + strip_height * gamma_interval:\n e = min_energy_values[k, l] + integrate_efficient(tp, tq, py, qy, k, i, l, j,\n gamma_interval, rough_path)\n\n if e < minimum:\n minimum = e\n path_nodes[i][j][0] = k\n path_nodes[i][j][1] = l\n l = l + 1\n k = k + 1\n\n min_energy_values[i][j] = minimum\n\n # !!\n\n path_indices = np.zeros((n, 2), dtype=np.int16)\n path_indices[0][0] = n - 1\n path_indices[0][1] = m - 1\n\n i = 0\n while path_indices[i][0] != 0 or path_indices[i][1] != 0 and i + 1 < path.size:\n result = path_nodes[path_indices[i][0]][path_indices[i][1]]\n path_indices[i + 1][0] = result[0]\n path_indices[i + 1][1] = result[1]\n i = i + 1\n i = 0\n previous = 1\n previousIndex = n - 1\n j = 1\n path[path_indices[0][0]] = gamma_interval * path_indices[0][1]\n while i < path_indices.size // 2 and previousIndex != 0:\n path[path_indices[i][0]] = gamma_interval * path_indices[i][1]\n if previousIndex - path_indices[i][0] > 1:\n j = 0\n step_size = (previous - gamma_interval*path_indices[i][1]) / (previousIndex - path_indices[i][0])\n while j < previousIndex - path_indices[i][0]:\n path[previousIndex - j] = previous - j * step_size\n j = j + 1\n previousIndex = path_indices[i][0]\n previous = gamma_interval * path_indices[i][1]\n i = i + 1\n previous_n = n\n current_iteration = current_iteration + 1\n tg = np.linspace(0., 1., n).astype(np.float64)\n return tg, path[:n], min_energy_values[n-1][m-1]\n\n\n@jit(float64(float64[:], float64[:], float64[:]), cache=True, nopython=True)\ndef inner_product(t, p, q):\n i = 0\n result = 0\n while i < p.size-1:\n result = result + (p[i] * q[i] + p[i+1] * q[i+1]) / 2 * (t[i+1] - t[i])\n i = i + 1\n return result\n\n\n@jit(float64(float64[:], float64[:], float64[:]), cache=True, nopython=True)\ndef find_shape_distance(t, p, q):\n p_q = inner_product(t, p, q)\n p_p = inner_product(t, p, p)\n q_q = inner_product(t, q, q)\n temp = p_q / (p_p**0.5 * q_q ** 0.5)\n if temp > 1:\n temp = 1\n return np.arccos(temp) / pi\n\n\n@jit(float64(float64[:], float64[:], float64[:]), cache=True, nopython=True)\ndef find_error(tg, gammar, gammat):\n \"\"\"\n Function that finds the error between two gamma curves for 
checking.\n\n Parameters\n ----------\n tg : array of floats\n The domain of the two gamma curves.\n gammar : array of floats\n The y-values of the known gamma curve.\n gammat : array of floats\n The y-values of gamma curve to be tested.\n\n Returns\n -------\n float\n The weighted error.\n \"\"\"\n n = tg.size\n error = 1 / 2 * (tg[1] - tg[0]) * (gammar[1] - gammat[1]) ** 2 + 1 / 2 * (tg[n-1] - tg[n-2]) * (gammar[n-1] - gammat[n-1]) ** 2\n k = 2\n if n != gammar.size or n != gammat.size:\n raise IndexError\n while k < n-1:\n error = error + 1/2 * (gammar[k] - gammat[k]) ** 2 * (tg[k+1] - tg[k-1])\n k = k + 1\n error = error ** (1/2)\n return error\n","sub_path":"shapedist_old/elastic_linear_uniform.py","file_name":"elastic_linear_uniform.py","file_ext":"py","file_size_in_byte":14013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"154128512","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\n\nimport requests\n\nfrom main.filter.basic.age_range_filter import AgeRangeFilter, MAX_AGE_VALUE, MAX_RANGE_POSSIBLE\nfrom main.filter.filter import FILTERS_PROTO\nfrom main.resource.image import Image\nfrom main.tools.age_range import AgeRange\n\n__author__ = \"Ivan de Paz Centeno\"\n\n\nclass AgeEstimationFilter(AgeRangeFilter):\n \"\"\"\n Applies an age estimation filter to an image.\n \"\"\"\n\n def __init__(self, weight, api_url, age_range_to_cover=None, max_age=MAX_AGE_VALUE, min_age=0,\n max_range_distance_value=MAX_RANGE_POSSIBLE, strict_checks=False):\n \"\"\"\n Constructor of the age estimation filter.\n :param weight: weight for this face detection filter.\n :param api_url: URL to CVMLModulerized age estimator.\n :param age_range_to_cover: Age range object where estimated age_range should fit totally (with strict_checks)\n or partially.\n :param max_age: maximum value for detected age.\n :param min_age: minimum value for detected age\n :param max_range_distance_value: maximum range for the detected age\n :param strict_checks: if set to true, all the subfilters are strict. for example, with true, the age range\n estimated must fit inside the age_range_to_cover *completely*.\n \"\"\"\n AgeRangeFilter.__init__(self, weight, age_range_to_cover, max_age, min_age,\n max_range_distance_value, strict_checks)\n\n self.api_url = api_url\n\n def apply_to(self, image):\n \"\"\"\n Applies this filter to the specified image.\n :param image:\n :return: True if filter passes. False otherwise.\n \"\"\"\n\n response = requests.put(self.api_url, data=image.get_jpeg())\n\n if response.status_code != 200:\n raise Exception(\"Backend ({}) for filtering with {} is returning a bad response!\".format(self.api_url,\n AgeEstimationFilter.__name__))\n\n response_json = json.loads(response.text)\n if 'Age_range' not in response_json:\n raise Exception(\"This filter does not understand backend's language. 
It may be a different version.\")\n\n age_range = AgeRange.from_string(response_json['Age_range'])\n\n return self._age_range_check_filter(age_range)\n\n def get_type(self):\n return Image\n\n\nFILTERS_PROTO[AgeEstimationFilter.__name__] = AgeEstimationFilter\n","sub_path":"main/filter/advanced/age_estimation_filter.py","file_name":"age_estimation_filter.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"43474397","text":"import flopy.modflow as mf\n\n\nclass NwtAdapter:\n _data = None\n\n def __init__(self, data):\n self._data = data\n\n def validate(self):\n # should be implemented\n # for key in content:\n # do something\n # return some hints\n pass\n\n def is_valid(self):\n # should be implemented\n # for key in content:\n # do something\n # return true or false\n return True\n\n def merge(self):\n default = self.default()\n for key in self._data:\n default[key] = self._data[key]\n return default\n\n def get_package(self, _mf):\n content = self.merge()\n return mf.ModflowNwt(\n _mf,\n **content\n )\n\n @staticmethod\n def default():\n default = {\n \"headtol\": 1E-2,\n \"fluxtol\": 500,\n \"maxiterout\": 100,\n \"thickfact\": 1E-5,\n \"linmeth\": 1,\n \"iprnwt\": 0,\n \"ibotav\": 0,\n \"options\": 'COMPLEX',\n \"Continue\": False,\n \"dbdtheta\": 0.4,\n \"dbdkappa\": 1.e-5,\n \"dbdgamma\": 0.,\n \"momfact\": 0.1,\n \"backflag\": 1,\n \"maxbackiter\": 50,\n \"backtol\": 1.1,\n \"backreduce\": 0.70,\n \"maxitinner\": 50,\n \"ilumethod\": 2,\n \"levfill\": 5,\n \"stoptol\": 1.e-10,\n \"msdr\": 15,\n \"iacl\": 2,\n \"norder\": 1,\n \"level\": 5,\n \"north\": 7,\n \"iredsys\": 0,\n \"rrctols\": 0.0,\n \"idroptol\": 1,\n \"epsrn\": 1.e-4,\n \"hclosexmd\": 1e-4,\n \"mxiterxmd\": 50,\n \"extension\": 'nwt',\n \"unitnumber\": 32\n }\n\n return default\n\n @staticmethod\n def read_package(package):\n content = {\n \"fluxtol\": package.fluxtol,\n \"maxiterout\": package.maxiterout,\n \"thickfact\": package.thickfact,\n \"linmeth\": package.linmeth,\n \"iprnwt\": package.iprnwt,\n # \"ibotavg\": package.ibotavg,\n \"options\": package.options,\n # \"continue\": package.continue,\n \"dbdtheta\": package.dbdtheta,\n \"dbdkappa\": package.dbdkappa,\n \"dbdgamma\": package.dbdgamma,\n \"momfact\": package.momfact,\n \"backflag\": package.backflag,\n \"maxbackiter\": package.maxbackiter,\n \"backtol\": package.backtol,\n \"backreduce\": package.backreduce,\n \"maxitinner\": package.maxitinner,\n \"ilumethod\": package.ilumethod,\n \"levfill\": package.levfill,\n \"stoptol\": package.stoptol,\n \"msdr\": package.msdr,\n \"iacl\": package.iacl,\n \"norder\": package.norder,\n \"level\": package.level,\n \"north\": package.north,\n \"iredsys\": package.iredsys,\n \"rrctols\": package.rrctols,\n \"idroptol\": package.idroptol,\n \"epsrn\": package.epsrn,\n \"hclosexmd\": package.hclosexmd,\n \"mxiterxmd\": package.mxiterxmd,\n \"extension\": package.extension[0],\n \"unitnumber\": package.unit_number[0]\n }\n return content\n","sub_path":"FlopyAdapter/MfPackages/NwtAdapter.py","file_name":"NwtAdapter.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"530095648","text":"import requests\nimport json\nimport pprint\n#import certifi\n#certifi.where()\n#'/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/site-packages/certifi/cacert.pem'\n\n#AutobuildId main_precheckin: 4869\n#AutobuildId 
224_patch_precheckin: 28001 (main -2 precheckin, aka will this id remain the same?)\n#TestId main: testNapiliHomeAndLogin(ui.aura.components.selfservice.sites.NapiliPrecheckinTest): 70123236\n#TestId 224 patch: testNapiliHomeAndLogin(ui.aura.components.selfservice.sites.NapiliPrecheckinTest): 259252737\n#TestId main: testVerifyBuilderLoads(ui.aura.components.selfservice.sites.NapiliPrecheckinTest): 70197630\n\n\nclass PrecheckinAudit:\n\n    # the api endpoint. we will format/merge the testId we want to audit and the runCount (how many runs we want to audit)\n    apiEndPoint = 'https://lunadas.prod.ci.sfdc.net/api/results/v1/q/test-history?test-id={testId}&show-skipped-runs=true&load-all=false&start-index=0&row-count={runCount}'\n\n    # initializer / instance attributes\n    def __init__(self, testId, autoBuildName, runCount):\n\n        if runCount is None:\n            self.runCount = 50\n        else:\n            self.runCount = runCount\n\n        self.url = PrecheckinAudit.apiEndPoint.format(testId=testId, runCount=self.runCount)\n        self.autoBuildName = autoBuildName\n        self.testId = testId\n        self.successCount = 0\n        self.failCount = 0\n        self.runTimeList = []\n        self.runTimesTotal = 0\n        self.response = None\n        self.precheckins = []\n        self.failPrecheckins = []\n        self.averageRunTime = 0\n        self.testName = ''\n\n\n    def precheckinAudit(self):\n        self.response = requests.get(self.url, verify=False)\n        self.precheckins = json.loads(self.response.text)\n##        print(self.response.text)\n\n        # make it a function/method\n        for precheckin in self.precheckins:\n            runTimeStr = precheckin[\"running-time\"]\n\n            # make it a function/method - strip the 's' from the end and turn it back to an int\n            runTimeStrLen = len(runTimeStr)\n            runTime = int(runTimeStr[:(runTimeStrLen - 1)])\n\n            self.runTimesTotal += runTime\n\n            self.runTimeList.append(runTime)\n            if precheckin[\"status\"] == \"SUCCESS\":\n                self.successCount += 1\n            if precheckin[\"status\"] == \"FAILURE\":\n                self.failCount += 1\n                self.failPrecheckins.append({\n                    'owner': precheckin[\"owner\"],\n                    'changelist': precheckin['changelist'],\n                    'running-time': precheckin['running-time'],\n                    'uniqueTestName': precheckin['uniqueTestName']\n                })\n\n        self.averageRunTime = self.runTimesTotal / len(self.runTimeList)\n\n        self.testName = ''\n        if (len(self.precheckins) >= 1):\n            self.testName = self.precheckins[0]['uniqueTestName']\n        print(\"precheckin test: \" + self.autoBuildName + ': ' + self.testName)\n\n        print(\"precheckin run count: \" + str(self.runCount))\n        print(\"precheckin average runtime: \" + str(self.averageRunTime)[:3])\n        print(\"precheckin success count: \" + str(self.successCount))\n        print(\"precheckin failure count: \" + str(self.failCount))\n\n\n        #lFailPrecheckins = json.loads(failPrecheckins)\n        #pp = pprint.PrettyPrinter()\n        #pp.pprint(failPrecheckins)\n\n        for failPrecheckin in self.failPrecheckins:\n            print(\" **failure: \" + str(failPrecheckin))\n\n\nrunCount = 25\nautoBuildNameMain = 'main'\nautoBuildName224Patch = '224Patch'\n\ntestIdCommRunTimeMain = str(70123236)\ntestIdCommDesignTimeMain = str(70197630)\ntestIdCommRunTime224Patch = str(259252737)\ntestIdCommDesignTime224Patch = str(259252984)\n\ncommPrecheckinRuntimeMain = PrecheckinAudit(testIdCommRunTimeMain, autoBuildNameMain, runCount)\ncommPrecheckinRuntimeMain.precheckinAudit()\n\ncommPrecheckinDesignTimeMain = PrecheckinAudit(testIdCommDesignTimeMain, autoBuildNameMain, runCount)\ncommPrecheckinDesignTimeMain.precheckinAudit()\n\ncommPrecheckinRuntime224Patch = PrecheckinAudit(testIdCommRunTime224Patch, autoBuildName224Patch, 
runCount)\ncommPrecheckinRuntime224Patch.precheckinAudit()\n\ncommPrecheckinDesignTime224Patch = PrecheckinAudit(testIdCommDesignTime224Patch, autoBuildName224Patch, runCount)\ncommPrecheckinDesignTime224Patch.precheckinAudit()\n","sub_path":"sources/PrecheckinAudit.py","file_name":"PrecheckinAudit.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"634343571","text":"#-*- coding: utf8 -*-\nimport numpy as np\nimport scipy.odr.odrpack as odrpack\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom os import listdir\nimport pandas as pd\nfrom uncertainties import ufloat\nimport uncertainties.unumpy as unp\nfrom uncertainties.umath import *\nimport uncertainties as uc\n\nimport sys\nsys.path.insert(0,\"../../scripts\")\nfrom myPlot import ShorthandFormatter\nimport fit_functions as ft\n#def quadratic(x,a,b,c):\n# return (a*x+b)*x+c\n\ndef get_data():\n T = np.array([353, 363, 373, 383, 393])\n p = np.array([0.4736, 0.7010, 1.0133, 1.4327, 1.9853])*100 #converting to kPa\n vd = np.array([3.410, 2.360, 1.674, 1.211, 0.892])\n return T,p,vd\n\ndef compute_l(T, popt_p, popt_v):\n vf = 0.001\n #f = ft.cubic\n #l = T*dpdt(T,popt_p)*(f(T,*popt_v)-vf)\n \n f = cubic\n l = T*dpdt(T,popt_p)*(f(popt_v,T)-vf)\n return l\n\ndef dpdt(x, popt):\n return x*(x*3*popt[0] + 2*popt[1]) + popt[2]\n\ndef fit_odr(f,x,y, beta_0):\n model = odrpack.Model(f)\n data = odrpack.RealData(x, y)\n myodr = odrpack.ODR(data, model, beta0=beta_0)\n output = myodr.run()\n output.pprint()\n return output\n\ndef cubic(B,x):\n #return B[0]*x*x*x + B[1]*x*x + B[2]*x + B[3]\n return ((B[0]*x+B[1])*x+B[2])*x+B[3]\n\ndef plot_pressure_curve():\n #f = ft.cubic\n f = cubic\n T_normal = 373.16\n T_labor = ufloat(371, 0.5)\n T, p, vd = get_data()\n #popt_p, cov_p = curve_fit(f,T,p)\n #popt_v, cov_v = curve_fit(f,T,vd)\n out_p = fit_odr(f,T,p, [1,1,1,1])\n out_v = fit_odr(f,T,vd, [1,1,1,1])\n popt_p, chi2_p = out_p.beta, out_p.res_var\n popt_v, chi2_v = out_v.beta, out_v.res_var\n\n p_labor = f(popt_p, T_labor)\n print (p_labor)\n l_normal = compute_l(T_normal, popt_p, popt_v)\n l_labor = compute_l(T_labor, popt_p, popt_v)\n fmtr = ShorthandFormatter()\n l_labor_str = fmtr.format('{0:.1u}', l_labor)\n #l_normal_str = \n\n fig, ax = plt.subplots()\n ax2 = ax.twinx()\n #ax2.plot(T,vd, 'g.')\n\n ax.plot(T,p, 'bo', label = 'Werte der Dampfdruckkurve vom Skript')\n x_fit = np.linspace(min(T)-20, max(T)+20)\n #ax.plot(x_fit, f(x_fit, *popt_p), 'r-', label = 'p(T), gefittet mit $y=ax^3+bx^2+cx+d$: \\n $a={:.4f},\\ b={:.2f}$,\\n $c={:.2f},\\ d={:.2f}$'.format(popt_p[0], popt_p[1], popt_p[2], popt_p[3]))\n ax.plot(x_fit, f(popt_p, x_fit), 'r-', label = 'p(T), gefittet mit $y=ax^3+bx^2+cx+d$: \\n $a={:.4f},\\ b={:.2f}$,\\n $c={:.2f},\\ d={:.2f}$'.format(popt_p[0], popt_p[1], popt_p[2], popt_p[3]))\n #ax.plot(x_fit, f(x_fit, *popt_p), 'r-', label = 'p(T), gefittet mit $y=ax^2+bx+c$: \\n $a={:.2f},\\ b={:.2f}$,\\n $c={:.2f}$'.format(popt_p[1], popt_p[2], popt_p[3]))\n #ax.plot(x_fit, f(x_fit, *popt_v), 'y-')\n ax2.plot(x_fit, dpdt(x_fit, popt_p), 'g-', label = r'$\\frac{dp}{dt}$, erhalten durch Ableiten von p(T)')\n \n ax2.plot([], [],' ', label = '$l_{{Labor}} = {}\\: J/(kg\\,K)$ bei $T=371.0(5)\\,K$, \\n $l_{{normal}} = {:.1f}\\: J/(kg\\,K)$ bei $T=373.16\\,K$'.format(l_labor_str, l_normal))\n ax.set_xlabel('Temperatur in K', fontsize = 18)\n ax.set_ylabel('Druck in kPa', fontsize = 
18)\n ax.set_xbound(351, 395)\n ax.set_ybound(40, 210)\n ax.tick_params(labelsize = 18)\n\n ax2.set_ylabel(r'$\\frac{dp}{dt}\\ in\\ \\frac{kPa}{K}$', fontsize = 18, rotation = 90)\n ax2.tick_params(labelsize = 18)\n ax2.yaxis.labelpad = -5\n\n legend = ax.legend(fontsize = 13)\n legend.get_title().set_fontsize('14')\n ax2.legend(fontsize = 13, loc = 'center left')\n plt.tight_layout()\n plt.subplots_adjust(left = 0.14, right = 0.88, bottom = 0.13, top = 0.99)\n\n\nif __name__ == \"__main__\":\n plot_pressure_curve()\n plt.show()","sub_path":"Versuch9/scripts/clayperon.py","file_name":"clayperon.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"343435097","text":"#!/usr/bin/python\nimport json\nfrom pyfasta import Fasta\nfrom Bio import pairwise2\nfrom Bio.pairwise2 import format_alignment\n\nK12_name = {}\nK12_to_LT2 = {}\nLT2_name= {}\nwith open(\"K12_Ty2.csv\") as f:\n\tf.readline()\n\tfor line in f:\n\t\tline = line.strip()\n\t\tcell = line.split(\";\")\n\t\tu_k12 = cell[0].split(\",\")\n\t\tu_lt2 = cell[1].split(\",\")\n\t\tn_k12 = cell[2].split(\",\")\n\t\tn_lt2 = cell[3].split(\",\")\n\t\tfor n in n_k12:\n\t\t\tn = n.lower()\n\t\t\tif n not in K12_name:\n\t\t\t\tK12_name[n] = []\n\t\t\tfor u in u_k12:\n\t\t\t\tif u not in K12_name[n]:\n\t\t\t\t\tK12_name[n].append(u)\n\t\tfor n in n_lt2:\n\t\t\tn = n.lower()\n\t\t\tif n not in LT2_name:\n\t\t\t\tLT2_name[n] = []\n\t\t\tfor u in u_lt2:\n\t\t\t\tif u not in LT2_name[n]:\n\t\t\t\t\tLT2_name[n].append(u)\n\t\tfor u1 in u_k12:\n\t\t\tif u1 not in K12_to_LT2:\n\t\t\t\tK12_to_LT2[u1] = []\n\t\t\tfor u2 in u_lt2:\n\t\t\t\tif u2 not in K12_to_LT2[u1]:\n\t\t\t\t\tK12_to_LT2[u1].append(u2)\nTU_to_gene = {}\nwith open(\"TUSet.txt\") as f:\n\tfor line in f:\n\t\tif line[0] == \"#\":\n\t\t\tcontinue\n\t\tline = line.strip()\n\t\tcell = line.split(\"\\t\")\n\t\tTU_to_gene[cell[1]] = []\n\t\tfor n in cell[3].split(\",\"):\n\t\t\tn = n.lower()\n\t\t\tif n in K12_name:\n\t\t\t\tfor u in K12_name[n]:\n\t\t\t\t\tif u not in TU_to_gene[cell[1]]:\n\t\t\t\t\t\tTU_to_gene[cell[1]].append(u)\nUniprot_to_RefSeq = {}\nwith open(\"uniprot2refseq.tab\") as f:\n\tf.readline()\n\tfor line in f:\n\t\tline = line.strip()\n\t\tcell = line.split(\"\\t\")\n\t\tif cell[0] not in Uniprot_to_RefSeq:\n\t\t\tUniprot_to_RefSeq[cell[0]] = []\n\t\tif len(cell) > 1:\n\t\t\tfor rf in cell[1].split(\";\"):\n\t\t\t\tif rf != \"\":\n\t\t\t\t\trf = rf.split(\".\")[0]\n\t\t\t\t\tif rf not in Uniprot_to_RefSeq[cell[0]]:\n\t\t\t\t\t\tUniprot_to_RefSeq[cell[0]].append(rf)\n\n#MAPPING = json.load(open(\"MAPPIG.json\"))\n#PWMs = json.load(open(\"pwm.json\"))\nPROMOTER_up = Fasta(\"promoter_upstream.fasta\")\n\nTFBS = []\nwith open(\"BindingSiteSet.txt\") as f:\n\tfor line in f:\n\t\tif line[0] == \"#\":\n\t\t\tcontinue\n\t\tTF = []\n\t\tTARGET = []\n\t\tline = line.strip()\n\t\tcell = line.split(\"\\t\")\n\t\t#tf\n\t\tif cell[1].lower() not in K12_name:\n\t\t\tcontinue\n\t\tfor tf_u in K12_name[cell[1].lower()]:\n\t\t\tif tf_u in K12_to_LT2:\n\t\t\t\tfor u in K12_to_LT2[tf_u]:\n\t\t\t\t\tif u not in TF:\n\t\t\t\t\t\tTF.append(u)\n\t\t#target\n\t\tif cell[7] not in TU_to_gene:\n\t\t\tcontinue\n\t\tfor p in TU_to_gene[cell[7]]:\n\t\t\tif p in K12_to_LT2:\n\t\t\t\tfor u in K12_to_LT2[p]:\n\t\t\t\t\tif u not in TARGET:\n\t\t\t\t\t\tTARGET.append(u)\n\t\tif len(TF) == 0 or len(TARGET) == 0:\n\t\t\tcontinue\n\t\t####\n\t\tif len(cell) != 13:\n\t\t\tcontinue\n\t\tBindingSite = \"\"\n\t\tfor base 
in cell[11]:\n\t\t\tif base.isupper():\n\t\t\t\tBindingSite += base\n\t\t####\n\t\tfor T in TARGET:\n\t\t\tfor RefSeq in Uniprot_to_RefSeq[T]:\n\t\t\t\t\tif RefSeq in PROMOTER_up:\n\t\t\t\t\t\tif len(PROMOTER_up[RefSeq]) > len(BindingSite):\n\t\t\t\t\t\t\tfor a in pairwise2.align.localms(str(PROMOTER_up[RefSeq]).lower(), BindingSite.lower(), 2, -2, -10, -1):\n\t\t\t\t\t\t\t\tif (a[4] - a[3]) > len(BindingSite)/2 and (a[2]/(a[4] - a[3])) >= 0.8:\n\t\t\t\t\t\t\t\t\tHIT = {}\n\t\t\t\t\t\t\t\t\tHIT['TF'] = TF\n\t\t\t\t\t\t\t\t\tHIT['TARGET'] = T\n\t\t\t\t\t\t\t\t\tHIT['tfbs'] = a[0][a[3]:a[4]].replace(\"-\", \"\")\n\t\t\t\t\t\t\t\t\tTFBS.append(HIT)\n\nwith open(\"TFBS_Ty2.csv\", \"w\") as f:\n\tf.write(\"tf;target;tfbs\\n\")\n\tfor t in TFBS:\n\t\tfor tt in t[\"TF\"]:\n\t\t\tf.write(\"%s;%s;%s\\n\" % (tt, t[\"TARGET\"], t[\"tfbs\"]) )","sub_path":"TFBSextrapolation.py","file_name":"TFBSextrapolation.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"401028066","text":"#!/usr/bin/env python3\n#author:Alnk(李成果)\nimport socket,hashlib,json,os,sys\n\nclass Client(object):\n    '''FTP client'''\n\n    def __init__(self):\n        self.client = socket.socket()\n\n    def __view_bar(self,received_size,filesize):\n        '''Progress bar'''\n        rate = received_size / filesize\n        rate_num = int(rate * 100)\n        r = '\\r%s>%d%%' % ('=' * rate_num, rate_num,)\n        sys.stdout.write(r)\n        sys.stdout.flush()\n\n    def __md5sum(self,filename,*args):\n        '''Get the md5 checksum of a file'''\n        if os.path.isfile(filename):\n            f = open(filename,'rb')\n            m = hashlib.md5()\n            for line in f:\n                m.update(line)\n            filename_md5 = m.hexdigest()\n            return filename_md5\n        else:\n            return\n\n    def connect(self,ip,port):\n        '''Connect to the server'''\n        self.client.connect((ip,port))\n\n    def auth(self,name,password):\n        '''Authenticate with md5-hashed credentials'''\n        m1 = hashlib.md5()\n        m2 = hashlib.md5()\n        m1.update(name.encode('utf-8'))\n        m2.update(password.encode('utf-8'))\n        name_md5 = m1.hexdigest()\n        password_md5 = m2.hexdigest()\n        dic1 = {'action':'auth','name':name_md5,'password':password_md5,}\n        self.client.send( json.dumps(dic1).encode('utf-8') )\n        server_receive = self.client.recv(1024)\n        if server_receive.decode() == '200':\n            return True\n        else:\n            print(server_receive.decode())\n            return False\n\n    def help(self):\n        msg = '''\n        Only the following commands are supported:\n        get filename\n        put filename\n        cd dirname\n        ls (takes only . or a directory name)\n        login out\n        '''\n        print(msg)\n\n    def put(self,*args):\n        '''Upload'''\n        dic = args[0]\n        #print('dic:',dic)\n        filename = dic['name']\n        print('dic:====',dic)\n        if os.path.isfile(filename):\n            filesize = os.stat(filename).st_size\n            dic['size'] = filesize\n            dic['md5'] = self.__md5sum(filename)\n\n            m1 = hashlib.md5()\n            m1.update( name.encode('utf-8') )\n            user_name_md5 = m1.hexdigest()\n\n            dic['user_name'] = user_name_md5\n            print('send:',dic)\n            self.client.send( json.dumps(dic).encode('utf-8') )\n            # Avoid sticky packets: wait for the server's status response\n            server_response = json.loads( self.client.recv(1024).decode() )\n\n            server_state = server_response['state']\n\n            if server_state:\n\n                #print('server_response:',server_response.decode())\n\n                f = open(filename,'rb')\n                received_size = 0\n                for line in f:\n                    self.client.send(line)\n                    received_size += len(line)\n                    self.__view_bar(received_size,filesize)\n                else:\n                    print('Upload complete')\n                f.close()\n\n            else:\n                print('File is too large to upload')\n                print('quota:',server_response['quota'])\n\n        else:\n            print('File does not exist!')\n\n    def get(self,*args):\n        '''Download'''\n        DIR_PATH = '/home/%s/' % name\n        dic = args[0]\n        filename = dic['name']\n        print('dic',dic)\n        self.client.send( json.dumps(dic).encode('utf-8') )\n        server_response_dic = json.loads( self.client.recv(1024).decode() )\n        server_response_filesize = server_response_dic['size']\n        server_response_md5 = server_response_dic['md5']\n        state = server_response_dic['state']\n        print('state:',state)\n        # Check whether the requested file exists on the server\n        if state:\n            # Avoid sticky packets\n            self.client.send(str('True').encode('utf-8'))\n            received_size = 0\n            f = open( DIR_PATH + filename,'wb')\n            while received_size < server_response_filesize:\n                data = self.client.recv(1024)\n                f.write(data)\n                received_size += len(data)\n                # Progress bar\n                self.__view_bar(received_size,server_response_filesize)\n            else:\n                f.close()\n            print('\\n[%s] download complete!'%filename)\n            client_md5 = self.__md5sum(DIR_PATH + filename)\n            print('client_md5:',client_md5)\n            print('server_md5:',server_response_md5)\n        else:\n            print('[%s] does not exist on the server'%filename)\n\n    def login(self,*args):\n        self.client.close()\n        sys.exit()\n\n    def ls(self,*args):\n        '''List directory contents'''\n        #print('name:',name)\n        cmd_dic = args[0]\n        dir_name = '/home/%s/' %name\n        cmd_dic['name'] = dir_name + cmd_dic['name']\n        print('cmd_dic:',cmd_dic)\n        self.client.send( json.dumps( cmd_dic ).encode('utf-8') )\n        server_data = self.client.recv(1024)\n        server_data_dic = json.loads( server_data.decode() )\n        total_size = server_data_dic['size']\n        self.client.send( b'OK' )\n        received_size = 0\n        data_str = b''\n        while received_size < total_size:\n            data = self.client.recv(1024)\n            data_str += data\n            received_size += len( data )\n        print( data_str.decode() )\n\n    def cd(self,*args):\n        '''Change directory'''\n        print('cd')\n\n    def view(self):\n        '''Interactive menu'''\n        while True:\n            cmd = input('Enter command>>>:').split()\n            if len(cmd) > 1:\n                cmd_dic = {'action':cmd[0],'name':cmd[1]}\n                if hasattr(self,cmd[0]):\n                    func = getattr(self,cmd[0])\n                    func(cmd_dic)\n                else:\n                    self.help()\n            else:\n                print('Invalid command')\n\nif __name__ == '__main__':\n    IP,HOST = 'localhost',8889\n    c = Client()\n    c.connect(IP,HOST)\n    name = 'alnk'\n    password = '123456'\n    tag = c.auth(name,password)\n    if tag:\n        c.view()\n    else:\n        print('Authentication failed')","sub_path":"day08/02作业/ftp_server/data/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"}
{"seq_id":"170620259","text":"from django.shortcuts import get_object_or_404\n\nfrom rest_framework import serializers\n\nfrom .models import Category, Comment, Genre, Review, 
Title\nfrom .validators import custom_slug_validation\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n slug = serializers.SlugField(required=False)\n\n class Meta:\n exclude = ('id', )\n model = Category\n\n def validate_slug(self, data):\n return custom_slug_validation(data, Category)\n\n\nclass GenreSerializer(serializers.ModelSerializer):\n slug = serializers.SlugField(required=False)\n\n class Meta:\n exclude = ('id', )\n model = Genre\n\n def validate_slug(self, data):\n return custom_slug_validation(data, Genre)\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(\n slug_field='username',\n read_only=True,\n default=serializers.CurrentUserDefault())\n\n class Meta:\n fields = ('id', 'author', 'text', 'pub_date')\n model = Comment\n\n\nclass TitleSerializer(serializers.ModelSerializer):\n year = serializers.IntegerField(required=False)\n rating = serializers.FloatField(read_only=True)\n description = serializers.CharField(required=False)\n\n genre = serializers.SlugRelatedField(many=True,\n slug_field='slug',\n queryset=Genre.objects.all())\n category = serializers.SlugRelatedField(slug_field='slug',\n queryset=Category.objects.all())\n\n class Meta:\n fields = (\n 'id',\n 'name',\n 'year',\n 'description',\n 'genre',\n 'category',\n 'rating',\n )\n model = Title\n\n def create(self, validated_data):\n genres = validated_data.pop('genre')\n title = Title.objects.create(**validated_data)\n for genre in genres:\n title.genre.add(genre)\n return title\n\n def to_representation(self, instance):\n representation = super(TitleSerializer,\n self).to_representation(instance)\n\n # Present genres in a readable way\n title_genres = Genre.objects.filter(slug__in=representation['genre'])\n representation['genre'] = title_genres.values('name', 'slug')\n\n # present category in a readable way\n category = get_object_or_404(Category, slug=representation['category'])\n\n title_category = {'name': category.name, 'slug': category.slug}\n representation['category'] = title_category\n\n return representation\n\n\nclass ReviewSerializer(serializers.ModelSerializer):\n author = serializers.SlugRelatedField(\n slug_field='username',\n read_only=True,\n default=serializers.CurrentUserDefault())\n title = serializers.SlugRelatedField(\n slug_field='name',\n read_only=True,\n )\n\n class Meta:\n fields = '__all__'\n read_only_fields = ('author', 'title')\n model = Review\n\n def validate(self, data):\n current_user = self.context['request'].user\n if Review.objects.filter(\n title=self.context['title_id'], author=current_user\n ).exists() and self.context['request'].method == 'POST':\n raise serializers.ValidationError(\n 'Вы уже оставляли отзыв на это произведение')\n return data\n","sub_path":"api_media/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"482209818","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import getparam, rmparam, set_template_name, msg, errmsg, site, tname\n\nfrom collections import defaultdict\n\ncoiner_count = defaultdict(set)\n\ndef count_coiners(index, pagename, text):\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagename, txt))\n def errandpagemsg(txt):\n errandmsg(\"Page %s %s: %s\" % (index, pagename, txt))\n\n if \"coin\" not in text:\n return\n\n pagemsg(\"Processing\")\n\n notes = []\n\n parsed = 
blib.parse_text(text)\n for t in parsed.filter_templates():\n tn = tname(t)\n if tn in [\"coin\", \"coinage\"]:\n lang = getparam(t, \"1\")\n coiner = getparam(t, \"2\")\n coiner_count[(lang, coiner)].add(pagename)\n pagemsg(\"Count for (%s, %s) is now %s\" % (lang, coiner, len(coiner_count[(lang, coiner)])))\n\ndef add_remove_nobycat(index, pagename, text):\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagename, txt))\n def errandpagemsg(txt):\n errandmsg(\"Page %s %s: %s\" % (index, pagename, txt))\n\n if \"coin\" not in text:\n return\n\n pagemsg(\"Processing\")\n\n notes = []\n\n parsed = blib.parse_text(text)\n for t in parsed.filter_templates():\n tn = tname(t)\n origt = str(t)\n if tn in [\"coin\", \"coinage\"]:\n lang = getparam(t, \"1\")\n coiner = getparam(t, \"2\")\n if len(coiner_count[(lang, coiner)]) == 1:\n if not getparam(t, \"nobycat\") and not getparam(t, \"nocat\"):\n t.add(\"nobycat\", \"1\")\n notes.append(\"add nobycat=1 to {{coinage|%s|%s}}\" % (lang, coiner))\n elif len(coiner_count[(lang, coiner)]) > 1:\n if getparam(t, \"nocat\"):\n pagemsg(\"WARNING: Lang %s, coiner %s has %s total words coined but has nocat=1: %s\" % (\n lang, coiner, len(coiner_count[(lang, coiner)]), str(t)))\n elif getparam(t, \"nobycat\"):\n rmparam(t, \"nobycat\")\n notes.append(\"remove nobycat= from {{coinage|%s|%s}}\" % (lang, coiner))\n if str(t) != origt:\n pagemsg(\"Replaced %s with %s\" % (origt, str(t)))\n\n return str(parsed), notes\n\nparser = blib.create_argparser(\"Add or remove nobycat= as necessary to/from {{coinage}}\", include_pagefile=True,\n include_stdin=True)\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nblib.do_pagefile_cats_refs(args, start, end, count_coiners, edit=True, stdin=True)\nblib.do_pagefile_cats_refs(args, start, end, add_remove_nobycat, edit=True, stdin=True)\n","sub_path":"add_coinage_nobycat.py","file_name":"add_coinage_nobycat.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"358418476","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 30 17:31:07 2021\n\n@author: stefa\n\"\"\"\nimport json\nimport sys\nimport multiprocessing\nimport time\nfrom s3run import S3RunCoordinator\nfrom jsonf import json_add_meta_sequence\nfrom datetime import datetime\n\ntry:\n file_tag = sys.argv[sys.argv.index('--tag') + 1]\nexcept ValueError:\n file_tag = 'test_haz_output'\n\nMAX_REASONABLE_AGG_TIME = 3600 # if it's been more than MAX_REASONABLE_AGG_TIME seconds since the start mark was placed, an aggregator probably stopped partway through\nSTART_MARK = 'aggregator_start_mark.json'\nEND_MARK = 'aggregator_end_mark.json'\n\ndef agg_all_hazard_fragments(hazard_folder):\n print(f'called on {hazard_folder}')\n s3_coord = S3RunCoordinator() # use default bucket\n s3_coord.cd_down('variants')\n with s3_coord.in_folder(hazard_folder):\n for fragment_folder in s3_coord.list_folder():\n with s3_coord.in_folder(fragment_folder):\n if not s3_coord.key_exists('helper_excluded_variants'):\n continue # TODO: remove because right now this skips all fragments with no helper results. 
(turn this into a warning instead of a skip?)\n if s3_coord.key_exists(END_MARK):\n print(f'{END_MARK} exists; skipping {s3_coord.cwf()}')\n continue # this folder was already finished by a different aggregator\n start_time = time.time()\n if s3_coord.key_exists(START_MARK):\n start_mark_json = s3_coord.get_obj_as_json(START_MARK)\n if start_mark_json['agg_start_time'] + MAX_REASONABLE_AGG_TIME > start_time:\n print(f'{START_MARK} exists and was created less than the max time ({MAX_REASONABLE_AGG_TIME}s) ago; skipping {s3_coord.cwf()}')\n continue # another aggregator is probably working on this folder right now\n else:\n print(f'{START_MARK} exists, but it was created more than the max time ({MAX_REASONABLE_AGG_TIME}s) ago; restarting aggregator job.')\n s3_coord.save_json_obj({'agg_start_time':start_time}, START_MARK)\n print(f'created {START_MARK}. starting to aggregate {s3_coord.cwf()}')\n variants_fragset = s3_coord.get_obj_as_json('variants.fragset.json')\n removed_variants = []\n variants_fragset['meta']['removed_variants'] = removed_variants\n if s3_coord.key_exists('helper_excluded_variants'):\n with s3_coord.in_folder('helper_excluded_variants'):\n for exclusions_key in s3_coord.list_folder():\n exclusion_list = s3_coord.get_obj_as_json(exclusions_key)\n for variant in exclusion_list:\n try:\n variants_fragset['sequences'].remove(variant)\n removed_variants.append(variant)\n except ValueError:\n pass # happens when multiple helpers excluded the same variant\n # keep metadata\n # if helper_excluded_variants is there, helper_meta_updates should also be there\n with s3_coord.in_folder('helper_meta_updates'):\n for meta_updates_key in s3_coord.list_folder():\n meta_updates = s3_coord.get_obj_as_json(meta_updates_key)\n for variant, seq_meta in meta_updates.items():\n json_add_meta_sequence(variants_fragset, variant, seq_meta)\n end_time = time.time()\n end_mark_json = {'agg_end_datetime':str(datetime.now()), 'agg_end_time':end_time, 'total_agg_time_mins':(end_time - start_time)/60}\n variants_fragset['meta'].update(end_mark_json)\n with s3_coord.in_folder('aggregated', from_run_root=True):\n s3_coord.save_json_obj(variants_fragset, f'agg_{hazard_folder[:-1]}_{fragment_folder[:-1]}.fragset.json')\n s3_coord.save_json_obj(end_mark_json, END_MARK)\n print(f'created {END_MARK}. 
finished aggregating {s3_coord.cwf()}')\n\nif __name__ == '__main__':\n    cpus = multiprocessing.cpu_count()\n    print(f'running on {cpus} cores')\n    s3_coord = S3RunCoordinator() # use default bucket\n    picks_fragset = s3_coord.get_obj_as_json('fragment_picks.fragset.json')\n    hazard_folder_names = list(haz_props['path_name'] for haz_props in picks_fragset['meta']['hazard_properties'].values())\n    print(hazard_folder_names)\n    with multiprocessing.Pool(processes=cpus) as pool:\n        list(pool.imap_unordered(agg_all_hazard_fragments, hazard_folder_names))\n\n#exit() #TODO: things after this do not execute at this time\n\n#directory = [obj.key for obj in resource_bucket.objects.all() if file_tag in obj.key and 'fragset' in obj.key and 'meta' not in obj.key]\n#\n#print(\"printing directory lengths\")\n#print(len(directory))\n#print(len(meta_directory))\n##fi = directory[0]\n#aggregated_seqs_dict= {'sequences' : [], 'sequence_meta' : {}, 'meta' : {}}\n#\n#for fi in directory:\n#    frag_picks_file = boto3.resource('s3').Object('sdna-resources', fi).get()['Body'].read().decode('utf-8')\n#    active_seq_dict = json.loads(frag_picks_file)\n#    aggregated_seqs_dict['sequences'].extend(active_seq_dict['sequences'])\n#    aggregated_seqs_dict['sequence_meta'].update(active_seq_dict['sequence_meta'])\n#    index = [x for x in range(len(directory)) if directory[x]==fi]\n#    print(\"Now on number \"+ str(index[0]))\n#\n##frag_picks_file = boto3.resource('s3').Object('sdna-resources', fi).get()['Body'].read().decode('utf-8')\n#\n##active_list = json.loads(frag_picks_file)\n#\n#s3object = boto3.resource('s3').Object('sdna-resources', output_path)\n#s3object.put(Body=(bytes(json.dumps(aggregated_seqs_dict).encode('utf-8'))))\n","sub_path":"aggregate_variants.py","file_name":"aggregate_variants.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"206314666","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 26 17:02:50 2020\n\n@author: DH1074TX\n\"\"\"\n\n#%%\nimport re\nfile = open(\"file.txt\",\"r\")\n\nl = file.read()\nl = re.sub(\"[:,.?()#$]\",\"\",l).split()\n\nd = {}\nfor i in l:\n    if i in d.keys():\n        d[i] = d[i]+1\n    else:\n        d[i] = 1\nfor k,v in d.items():\n    print(k , \" \",v,\" times\" )\n\nfile.close()\n\n\n# %%","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"603365219","text":"def my_first_func():\n    \"\"\"This is my first function in Python\"\"\"\n#    import sys\n    string = input('Enter the string:')\n    symbol = input('Enter the symbol:')\n    for char in string:\n        if char == symbol:\n            print('{}: {}'.format(symbol, string.count(symbol)))\n            break\n#            sys.exit()  # exit the program\n    if not string.count(symbol):\n        print('No such symbol')\n\n## Function with input parameters and type hints\ndef my_second_func(string:str, symbol:str) -> int:\n    \"\"\"This is my second function in Python\"\"\"\n    for char in string:\n        if char == symbol:\n            return string.count(char)\n    return \"Not found\"\n    \n\ndef main():\n    my_first_func()\n    a = input(\"Enter a string: \")\n    b = input(\"Enter a char: \")\n    print('Result of second function: ', my_second_func(a, b))\n\n\nif __name__ == '__main__':\n    
main()\n","sub_path":"Module4/examples/module_func_ex.py","file_name":"module_func_ex.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"499774865","text":"# coding=utf-8\n\"\"\"Extract words from strings.\"\"\"\n\nimport shelve\nfrom argparse import ArgumentParser\nfrom pkg_resources import resource_filename, resource_exists\n\n\ndef conditional_perms(s, r, cond=lambda s: True, pre=\"\"):\n \"\"\"Generate r-length permutations of s ignoring branches that don't\n satisfy the condition.\n\n The condition is only applied on prefixes, and not the permutations.\n \"\"\"\n if r == 0:\n # The empty string is the only 0-length permutation.\n yield \"\"\n return\n if r > len(s) or (pre != \"\" and not cond(pre)):\n # Don't generate any permutations.\n return\n for i in range(len(s)):\n # Recursively generate permutations with a different starting point.\n start = s[i]\n subs = s[:i] + s[i + 1:]\n for perm in conditional_perms(subs, r - 1, cond, pre + start):\n yield start + perm\n\n\nclass WordFinder:\n\n \"\"\"Wrapper around the find_words method.\"\"\"\n\n def __init__(self):\n # Check if the databases exist.\n if (not resource_exists(__name__, \"data/prefixes.db\") or\n not resource_exists(__name__, \"data/words.db\")):\n raise RuntimeError(\"can't find databases\")\n\n # Generate the shelve objects. The '.db' suffix needs to be removed\n # from the filenames.\n self._prefix_db = shelve.open(resource_filename(__name__,\n \"data/prefixes.db\")[:-3])\n self._word_db = shelve.open(resource_filename(__name__,\n \"data/words.db\")[:-3])\n\n def find_words(self, s, r):\n \"\"\"Find r-length words from the given string.\"\"\"\n seen = set()\n # Only prefixes of length at least 2 are stored; so the condition\n # needs to exclude them.\n for perm in conditional_perms(s, r,\n lambda x: len(x) < 2 or x in self._prefix_db):\n if perm in self._word_db and perm not in seen:\n seen.add(perm)\n yield perm\n\n\ndef main():\n # Parse command line arguments.\n arg_parser = ArgumentParser(description=\"Extract words from a string.\")\n arg_parser.add_argument(\n \"-s\", \"--string\",\n type=str,\n metavar=\"str\",\n required=True,\n help=\"extract words from given string\"\n )\n arg_parser.add_argument(\n \"-r\", \"--size\",\n type=int,\n metavar=\"num\",\n required=True,\n help=\"extract words of given size\"\n )\n args = arg_parser.parse_args()\n\n word_finder = WordFinder()\n for word in word_finder.find_words(args.string.lower(), args.size):\n print(word)\n","sub_path":"wordfinder/wordfinder.py","file_name":"wordfinder.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"56551753","text":"\"\"\"Restaurant rating lister.\"\"\"\nFILE_NAME = \"scores.txt\"\n\ndef restaurant_rating(FILE_NAME):\n #this returns a list from user input\n user_rating = user_input()\n\n #this combs through the FILE_NAME and add items to a dictionary\n rest_data = file_to_dict(FILE_NAME)\n\n #adding user input to the restaurant data dictionary\n user_input_name, user_input_score = user_rating\n rest_data[user_input_name] = user_input_score\n\n sorted_rest_data = sorted((rest_data.items()))\n for restaurant in sorted_rest_data:\n restaurant_name = restaurant[0]\n restaurant_rating = restaurant[1]\n\n print(f\"{restaurant_name} is rated at {restaurant_rating}\")\n\ndef file_to_dict(filename):\n new_dict = {}\n with open(filename) as file:\n for 
line in file:\n key, value = line.rstrip().split(\":\")\n new_dict[key] = value\n\n return new_dict\n\n\ndef user_input():\n ask_for_input = False\n while not ask_for_input:\n user_name = input(\"Enter restaurant name:\").title()\n user_score = int(input(\"Enter score:\"))\n if user_score < 1 or user_score > 5:\n print(\"Not a valid input, please enter again\")\n else:\n ask_for_input = True\n return [user_name,user_score]\n\nrestaurant_rating(FILE_NAME)","sub_path":"ratings.py","file_name":"ratings.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"446821164","text":"# -*- coding: utf-8 -*-\nfrom ecore import models, fields, api\n\nclass inter_company_rules_configuration(models.TransientModel):\n\n _inherit = 'base.config.settings'\n\n company_id = fields.Many2one('res.company', string='Select Company',\n help='Select company to setup Inter company rules.')\n rule_type = fields.Selection([('so_and_po', 'SO and PO setting for inter company'),\n ('invoice_and_refunds', 'Create Invoice/Refunds when encoding invoice/refunds')],\n help='Select the type to setup inter company rules in selected company.')\n so_from_po = fields.Boolean(string='Create Sale Orders when buying to this company',\n help='Generate a Sale Order when a Purchase Order with this company as vendor is created.')\n po_from_so = fields.Boolean(string='Create Purchase Orders when selling to this company',\n help='Generate a Purchase Order when a Sale Order with this company as customer is created.')\n auto_validation = fields.Boolean(string='Sale/Purchase Orders Auto Validation',\n help='''When a Sale Order or a Purchase Order is created by a multi\n company rule for this company, it will automatically validate it.''')\n warehouse_id = fields.Many2one('stock.warehouse', string='Warehouse For Purchase Orders',\n help='Default value to set on Purchase Orders that will be created based on Sale Orders made to this company.')\n\n @api.onchange('rule_type')\n def onchange_rule_type(self):\n if self.rule_type == 'invoice_and_refunds':\n self.so_from_po = False\n self.po_from_so = False\n self.auto_validation = False\n\n elif self.rule_type == 'so_and_po':\n self.invoice_and_refunds = False\n\n @api.onchange('company_id')\n def onchange_company_id(self):\n if self.company_id:\n rule_type = False\n if self.company_id.so_from_po or self.company_id.po_from_so or self.company_id.auto_validation:\n rule_type = 'so_and_po'\n elif self.company_id.auto_generate_invoices:\n rule_type = 'invoice_and_refunds'\n\n self.rule_type = rule_type\n self.so_from_po = self.company_id.so_from_po\n self.po_from_so = self.company_id.po_from_so\n self.auto_validation = self.company_id.auto_validation\n self.warehouse_id = self.company_id.warehouse_id.id\n\n @api.multi\n def set_inter_company_configuration(self):\n if self.company_id:\n vals = {\n 'so_from_po': self.so_from_po,\n 'po_from_so': self.po_from_so,\n 'auto_validation': self.auto_validation,\n 'auto_generate_invoices': True if self.rule_type == 'invoice_and_refunds' else False,\n 'warehouse_id': self.warehouse_id.id\n }\n self.company_id.write(vals)\n","sub_path":"ecore-server/ecore/addons/inter_company_rules/models/res_config.py","file_name":"res_config.py","file_ext":"py","file_size_in_byte":2861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} 
+{"seq_id":"343656328","text":"##############################################################################\n#\n# OSIS stands for Open Student Information System. It's an application\n# designed to manage the core business of higher education institutions,\n# such as universities, faculties, institutes and professional schools.\n# The core business involves the administration of students, teachers,\n# courses, programs and so on.\n#\n# Copyright (C) 2015-2018 Université catholique de Louvain (http://www.uclouvain.be)\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# A copy of this license - GNU General Public License - is available\n# at the root of the source code of this program. If not,\n# see http://www.gnu.org/licenses/.\n#\n##############################################################################\n\nfrom base.models.entity import Entity\nfrom base.models.entity_version import EntityVersion\n\n\ndef get_entities_ids(entity_acronym, with_entity_subordinated):\n if entity_acronym:\n entity_versions = EntityVersion.objects.filter(acronym__iregex=entity_acronym)\n entities_ids = set(entity_versions.values_list('entity', flat=True))\n\n if with_entity_subordinated:\n list_descendants = EntityVersion.objects.get_tree(\n Entity.objects.filter(entityversion__acronym__iregex=entity_acronym)\n )\n entities_ids |= {row[\"entity_id\"] for row in list_descendants}\n return list(entities_ids)\n return []\n","sub_path":"base/business/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"67"} +{"seq_id":"15754708","text":"import sys, os\nimport logging\n\nfrom label_studio_ml.model import LabelStudioMLBase\nfrom label_studio.core.settings.base import DATA_UNDEFINED_NAME\n\nsys.path.append('../src/')\nimport video\nimport audio\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass HugASR(LabelStudioMLBase):\n\n def __init__(self, **kwargs):\n super(HugASR, self).__init__(**kwargs)\n\n # Find TextArea control tag and bind ASR model to it\n self.from_name, self.to_name, self.value = self._bind_to_textarea()\n\n model, processor = audio.get_model_and_processor()\n self.model = model\n self.processor = processor\n\n def predict(self, tasks, **kwargs):\n output = []\n audio_paths = []\n transcriptions = []\n for task in tasks:\n audio_url = task['data'].get(self.value) or task['data'].get(DATA_UNDEFINED_NAME)\n audio_path = self.get_local_path(audio_url)\n print(audio_path)\n print(audio_url)\n audio_paths.append(audio_path)\n transcriptions.append(\n audio.predict_audio(\n self.model,\n self.processor,\n audio_path\n )\n )\n\n for transcription in transcriptions:\n output.append({\n 'result': [{\n 'from_name': self.from_name,\n 'to_name': self.to_name,\n 'type': 'textarea',\n 'value': {\n 'text': [transcription]\n }\n }],\n 'score': 1.0\n })\n return output\n\n def _bind_to_textarea(self):\n from_name, to_name, value = None, None, None\n for tag_name, tag_info in self.parsed_label_config.items():\n if tag_info['type'] == 'TextArea':\n 
from_name = tag_name\n if len(tag_info['inputs']) > 1:\n logger.warning(\n 'ASR model works with single Audio or AudioPlus input, '\n 'but {0} found: {1}. We\\'ll use only the first one'.format(\n len(tag_info['inputs']), ', '.join(tag_info['to_name'])))\n if tag_info['inputs'][0]['type'] not in ('Audio', 'AudioPlus'):\n raise ValueError('{0} tag expected to be of type Audio or AudioPlus, but type {1} found'.format(\n tag_info['to_name'][0], tag_info['inputs'][0]['type']))\n to_name = tag_info['to_name'][0]\n value = tag_info['inputs'][0]['value']\n if from_name is None:\n raise ValueError('ASR model expects