diff --git "a/2383.jsonl" "b/2383.jsonl" new file mode 100644--- /dev/null +++ "b/2383.jsonl" @@ -0,0 +1,664 @@ +{"seq_id":"201472125","text":"'''\nUsuário escolhe um número inteiro entre 0 e 10. Programa também escolherá um \nnúmero nesse intervalo. Usuário informa se acha que a soma desses dois valores \nserá par ou ímpar. O programa é interrompido quando o usuário perder, mostrando \nao final o total de vitórias.\n'''\n\nfrom random import randint\n\nprint('Jogo de par ou ímpar')\nprint()\ni = 0\nwhile True:\n jogador = int(input('Digite um número inteiro [0 a 10]: '))\n computador = randint(0,10)\n total = jogador + computador\n if total % 2 == 0:\n resposta = 'p'\n else:\n resposta = 'i'\n escolha = ' '\n while escolha not in 'pi':\n escolha = input('Par [p] ou Ímpar [i] ? ').lower().strip()[0]\n print()\n print(f'O computador escolheu: {computador}')\n print(f'Soma: {total}')\n if resposta == escolha:\n print('Você venceu!')\n i += 1\n else:\n print('você perdeu...')\n print()\n break\n print()\nprint(f'Total de vitórias: {i}')\nprint()\n","sub_path":"exercicio_py/ex0053_jogo_par_impar.py","file_name":"ex0053_jogo_par_impar.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"642292393","text":"# The MIT License (MIT)\n#\n# Copyright (c) 2019 Seon \"Unexpected Maker\" Rozenblum\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\n`tinypico-playshield-snake` - Snake Game for the TinyPICO Play Shield\n=====================================================================\n* Author(s): Seon Rozenblum\n\"\"\"\n\n__version__ = \"0.0.0-auto.0\"\n__repo__ = \"https://github.com/unexpectedmaker/tinypico\"\n\nfrom machine import I2C, Pin, Timer, PWM\nimport tinypico as TinyPICO\nimport time, random, ssd1306, framebuf, bitmaps, math, notes\nfrom micropython import const\n\n# Turn off the power to the DotStar\nTinyPICO.set_dotstar_power( False )\n\nclass Snake:\n\n    def reset(self, x, y, len, dir):\n        self._moves = 0\n        self._dead = False\n        self._length = len\n        self._dir = 0\n        self._speed = 0.12\n        self._score = 0\n        self._fruit = []\n\n        # set snake head position\n        self._list = [ [x,y] ]\n        # dynamically create snake body based on starting position\n        for i in range( self._length-1 ):\n\n            if self._dir == 0:\n                y += 2\n            elif self._dir == 1:\n                x -= 2\n            elif self._dir == 2:\n                y -= 2\n            elif self._dir == 3:\n                x += 2\n            \n            self._list.append( [x,y] )\n        \n        self.add_fruit()\n\n    def __init__(self, x, y, len, dir):\n        self.reset( x, y, len, dir )\n\n    def set_dir(self, dir):\n        # Change direction\n        self._dir += dir\n\n        # Wrap direction\n        if self._dir < 0:\n            self._dir = 3\n        elif self._dir > 3:\n            self._dir = 0\n\n    def move(self):\n        # Increase snake length every 10 moves\n        # self._moves += 1\n        # if self._moves == 10:\n        #     self._moves = 0\n        #     self._length += 1\n\n        remove_tail = [0,0,0,0]\n\n        if len( self._list ) == self._length:\n            x,y = self._list[ self._length-1 ]\n            remove_tail[0] = x\n            remove_tail[1] = y\n            del self._list[ self._length-1 ]\n\n        # Grab the x,y of the head\n        x, y = self._list[0]\n\n        # move the head based on the current direction\n        if self._dir == 0:\n            y -= 2\n        elif self._dir == 1:\n            x += 2\n        elif self._dir == 2:\n            y += 2\n        elif self._dir == 3:\n            x -= 2\n\n        # Did we hit the outer bounds of the level?\n        hit_bounds = x < 1 or y < 1 or x > 125 or y > 61\n\n        # Is the x,y position already in the list? 
If so, we hit ourselves and died - we also died if we hit the edge of the level \n self._dead = self._list.count( [x,y] ) > 0 or hit_bounds\n\n # Add the next position as the head of the snake\n self._list.insert( 0, [x,y] )\n\n # Did we eat any fruit?\n for f in self._fruit:\n fx,fy = f\n\n if x >= fx-2 and x <= fx+1 and y >= fy-2 and y <= fy+1:\n remove_tail[2] = fx\n remove_tail[3] = fy\n self.eat_food()\n self._fruit.remove( f )\n self.add_fruit()\n\n return remove_tail\n\n def is_dead(self):\n return self._dead\n\n def get_positions(self):\n return self._list\n\n def get_speed(self):\n return self._speed\n\n def get_score(self):\n return self._score\n\n def eat_food(self):\n self._score += 1\n self._length += 2\n # reduce the speed time delay, burt clamped between 0.05 and 0.12\n self._speed = max(0.01, min( self._speed - 0.01, 0.12))\n\n # print(\"Score {}, Speed {}\".format( self._score, self._speed))\n\n def add_fruit(self):\n x = random.randrange(2,60) * 2\n y = random.randrange(2,30) * 2\n self._fruit.append( (x,y) )\n\n def get_fruit_positions(self):\n return self._fruit\n\n\n# Globals\n\ngame_state = -1 #0 = menu, 1 = playing, 2 = pause, 3 = gameover\ngame_state_changed = False\nfruit_interval = 10\nfruit_next = 0\n\n\n# Sound\ndef play_boot_music():\n speaker = PWM(Pin(25), freq=20000, duty=512)\n boot_sound = [notes.D4, 0, notes.G4, 0, notes.D4, 0, notes.A4, 0]\n for i in boot_sound:\n if i == 0:\n speaker.freq(0)\n time.sleep_ms(50)\n pass\n else:\n speaker.freq(i)\n time.sleep_ms(250)\n\n speaker.freq(0)\n speaker.deinit()\n\ndef play_death():\n speaker = PWM(Pin(25), freq=20000, duty=512)\n speaker.freq(notes.D4)\n time.sleep_ms(200)\n speaker.freq(0)\n time.sleep_ms(25)\n speaker.freq(notes.A2)\n time.sleep_ms(400)\n speaker.freq(0)\n speaker.deinit()\n\ndef play_sound( note, duration ):\n speaker = PWM(Pin(25), freq=20000, duty=512)\n speaker.freq(note)\n time.sleep_ms(duration)\n speaker.freq(0)\n speaker.deinit()\n\n# Create an instance of Snake\nsnake = Snake( x=62, y=30, len=6, dir=0 )\n\ndef switch_state( new_state ):\n global game_state, game_state_changed\n if game_state == new_state:\n pass\n else:\n game_state = new_state\n game_state_changed = True\n\ndef player_turn(dir):\n global snake\n snake.set_dir(dir)\n\n# Helpers\n\ndef text_horiz_centred(fb, text, y, char_width=8):\n fb.text(text, (fb.width - len(text) * char_width) // 2, y)\n\n# Buttons\nBUT_1 = Pin(26, Pin.IN )\nBUT_2 = Pin(27, Pin.IN )\nBUT_3 = Pin(15, Pin.IN )\nBUT_4 = Pin(14, Pin.IN )\n\nlast_button_press_time = 0\n\ndef process_button_1():\n if game_state == 1:\n player_turn(1)\n\ndef process_button_2():\n if game_state == 0:\n switch_state(1)\n elif game_state == 3:\n switch_state(0)\n\ndef process_button_3():\n if game_state == 1:\n player_turn(-1)\n\ndef process_button_4():\n print(\"Pressed Button 4\")\n\nbutton_handlers = { str(BUT_1): process_button_1, str(BUT_2): process_button_2, str(BUT_3):process_button_3, str(BUT_4): process_button_4 }\n\ndef button_press_callback(pin):\n global last_button_press_time\n # block button press as software debounce\n if last_button_press_time < time.ticks_ms():\n \n # add 150ms delay between button presses... 
might be too much, we'll see!\n last_button_press_time = time.ticks_ms() + 150\n\n # If the pin is in the callback handler dictionary, call the appropriate function \n if str(pin) in button_handlers:\n button_handlers[str(pin)]()\n # else:\n # # print a debug message if button presses were too quick or a dounce happened\n # print(\"Button Bounce - {}ms\".format( ( last_button_press_time - time.ticks_ms() ) ) )\n\n# Create all of the triggers for each button pointing to the single callback handler\nBUT_1.irq(trigger=Pin.IRQ_FALLING, handler=button_press_callback)\nBUT_2.irq(trigger=Pin.IRQ_FALLING, handler=button_press_callback)\nBUT_3.irq(trigger=Pin.IRQ_FALLING, handler=button_press_callback)\nBUT_4.irq(trigger=Pin.IRQ_FALLING, handler=button_press_callback)\n\n# create timer for flashing UI\nflasher = Timer(0)\nflash_state = False\ndef flasher_update(timer):\n global flash_state\n flash_state = not flash_state\n\nflasher.init(period=500, mode=Timer.PERIODIC, callback=flasher_update)\n\ndef flash_text(x,y,text):\n global flash_state\n if flash_state:\n oled.text(text, x, y, 2)\n else:\n oled.fill_rect( 1, y, 126, 12, 0)\n\n\n# Begin\n\n# Turn off the power to the DotStar\nTinyPICO.set_dotstar_power( False )\n\n# Configure I2C for controlling anything on the I2C bus\n# Software I2C only for this example but the next version of MicroPython for the ESP32 supports hardware I2C too\ni2c = I2C(scl=Pin(22), sda=Pin(21))\n\n# Initialise the OLED screen\noled = ssd1306.SSD1306_I2C(128, 64, i2c)\n\n# Add the TP logo to a frameBuf buffer and show it for 2 seconds\nfbuf = framebuf.FrameBuffer(bytearray(bitmaps.icon_tinypico), 128, 30, framebuf.MONO_HLSB)\noled.blit(fbuf, 0, 2)\ntext_horiz_centred( oled, \"PLAY SHIELD\", 35)\ntext_horiz_centred( oled, \"INTEL NOT INSIDE\", 50)\n\noled.show()\nplay_boot_music()\ntime.sleep(2)\n\n# show the menu on start\nswitch_state(0)\n\ndef show_menu():\n # clear the display\n oled.fill(0)\n # Show welcome message\n text_horiz_centred( oled, \"TINY SNAKE\", 0 )\n text_horiz_centred( oled, \"3-Left 1-Right\", 50 )\n oled.line(0, 12, 127, 12,1 )\n # oled.text(\"TINY SNAKE\", 25, 0, 2)\n # oled.text(\"3-Left 1-Right\", 4, 50, 2)\n oled.show()\n\ndef draw_snake():\n global snake, fruit_next, fruit_interval\n # Move the snake and return if we need to clear the tail or if the snake grew\n result = snake.move()\n\n # The snake tail position is stored in result index 0,1 if it needs to be removed\n # If x or y are > 0 then we reove that pos from the screen\n if result[0] > 0 or result[1] > 0:\n oled.fill_rect(result[0], result[1], 2, 2, 0)\n\n # The last eaten fruit position is stored in indexs 2,3 if it needs to be removed\n # If x or y are > 0 then we reove that pos from the screen\n if result[2] > 0 or result[3] > 0:\n oled.fill_rect(result[2]-1, result[3]-1, 3, 3, 0)\n play_sound(notes.C4,100)\n\n # Go through the snake positions and draw them\n for pos in snake.get_positions():\n oled.fill_rect(pos[0], pos[1], 2, 2, 1)\n\n # Redraw all fruit\n for pos in snake.get_fruit_positions():\n oled.fill_rect(pos[0]-1, pos[1]-1, 3, 3, 1)\n\n # Update the OLED\n oled.show()\n time.sleep( snake.get_speed() )\n\n # If the snake died in that move, end the game\n if snake.is_dead():\n play_death()\n switch_state( 3 )\n\ndef setup_new_game():\n oled.fill(0)\n oled.rect(0, 0, 128, 64, 1)\n # oled.rect(1, 1, 127, 63, 1)\n oled.show()\n\n #reset variables\n global snake, fruit_next, fruit_interval\n snake.reset( x=62, y=30, len=3, dir=0 )\n\n fruit_next = time.time() + fruit_interval\n\n 
draw_snake()\n\ndef show_gameover():\n global snake\n oled.fill(0)\n text_horiz_centred( oled, \"YOU SCORED \" + str( snake.get_score() ), 10 )\n text_horiz_centred( oled, \"2 - Continue\", 50 )\n # oled.text(\"YOU SCORED \" + str( snake.get_score() ), 10, 10, 2)\n # oled.text(\"2 - Continue\", 15, 50, 2)\n oled.show()\n\n\n\nwhile True:\n if game_state_changed:\n game_state_changed = False\n\n if game_state == 0:\n show_menu()\n elif game_state == 1:\n setup_new_game()\n elif game_state == 3:\n show_gameover()\n\n # menu\n if game_state == 0:\n flash_text( 0, 30, \"Press 2 to start\")\n oled.show()\n time.sleep(.001)\n\n elif game_state == 1:\n draw_snake()\n\n elif game_state == 3:\n flash_text( 24, 30, \"GAME OVER\")\n oled.show()\n time.sleep(.001)\n\n \n \n\n\n\n\n\n","sub_path":"play shield examples/tiny-snake/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"429051494","text":"import re\n\nfrom .bot import bot\nfrom ..constants import (\n COULD_NOT_UPDATE_BOT_INFORMATION,\n SENT_TO_TELEGRAM_CHANNEL,\n TELEGRAM_CFPLAND_CHANNEL,\n)\nfrom ..logger import logger\nfrom ..models import CFP\n\n\ndef telegram_bot(event, context):\n lambda_logger = logger.bind(lambda_event=event, lambda_context=vars(context))\n body = event.get('body')\n\n if event.get('httpMethod') == 'POST' and body:\n try:\n bot.update(body)\n except AttributeError as exception:\n lambda_logger.exception(\n {\n 'description': COULD_NOT_UPDATE_BOT_INFORMATION,\n 'exception': exception,\n },\n code=COULD_NOT_UPDATE_BOT_INFORMATION, exc_info=True,\n )\n\n return bot.ok_response()\n\n lambda_logger.info({\n 'message_received': bot.message_received,\n 'chat_id': bot.chat_id,\n })\n\n if bot.message_received == '/start':\n bot.send_start_message(bot.chat_id)\n\n latest_category_match = re.search(r'^(/latest) (\\w+)$', bot.message_received)\n if latest_category_match:\n category = latest_category_match.groups()[-1].lower()\n\n cfps = CFP.get_latest_by_category(category)\n if cfps:\n for cfp in cfps:\n message = bot.format_cfp(cfp)\n bot.send_message(bot.chat_id, message)\n else:\n message = (\n f'Sorry, I could not find any {category} CFPs 😔\\n\\n'\n f'You can find all the available categories with the /categories command!'\n )\n bot.send_message(bot.chat_id, message)\n\n if bot.message_received == '/latest':\n for cfp in CFP.get_latest():\n message = bot.format_cfp(cfp)\n bot.send_message(bot.chat_id, message)\n\n if bot.message_received == '/categories':\n message = '👉 *Here are the categories available:*\\n\\n'\n for entry in CFP.get_categories():\n message = message + entry.category + '\\n'\n\n bot.send_message(bot.chat_id, message)\n\n return bot.ok_response()\n\n return bot.error_response()\n\n\ndef send_telegram_messages_to_channel(event, context):\n \"\"\"\n Send a CFP message to the Telegram distribution channel.\n \"\"\"\n\n lambda_logger = logger.bind(lambda_event=event, lambda_context=vars(context))\n\n not_sent = CFP.get_not_sent_telegram()\n\n for cfp in not_sent:\n message = bot.format_cfp(cfp)\n bot.send_message(TELEGRAM_CFPLAND_CHANNEL, message)\n cfp.sent_on_telegram()\n\n lambda_logger.info(\n {\n 'description': SENT_TO_TELEGRAM_CHANNEL,\n 'cfp_title': cfp.title,\n 'chat_id': TELEGRAM_CFPLAND_CHANNEL,\n }, code=SENT_TO_TELEGRAM_CHANNEL,\n )\n\n\ndef set_telegram_webhook(event, context):\n webhook = bot.set_webhook(event)\n\n if webhook:\n return bot.ok_response()\n\n return 
bot.error_response()\n","sub_path":"cfpland_bot/bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"503850410","text":"\"\"\"Define configuration variables for StagPy.\n\nSee :mod:`stagpy.args` for additional definitions related to the command line\ninterface.\n\"\"\"\n\nfrom collections import OrderedDict\nimport pathlib\n\nfrom loam.manager import ConfOpt as Conf\nfrom loam.tools import switch_opt, config_conf_section, set_conf_opt\n\n\ndef _actual_index(arg):\n \"\"\"Turn a string in a integer or slice.\"\"\"\n if ':' in arg:\n idxs = arg.split(':')\n if len(idxs) > 3:\n raise ValueError(f'{arg} is an invalid slice')\n idxs[0] = int(idxs[0]) if idxs[0] else None\n idxs[1] = int(idxs[1]) if idxs[1] else None\n if len(idxs) == 3:\n idxs[2] = int(idxs[2]) if idxs[2] else None\n else:\n idxs = idxs[0:2] + [1]\n return slice(*idxs)\n return int(arg)\n\n\ndef _index_collection(arg):\n \"\"\"Build an index collection from a command line input.\"\"\"\n return [_actual_index(item) for item in arg.split(',') if item]\n\n\nHOME_DIR = pathlib.Path.home()\nCONFIG_DIR = HOME_DIR / '.config' / 'stagpy'\nCONFIG_FILE = CONFIG_DIR / 'config.toml'\nCONFIG_LOCAL = pathlib.Path('.stagpy.toml')\n\nCONF_DEF = OrderedDict()\n\nCONF_DEF['common'] = OrderedDict((\n ('config', Conf(None, True, None, {'action': 'store_true'},\n False, 'print config options')),\n ('set', set_conf_opt()),\n))\n\nCONF_DEF['core'] = OrderedDict((\n ('path', Conf('./', True, 'p', {},\n True, 'path of StagYY run directory or par file', '_files')),\n ('outname', Conf('stagpy', True, 'n', {},\n True, 'StagPy output file name prefix')),\n ('shortname', switch_opt(False, None,\n 'StagPy output file name is only prefix')),\n ('timesteps', Conf(None, True, 't',\n {'nargs': '?', 'const': '', 'type': _index_collection},\n False, 'timesteps slice')),\n ('snapshots', Conf(None, True, 's',\n {'nargs': '?', 'const': '', 'type': _index_collection},\n False, 'snapshots slice')),\n))\n\nCONF_DEF['plot'] = OrderedDict((\n ('ratio', Conf(None, True, None,\n {'nargs': '?', 'const': 0.6, 'type': float},\n False, 'force aspect ratio of field plot')),\n ('raster', switch_opt(True, None, 'rasterize field plots')),\n ('format', Conf('pdf', True, None, {},\n True, 'figure format (pdf, eps, svg, png)')),\n ('vmin', Conf(None, True, None, {'type': float},\n False, 'minimal value on plot')),\n ('vmax', Conf(None, True, None, {'type': float},\n False, 'maximal value on plot')),\n ('cminmax', switch_opt(False, 'C', 'constant min max across plots')),\n ('mplstyle', Conf('stagpy-paper', True, None,\n {'nargs': '?', 'const': '', 'type': str},\n True, 'matplotlib style')),\n ('xkcd', Conf(False, False, None, {},\n True, 'use the xkcd style')),\n))\n\nCONF_DEF['scaling'] = OrderedDict((\n ('yearins', Conf(3.154e7, False, None, {},\n True, 'year in seconds')),\n ('ttransit', Conf(1.78e15, False, None, {},\n True, 'transit time in My')),\n ('dimensional', switch_opt(False, None, 'use dimensional units')),\n ('time_in_y', switch_opt(True, None, 'dimensionful time is in year')),\n ('vel_in_cmpy', switch_opt(True, None,\n 'dimensionful velocity is in cm/year')),\n ('factors', Conf({'s': 'M',\n 'm': 'k',\n 'Pa': 'G'},\n False, None, {}, True, 'custom factors')),\n))\n\nCONF_DEF['field'] = OrderedDict((\n ('plot',\n Conf('T,stream', True, 'o',\n {'nargs': '?', 'const': '', 'type': str},\n True, 'variables to plot (see stagpy var)')),\n 
('perturbation', switch_opt(False, None,\n 'plot departure from average profile')),\n ('shift', Conf(None, True, None, {'type': int},\n False, 'shift plot horizontally')),\n ('timelabel', switch_opt(False, None, 'add label with time')),\n ('interpolate', switch_opt(True, None, 'apply Gouraud shading')),\n ('colorbar', switch_opt(True, None, 'add color bar to plot')),\n ('ix', Conf(None, True, None, {'type': int},\n False, 'x-index of slice for 3D fields')),\n ('iy', Conf(None, True, None, {'type': int},\n False, 'y-index of slice for 3D fields')),\n ('iz', Conf(None, True, None, {'type': int},\n False, 'z-index of slice for 3D fields')),\n ('isocolors', Conf('', True, None, {}, True,\n 'comma-separated list of colors for isolines')),\n ('cmap',\n Conf({'T': 'RdBu_r',\n 'eta': 'viridis_r',\n 'rho': 'RdBu',\n 'sII': 'plasma_r',\n 'edot': 'Reds'},\n False, None, {}, True, 'custom colormaps')),\n))\n\nCONF_DEF['rprof'] = OrderedDict((\n ('plot',\n Conf('Tmean', True, 'o',\n {'nargs': '?', 'const': ''},\n True, 'variables to plot (see stagpy var)')),\n ('style',\n Conf('-', True, None, {},\n True, 'matplotlib line style')),\n ('average', switch_opt(False, 'a', 'plot temporal average')),\n ('grid', switch_opt(False, 'g', 'plot grid')),\n ('depth', switch_opt(False, 'd', 'depth as vertical axis')),\n))\n\nCONF_DEF['time'] = OrderedDict((\n ('plot',\n Conf('Nutop,ebalance,Nubot.Tmean', True, 'o',\n {'nargs': '?', 'const': ''},\n True, 'variables to plot (see stagpy var)')),\n ('style',\n Conf('-', True, None, {},\n True, 'matplotlib line style')),\n ('compstat',\n Conf('', True, None, {'nargs': '?', 'const': ''},\n False, 'compute mean and rms of listed variables')),\n ('tstart',\n Conf(None, True, None, {'type': float},\n False, 'beginning time')),\n ('tend',\n Conf(None, True, None, {'type': float},\n False, 'end time')),\n ('fraction',\n Conf(None, True, None, {'type': float},\n False, 'ending fraction of series to process')),\n ('marktimes',\n Conf('', True, 'M', {},\n False, 'list of times where to put a mark')),\n ('marksteps',\n Conf('', True, 'T', {'type': _index_collection},\n False, 'list of steps where to put a mark')),\n ('marksnaps',\n Conf('', True, 'S', {'type': _index_collection},\n False, 'list of snaps where to put a mark')),\n))\n\nCONF_DEF['refstate'] = OrderedDict((\n ('plot',\n Conf('T', True, 'o',\n {'nargs': '?', 'const': ''},\n True, 'variables to plot (see stagpy var)')),\n ('style',\n Conf('-', True, None, {},\n True, 'matplotlib line style')),\n))\n\nCONF_DEF['plates'] = OrderedDict((\n ('plot',\n Conf('c,eta,sc', True, 'o',\n {'nargs': '?', 'const': '', 'type': str},\n True, 'variables to plot (see stagpy var)')),\n ('vzcheck', switch_opt(False, None,\n 'activate Colin\\'s version with vz checking')),\n ('timeprofile', switch_opt(False, None,\n 'nb of plates as function of time')),\n ('zoom',\n Conf(None, True, None, {'type': float},\n False, 'zoom around surface')),\n ('topomin', Conf(-40, False, None, {},\n True, 'min topography in plots')),\n ('topomax', Conf(100, False, None, {},\n True, 'max topography in plots')),\n ('agemin', Conf(-50, False, None, {},\n True, 'min age in plots')),\n ('agemax', Conf(500, False, None, {},\n True, 'max age in plots')),\n ('vmin', Conf(-5000, False, None, {},\n True, 'min velocity in plots')),\n ('vmax', Conf(5000, False, None, {},\n True, 'max velocity in plots')),\n ('dvmin', Conf(-250000, False, None, {},\n True, 'min velocity derivative in plots')),\n ('dvmax', Conf(150000, False, None, {},\n True, 'max velocity derivative in 
plots')),\n ('stressmin', Conf(0, False, None, {},\n True, 'min stress in plots')),\n ('stressmax', Conf(800, False, None, {},\n True, 'max stress in plots')),\n ('lstressmax', Conf(50, False, None, {},\n True, 'max lithospheric stress in plots')),\n))\n\nCONF_DEF['info'] = OrderedDict((\n ('output', Conf('t,Tmean,vrms,Nutop,Nubot', True, 'o', {},\n True, 'time series to print')),\n))\n\nCONF_DEF['var'] = OrderedDict((\n ('field', Conf(None, True, None, {'action': 'store_true'},\n False, 'print field variables')),\n ('sfield', Conf(None, True, None, {'action': 'store_true'},\n False, 'print surface field variables')),\n ('rprof', Conf(None, True, None, {'action': 'store_true'},\n False, 'print rprof variables')),\n ('time', Conf(None, True, None, {'action': 'store_true'},\n False, 'print time variables')),\n ('refstate', Conf(None, True, None, {'action': 'store_true'},\n False, 'print refstate variables')),\n ('plates', Conf(None, True, None, {'action': 'store_true'},\n False, 'print plates variables')),\n))\n\nCONF_DEF['config'] = config_conf_section()\n","sub_path":"stagpy/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":9374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"477391033","text":"import openpyxl\nimport random\nwb=openpyxl.load_workbook('维护作业明细日志记录--吴耀学 - 副本.xlsx')\nsheet=wb.get_sheet_by_name('经分及前台维护')\nwb1=openpyxl.Workbook()\nsheet1=wb1.get_sheet_by_name('Sheet')\nn=1\nlist=[]\n\nrow=sheet.max_row\nfor i in range(2,row+1):\n list.append(i)\nfor i in range(2,row+1):\n m=random.choice(list)\n for j in range(1,15):\n sheet1.cell(row=i,column=j).value=sheet.cell(row=m,column=j).value\n n+=1\n\nwb1.save('维护作业明细日志记录--吴耀学 - 副本1.xlsx')\n\n\n# print(sheet1)","sub_path":"pythonautotest/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"83971775","text":"#!/usr/local/bin/python\n# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import patterns, include, url\nfrom series import views\n\nurlpatterns = patterns('',\n # Frontpage\n url(r'^$', views.series_index, name='series_index'),\n url(r'^finished', views.series_finished, name='series_finished'),\n \n # Ajax\n url(r'^ajax/reorder/', views.series_ajax_reorder, name='series_ajax_reorder'),\n url(r'^ajax/update/', views.series_ajax_update, name='series_ajax_update'),\n url(r'^ajax/togglseen/', views.series_ajax_togglefinished, name='series_ajax_togglefinished'),\n url(r'^ajax/delete/', views.series_ajax_delete, name='series_ajax_delete'),\n)\n","sub_path":"series/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"77236090","text":"class Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> bool:\n def dp(s,wordset,mem):\n n = len(s)\n if mem[n-1]is not None:\n return mem[n-1]\n if s in wordset:\n mem[n-1]=True\n return mem[n-1]\n for i in range(n-1):\n first = s[:(i+1)]\n second = s[(i+1):]\n if dp(first,wordset,mem) and second in wordset:\n mem[n-1]=True\n return True\n mem[n-1]=False\n return False\n wordset = set(wordDict)\n mem = [None]*len(s)\n return dp(s,wordset,mem)\n\n\n################################################################################\n##\n## Recursion with memory\n##\n################################################################################\nclass Solution:\n def 
wordBreak(self, s: str, wordDict: List[str]) -> bool:\n word_set = frozenset(wordDict)\n @lru_cache\n def wordBreak_sub(s,word_set):\n if s in word_set:\n return True\n for i in range(len(s)):\n if s[:(i+1)] in word_set and wordBreak_sub(s[(i+1):],word_set):\n return True\n return False\n return wordBreak_sub(s,word_set)\n\n## for @lru_cache decorator see https://docs.python.org/3/library/functools.html\n\n\n\n################################################################################\n##\n## Iterative DP solution\n##\n################################################################################\nclass Solution:\n def wordBreak(self, s: str, wordDict: List[str]) -> bool:\n word_set = set(wordDict)\n dp = [False]*(len(s)+1)\n dp[0]=True\n for i in range(1,len(s)+1):\n for j in range(i):\n if dp[j] and s[j:i] in word_set:\n dp[i]=True\n break\n return dp[-1]\n","sub_path":"Problem139_Word_Break.py","file_name":"Problem139_Word_Break.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"606534690","text":"import os\n\nfrom cs50 import SQL\n\ndb = SQL(\"sqlite:///bak.db\")\n\ndef main():\n menu_list = db.execute(\"SELECT * FROM menu_list\")\n for menu in menu_list:\n print(f\"Id_No. {menu['id']} {menu['items']} of flavor {menu['flavors']} at rate of {menu['rate']}.\")\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"371033203","text":"import pytest\n\nfrom busy_beaver.models import KeyValueStore\n\n\ndef test_put_get(kv_store):\n expected_value = \"value\"\n\n kv_store.put(\"key\", expected_value)\n returned_value = kv_store.get(\"key\")\n\n assert returned_value == expected_value\n\n\ndef test_put_get_int(kv_store):\n expected_value = 100\n\n kv_store.put_int(\"key\", expected_value)\n returned_value = kv_store.get_int(\"key\")\n\n assert returned_value == expected_value\n\n\ndef test_put_overwrite(kv_store):\n expected_value = 150\n kv_store.put_int(\"key\", 100)\n kv_store.put_int(\"key\", expected_value)\n\n returned_value = kv_store.get_int(\"key\")\n\n assert returned_value == expected_value\n all_records = KeyValueStore.query.filter_by(key=\"key\").all()\n assert len(all_records) == 1\n\n\ndef test_get_key_does_not_exist_raise_ValueError(kv_store):\n with pytest.raises(ValueError):\n kv_store.get(\"key_does_not_exist\")\n","sub_path":"tests/common/wrappers/key_value_store_test.py","file_name":"key_value_store_test.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"449154624","text":"from machine import Pin, SPI, ADC\nfrom .utils import *\n\n\nclass ODROID_GO:\n \"\"\"\n Class for helping to code with ODROID-GO.\n \"\"\"\n\n def __init__(self):\n self._init_lcd()\n self._init_buttons()\n self._init_speaker()\n self._init_battery()\n\n def _init_lcd(self):\n self.lcd = ILI9341(SPI(2, baudrate=40000000,\n miso=Pin(TFT_MISO_PIN, Pin.IN),\n mosi=Pin(TFT_MOSI_PIN, Pin.OUT),\n sck=Pin(TFT_SCLK_PIN, Pin.OUT)),\n cs=Pin(TFT_CS_PIN, Pin.OUT),\n dc=Pin(TFT_DC_PIN, Pin.OUT))\n\n def _init_buttons(self):\n self.btn_joy_x = Button(BUTTON_JOY_X_PIN, True, BUTTON_DEBOUNCE_MS)\n self.btn_joy_y = Button(BUTTON_JOY_Y_PIN, True, BUTTON_DEBOUNCE_MS)\n self.btn_menu = Button(BUTTON_MENU_PIN, True, BUTTON_DEBOUNCE_MS)\n 
self.btn_volume = Button(BUTTON_VOLUME_PIN, True, BUTTON_DEBOUNCE_MS)\n self.btn_select = Button(BUTTON_SELECT_PIN, True, BUTTON_DEBOUNCE_MS)\n self.btn_start = Button(BUTTON_START_PIN, True, BUTTON_DEBOUNCE_MS)\n self.btn_a = Button(BUTTON_A_PIN, True, BUTTON_DEBOUNCE_MS)\n self.btn_b = Button(BUTTON_B_PIN, True, BUTTON_DEBOUNCE_MS)\n\n def _init_speaker(self):\n self.speaker = Speaker(SPEAKER_PIN, SPEAKER_DAC_PIN)\n\n def _init_battery(self):\n self.battery = Battery(BATTERY_PIN, BATTERY_RESISTANCE_NUM,\n ADC.WIDTH_12BIT, ADC.ATTN_11DB)\n\n def begin(self):\n # LCD\n self.lcd.erase()\n self.lcd.fill(colors.BLACK)\n self.lcd.set_pos(0, 0)\n self.lcd.colors = colors\n self.lcd.fonts = fonts\n Pin(TFT_LED_PIN, Pin.OUT).value(1)\n\n # Buttons\n Pin(BUTTON_JOY_X_PIN, Pin.IN, Pin.PULL_UP)\n Pin(BUTTON_JOY_Y_PIN, Pin.IN, Pin.PULL_UP)\n Pin(BUTTON_MENU_PIN, Pin.IN, Pin.PULL_UP)\n Pin(BUTTON_VOLUME_PIN, Pin.IN, Pin.PULL_UP)\n Pin(BUTTON_SELECT_PIN, Pin.IN, Pin.PULL_UP)\n Pin(BUTTON_START_PIN, Pin.IN, Pin.PULL_UP)\n Pin(BUTTON_A_PIN, Pin.IN, Pin.PULL_UP)\n Pin(BUTTON_B_PIN, Pin.IN, Pin.PULL_UP)\n\n # Speaker\n self.speaker.set_volume(0.1)\n self.speaker.set_beep(262, 1)\n\n def update(self):\n self.btn_joy_x.read_axis()\n self.btn_joy_y.read_axis()\n self.btn_menu.read()\n self.btn_volume.read()\n self.btn_select.read()\n self.btn_start.read()\n self.btn_a.read()\n self.btn_b.read()\n\n\nGO = ODROID_GO()\nGO.begin()\n","sub_path":"odroid_go/odroid_go.py","file_name":"odroid_go.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"187718953","text":"__author__ = 'lslacker'\n# -*- coding: utf-8 -*-\nimport argparse\nimport xlwt\nimport csv\nimport os\nimport logging\n\n\ndef merge(csvs, output):\n wb = xlwt.Workbook(encoding='latin1')\n\n for f in csvs:\n logging.info('Process {}'.format(f.name))\n csv_reader = csv.reader(f)\n ws = wb.add_sheet(os.path.basename(f.name))\n\n for row_idx, row in enumerate(csv_reader):\n for col_idx, cell in enumerate(row):\n ws.write(row_idx, col_idx, cell)\n wb.save(output)\n\n\ndef consoleUI():\n parser = argparse.ArgumentParser(description='Merge multiple csv files into excel file, each csv')\n parser.add_argument('-v', '--verbose', action='count', default=0)\n parser.add_argument('-o', '--output', required=True)\n parser.add_argument('csvs', nargs='+', type=argparse.FileType('r', encoding='latin1'), help='CSV files')\n a = parser.parse_args()\n if a.verbose > 1:\n logging.basicConfig(level=logging.INFO)\n logger = logging.getLogger(__name__)\n\n merge(a.csvs, a.output)\n\nif __name__ == '__main__':\n consoleUI()\n\n\n\n\n","sub_path":"mergecsvsintoexcel.py","file_name":"mergecsvsintoexcel.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"417582088","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 2 11:09:05 2019\n\n@author: David Chong Tian Wei\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\ndef pre_process_ventilation_data(path, cols):\n \"\"\"\n Loads ventilation data and fills in NA values via linear interpolation\n \n Parameters\n ----------\n path : string\n Path to the input data file\n cols : array like of integers\n Integers corresponding to the columns that are to be used for analysis. 
Assumes the columns refer to an index/time, pressure, and flows column.\n \n Returns\n -------\n Pandas Dataframe\n \"\"\"\n data = pd.read_csv(path, usecols=cols)\n # Impute missing values for pressure, flow, volume\n for i in range(0,data.shape[1]):\n data.iloc[:,i] = data.iloc[:,i].interpolate()\n return data\n\ndef correct_baseline(data, window):\n \"\"\"\n Attempts to do basic window mean centering of the data to correct baseline wander\n \n Parameters\n ----------\n data : Pandas Dataframe\n The data obtained from pre_process_ventilation_data call\n window : int\n The size of the window to use for baseline correction\n \"\"\"\n for i in range(0,int(len(data)/window)):\n if (i+1)*window < data.shape[0]:\n data[(i*window):((i+1)*window)] = data[(i*window):((i+1)*window)] - np.mean(data[(i*window):((i+1)*window)])\n else:\n data[(i*window):] = data[(i*window):] - np.mean(data[(i*window):])","sub_path":"ventiliser/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"555810369","text":"\nimport numpy \nfrom apgl.graph.VertexList import VertexList\nfrom apgl.util.Util import Util\nfrom apgl.util.Parameter import Parameter \n\nclass HIVVertices(VertexList):\n def __init__(self, numVertices):\n numFeatures = 8\n super(HIVVertices, self).__init__(numVertices, numFeatures)\n\n #Need to randomly set up the initial values\n self.V[:, self.dobIndex] = numpy.random.rand(numVertices)\n self.V[:, self.genderIndex] = Util.randomChoice(numpy.array([1, 1]), numVertices)\n #Note in reality females cannot be recorded as bisexual but we model the real scenario\n #We assume that 5% of the population is gay or bisexual \n self.V[:, self.orientationIndex] = Util.randomChoice(numpy.array([19, 1]), numVertices)\n\n self.V[:, self.stateIndex] = numpy.zeros(numVertices)\n self.V[:, self.infectionTimeIndex] = numpy.ones(numVertices)*-1\n self.V[:, self.detectionTimeIndex] = numpy.ones(numVertices)*-1\n self.V[:, self.detectionTypeIndex] = numpy.ones(numVertices)*-1\n\n self.V[:, self.hiddenDegreeIndex] = numpy.ones(numVertices)*-1\n\n def setInfected(self, vertexInd, time):\n Parameter.checkIndex(vertexInd, 0, self.getNumVertices())\n Parameter.checkFloat(time, 0.0, float('inf'))\n\n if self.V[vertexInd, HIVVertices.stateIndex] == HIVVertices.infected:\n raise ValueError(\"Person is already infected\")\n\n self.V[vertexInd, HIVVertices.stateIndex] = HIVVertices.infected\n self.V[vertexInd, HIVVertices.infectionTimeIndex] = time\n \n\n def setDetected(self, vertexInd, time, detectionType):\n Parameter.checkIndex(vertexInd, 0, self.getNumVertices())\n Parameter.checkFloat(time, 0.0, float('inf'))\n\n if detectionType not in [HIVVertices.randomDetect, HIVVertices.contactTrace]:\n raise ValueError(\"Invalid detection type : \" + str(detectionType))\n\n if self.V[vertexInd, HIVVertices.stateIndex] != HIVVertices.infected:\n raise ValueError(\"Person must be infected to be detected\")\n\n self.V[vertexInd, HIVVertices.stateIndex] = HIVVertices.removed\n self.V[vertexInd, HIVVertices.detectionTimeIndex] = time\n self.V[vertexInd, HIVVertices.detectionTypeIndex] = detectionType\n\n\n def copy(self):\n \"\"\"\n Returns a copy of this object. 
\n \"\"\"\n vList = HIVVertices(self.V.shape[0])\n vList.setVertices(numpy.copy(self.V))\n return vList\n\n #Some static variables\n dobIndex = 0\n genderIndex = 1\n orientationIndex = 2\n\n #Time varying features\n stateIndex = 3\n infectionTimeIndex = 4\n detectionTimeIndex = 5\n detectionTypeIndex = 6\n hiddenDegreeIndex = 7\n\n male = 0\n female = 1\n \n hetero = 0\n bi = 1\n \n susceptible = 0\n infected = 1\n removed = 2\n randomDetect = 0\n contactTrace = 1 ","sub_path":"exp/viroscopy/model/HIVVertices.py","file_name":"HIVVertices.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"41994423","text":"# -*- coding: utf-8 -*-\n\n# -------------------------------------------------------------------------------\n# Name: Forecast_segment_with_Prophet\n# Purpose: Predict segment from past segments, taking capacity into account\n#\n# Author: berder\n#\n# Created: 01/03/2017\n# Copyright: (c) Arsynet 2015\n# Licence: Tous droits réservés\n# -------------------------------------------------------------------------------\n\nfrom __future__ import print_function\nimport sys\nsys.path.append('../')\nfrom xxx import Model\nfrom xxx.model import *\nimport pandas as pd\nimport numpy as np\nfrom fbprophet import Prophet\nfrom datetime import datetime\n\n\nlogging.basicConfig(level=logging.DEBUG, format=0)\nlog = logging.getLogger('ForecastSegmentsProphet')\nlog.setLevel(logging.DEBUG)\n\n\ndef open_db():\n Model.init_db()\n\n\nclass Results(Model):\n __collection__ = 'o_and_d_laurent_tests'\n\n\ndef get_segment(origin, destination, company, end_date):\n \"\"\"\n :param origin: IATA code typed in by user\n :param destination: IATA code typed in by user\n :param company: optional, airline IATA code. If not provided, default = all\n :param end_date: date, under format YYYY-MM.\n :return: a data frame with pax per monthly one-way segments between origin and destination\n \"\"\"\n\n if company:\n segment_cursor = SegmentInitialData.find(\n {'year_month': {\"$gte\": \"2004-01\", \"$lte\": end_date}, 'leg_origin': origin, 'leg_destination': destination,\n 'operating_airline': company, 'record_ok': True},\n {'leg_origin': 1, 'leg_destination': 1, 'year_month': 1, 'passengers': 1})\n else:\n segment_cursor = SegmentInitialData.find(\n {'year_month': {\"$gte\": \"2004-01\", \"$lte\": end_date}, 'leg_origin': origin, 'leg_destination': destination,\n 'record_ok': True},\n {'leg_origin': 1, 'leg_destination': 1, 'year_month': 1, 'passengers': 1})\n segment = pd.DataFrame(list(segment_cursor))\n return segment\n\n\ndef get_capa(origin, destination, company, end_date):\n \"\"\"\n :param origin: IATA code typed in by user\n :param destination: IATA code typed in by user\n :param company: optional, airline IATA code. If not provided, default = all\n :param end_date: date, under format YYYY-MM. 
Optional: If not provided, default = 3 years from now\n :return: a data frame with capacities between origin and destination\n \"\"\"\n\n if company:\n capa_cursor = CapacityInitialData.find(\n {'year_month': {\"$lte\": end_date, \"$gte\": \"2002-01\"}, 'origin': origin, 'destination': destination,\n 'operating_airline': company, 'record_ok': True, 'active_rec': True},\n {'origin': 1, 'destination': 1, 'year_month': 1, 'capacity': 1})\n else:\n capa_cursor = CapacityInitialData.find(\n {'year_month': {\"$lte\": end_date, \"$gte\": \"2002-01\"}, 'origin': origin, 'destination': destination,\n 'record_ok': True, 'active_rec': True},\n {'origin': 1, 'destination': 1, 'year_month': 1, 'capacity': 1})\n capa = pd.DataFrame(list(capa_cursor))\n return capa\n\n\ndef main():\n print(\"START\")\n open_db()\n origin = raw_input(\"Type origin: \")\n destination = raw_input(\"Type destination: \")\n start = str(raw_input(\"Type start year_month (MM/YY): \"))\n end = str(raw_input(\"Type end year_month (optional): \"))\n company = raw_input(\"Type company (optional): \")\n\n start_date = \"20\"+start[3:6]+\"-\"+start[0:2]\n now = utcnow()\n if end:\n end_date = \"20\" + end[3:6] + \"-\" + end[0:2]\n else:\n end_date = str(now.year + 3) + \"-\" + str(now.month).zfill(2)\n # periods: number of months between now and end_date\n if int(end_date[0:4]) == now.year:\n periods = (datetime.strptime(end_date + '-01', '%Y-%m-%d').month - now.month)\n else:\n periods = (datetime.strptime(end_date+'-01', '%Y-%m-%d').year - now.year) * 12\n\n capa = get_capa(origin, destination, company, end_date)\n capa['ds'] = pd.to_datetime(capa['year_month']+\"-01\")\n capa = capa.groupby(capa.ds).sum()\n capa['ds'] = capa.index\n capa = capa[['ds', 'capacity']]\n capa.columns = ['ds', 'cap']\n segment = get_segment(origin, destination, company, end_date)\n segment['ds'] = pd.to_datetime(segment['year_month']+\"-01\")\n segment = segment.groupby(segment.ds).sum()\n segment['ds'] = segment.index\n segment = segment[['ds', 'passengers']]\n segment.columns = ['ds', 'y']\n\n # Train the model, then prepare dates to be predicted\n model = Prophet(mcmc_samples=200)\n model.fit(segment)\n future = model.make_future_dataframe(periods=periods, freq='MS')\n # Add capacities to the prediction\n future = future.merge(capa, 'left', on='ds')\n forecast = model.predict(future)\n # restrict data to after the start_date\n forecast = forecast.loc[forecast['ds'] > (start_date + \"-01\")]\n model.history = model.history.loc[model.history['ds'] > (start_date + \"-01\")]\n\n # Since there is no weekly data, remove the weekly columns to avoid getting an empty plot (no longer needed with more recent versions of Prophet)\n forecast = forecast[['ds', 'cap', 't', 'trend', 'seasonal_lower', 'seasonal_upper', 'trend_lower', 'trend_upper',\n 'yhat_lower', 'yhat_upper', 'yearly', 'yearly_lower', 'yearly_upper', 'seasonal', 'yhat']]\n model.plot(forecast).show()\n # The capacity doesn't look nice on the trend plot, so remove it before this plot\n del forecast['cap']\n model.plot_components(forecast).show()\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"Forecasting/Forecast_segment_with_Prophet.py","file_name":"Forecast_segment_with_Prophet.py","file_ext":"py","file_size_in_byte":5583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"649208765","text":"import logging\nfrom abc import ABC, abstractmethod\nfrom typing import List\n\nfrom sn_agent.ontology import Ontology, Service\nfrom 
sn_agent.job.job_descriptor import JobDescriptor\nfrom sn_agent.service_adapter.manager import ServiceManager\n\nlogger = logging.getLogger(__name__)\n\n\nclass ServiceAdapterABC(ABC):\n    \"\"\"\n    This is the service adapter base, all other service adapters are based on it.\n    \"\"\"\n\n    type_name = \"Base\"\n\n    def __init__(self, app, service: Service, required_service_node_ids) -> None:\n        self.app = app\n        self.service = service\n        self.required_service_node_ids = required_service_node_ids\n        self.required_service_adapters = []\n        self.requirements_met = False\n        self.available = False\n\n    def post_load_initialize(self, service_manager: ServiceManager):\n        \"\"\"\n        This will hunt out all the agents required to fulfill the required ontology ids\n\n        We should periodically call this if it is false - an agent might come alive that can support this\n        :return:\n        \"\"\"\n        if self.required_service_node_ids is not None:\n            for node_id in self.required_service_node_ids:\n                service_adapter = service_manager.get_service_adapter_for_id(node_id)\n                self.required_service_adapters.append(service_adapter)\n        self.requirements_met = self.has_all_requirements()\n\n        logger.info('Service Adapter: %s initialized. Requirements met: %s', self.type_name, self.requirements_met)\n        # print('Service Adapter: %s initialized. Requirements met: %s' % (self.type_name, self.requirements_met))\n\n    def has_all_requirements(self):\n        \"\"\"\n        Check to see if all our required services are available\n        :return:\n        \"\"\"\n        for required_service_adapter in self.required_service_adapters:\n            if not required_service_adapter.has_all_requirements():\n                return False\n        return True\n\n    def start(self):\n        \"\"\"\n        If init sets up all the connections, start is here to ensure that the worker is actually alive and can process\n        :return:\n        \"\"\"\n        self.available = True\n\n    def stop(self):\n        \"\"\"\n        This will take the worker offline but does not need to be re-initialized\n        :return:\n        \"\"\"\n        self.available = False\n\n    def can_perform(self) -> bool:\n        \"\"\"\n        This is a boolean flag indicating if this worker can do whatever work it says it can.\n\n        An answer of no can be because it is offline, or perhaps it is too busy.\n        :return:\n        \"\"\"\n        return self.requirements_met and self.available and self.all_required_agents_can_perform()\n\n    def all_required_agents_can_perform(self):\n\n        if self.required_service_node_ids is None:\n            return True\n\n        for required_service_adapter in self.required_service_adapters:\n            if not required_service_adapter.can_perform():\n                return False\n        return True\n\n    @abstractmethod\n    def perform(self, job: JobDescriptor):\n        \"\"\"\n        This is where the work gets done, the worker will block here until the work itself is done\n        :param args:\n        :param kwargs:\n        :return:\n        \"\"\"\n        pass\n\nclass ModuleServiceAdapterABC(ServiceAdapterABC):\n    \"\"\"\n    This is a module-based service adapter, built on the service adapter base.\n    \"\"\"\n\n    type_name = \"ModuleServiceAdapter\"\n\n    def __init__(self, app, service: Service, required_services: List[Service], name: str) -> None:\n        super().__init__(app, service, required_services)\n        self.name = name\n","sub_path":"agent/sn_agent/service_adapter/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"257858826","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(\"../data/video/day_2.avi\")\n\nfgbg1 = cv2.BackgroundSubtractorMOG()\nfgbg2 = 
cv2.BackgroundSubtractorMOG2()\n\nkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))\n\nwhile True:\n ret, frame = cap.read()\n fgmask1 = fgbg1.apply(frame)\n fgmask2 = fgbg2.apply(frame)\n fgmask2 = cv2.morphologyEx(fgmask2, cv2.MORPH_OPEN, kernel)\n\n # cv2.imshow('original', frame)\n cv2.imshow('fg1', fgmask1)\n cv2.imshow('fg2', fgmask2)\n\n if cv2.waitKey(1) & 0xff == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n\n\n","sub_path":"learn_opencv/15.MOG_background_reduction.py","file_name":"15.MOG_background_reduction.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"100639101","text":"\nimport turtle\n\nTARGET_LLEFT_X = 100 # Target's lower-left X\nTARGET_LLEFT_Y = 250 # Target's lower-left Y\nTARGET_WIDTH = 25 # Width of the target\n\nfrom game_constants import MAX_DISTANCE\n\n# define constants locally if they are not shared\ndef draw_target(pen):\n EAST = 0 # Angle of east direction\n NORTH = 90 # Angle of north direction\n SOUTH = 270 # Angle of south direction\n WEST = 180 # Angle of west direction\n PROJECTILE_SPEED = 1 # Projectile's animation speed\n\n\n\n pen.hideturtle()\n pen.speed(0)\n pen.penup()\n pen.goto(TARGET_LLEFT_X, TARGET_LLEFT_Y)\n pen.pendown()\n pen.setheading(EAST)\n pen.forward(TARGET_WIDTH)\n pen.setheading(NORTH)\n pen.forward(TARGET_WIDTH)\n pen.setheading(WEST)\n pen.forward(TARGET_WIDTH)\n pen.setheading(SOUTH)\n pen.forward(TARGET_WIDTH)\n pen.penup()\n\n pen.home()\n pen.showturtle()\n pen.speed(PROJECTILE_SPEED)\n\ndef get_angle_distance():\n angle = int(input(\"Enter the projectile's angle 0-360: \"))\n distance = int(input(f'Enter the launch distance (1-{MAX_DISTANCE}): '))\n return angle, distance\n\ndef launch_turtle(pen, angle, distance):\n pen.setheading(angle)\n pen.pendown()\n pen.forward(distance)\n\ndef show_hit_message(pen):\n xcor = pen.xcor()\n ycor = pen.ycor()\n\n # Did it hit the target?\n is_in_x = ((xcor >= TARGET_LLEFT_X) and \n (xcor <= (TARGET_LLEFT_X + TARGET_WIDTH)))\n is_in_y = ((ycor >= TARGET_LLEFT_Y) and\n (ycor <= (TARGET_LLEFT_Y + TARGET_WIDTH)))\n is_hit = is_in_x and is_in_y\n\n # show message\n if is_hit:\n print('Target hit!')\n else:\n print('You missed the target.')\n\ndef test_get_angle_distance():\n angle, distance = get_angle_distance()\n print(angle, distance)\n\ndef run_game(pen):\n\n draw_target(pen)\n\n angle, distance = get_angle_distance()\n\n launch_turtle(pen, angle, distance)\n\n show_hit_message(pen)\n\nif __name__ == '__main__':\n test_get_angle_distance()\n","sub_path":"intermezzo/function-module/hit_target_game.py","file_name":"hit_target_game.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"79014296","text":"import os, glob\nimport json\nimport pandas as pd\n\nPATH_DATASET = '/mas/u/arana/datasets/kumbh/kumbh_labelled/kumbh_data'\npatient_ids = os.listdir(PATH_DATASET)\ncsv_file_name = 'kumbh_metadata.csv'\n\nlog = []\n\nfor i, patient in enumerate(patient_ids):\n\tpath_data = os.path.join(PATH_DATASET, patient, 'Perio_Frame')\n\tpath_to_gnd_img = os.path.join(PATH_DATASET, patient, 'masks')\n\n\timg_paths = glob.glob('{}/*.png'.format(path_data))\n\n\tfor j, img_path in enumerate(img_paths):\n\t\t_id = img_path.split('/')[-3]\n\t\timg_name = img_path.split('/')[-1]\n\n\t\tpath_x = img_path\n\t\tpath_y = glob.glob('{}/{}'.format(path_to_gnd_img, img_name))[0]\n\t\tjson_path = 
glob.glob('{}/{}.liveuser.perio.1_annotated.json'.format(path_data, img_name))[0]\n\n\t\twith open(json_path) as f:\n\t\t\tjson_data = json.load(f)\n\n\t\tkeyz = list(json_data.keys())\n\n\t\ttry:\n\t\t\t_score = int(json_data['overall_label'][-1])\n\t\t\tlog.append({'inp_img_path': path_x, 'ground_img_path': path_y, 'JSON_path': json_path, 'score': _score})\n\t\texcept:\n\t\t\tprint('No score provided for patient id: {}, frame: {}'.format(_id, img_name))\n\n\ndf = pd.DataFrame(log, columns=['inp_img_path', 'ground_img_path', 'JSON_path', 'score'])\ndf.to_csv(csv_file_name)\n","sub_path":"Preprocessing/create_csv.py","file_name":"create_csv.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"299781847","text":"import Plotdatafile\nimport numpy as np\n\nTbfile='/home/andrewjohnson/nobackup/SurfaceMelt/NRT/ID2-F17-SL2016015D.91H'\nTbfilename = 'ID2-F17-NL2016015D.91H'\n\nindata = np.fromfile(Tbfile, dtype='int16')\nindata = indata.reshape([721, 721])\ncube_91H = indata / float(10)\n\nPlotdatafile.PlotMap1Layer(cube_91H,Tbfile)","sub_path":"SaveSpecificEASE.py","file_name":"SaveSpecificEASE.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"577648168","text":"\"\"\" Add source files from a folder, and represent the data for further use\n    returns a dict with name as first index, then the dimensions (z,y,x etc)\n    If a format contains nested variables, they will get their own dict name,\n    And then the dimensions\n    Example:\n    Single Content File\n    imageData['file name'][x][y][z]\n    Multi Content File\n    imageData['file name']['variable name'][x][y][z]\n\"\"\"\n\nimport os\nimport scipy\nimport nibabel as nib\nfrom config import log\nfrom config import glob\n\nclass ImageContainer:\n    \"\"\"!\n    Import medical images from a folder, and convert to specified datatype\n    onlyInclude is a list of numbers, matching the filename.\n    \"\"\"\n    # Attributes:\n    # path(str): Path to file imported\\n\n    # imageData(list[str][str][int][int][int]): Contains all the pixel data of all the images loaded\\n\n    # imageClassification(list[int]): Contains label of the associated image being real or control\\n\n    # imageName(list[str]): Name of image files\\n\n\n    \n\n    def __init__(self, path, onlyInclude=[]):\n        \"\"\"!\n        @param path(str): Path to file imported\n        @param onlyInclude(list[int]): Numbers of the files to be imported exclusively\n        \"\"\"\n\n        log.info(\"Data import started\", extra={\"pID\": \"I\"})\n\n        ## str: Path to file imported\n        self.path = path\n\n        ## list[str][str][int][int][int]: Contains all the pixel data of all the images loaded\n        self.imageData = []\n\n        ## list[int]: Contains label of the associated image being real or control\n        self.imageClassification = []\n\n        ## list[str]: name of image files\n        self.imageName = []\n        self._import(self.path, onlyInclude)\n\n    def _loadClasses(self, path, onlyInclude):\n        \"\"\"!\n        Loads the labels files, containing classifications.\n\n        @param path(str): Path to labels files\n        @param onlyInclude(list[int]): Numbers of the files to load\n        \"\"\"\n        filePath = os.path.join(path, \"labels.mat\")\n        indexFile = self._loadMATfile(filePath)\n        self.imageClassification = [x[0] for x in indexFile['labels']]\n        log.info(\"Loaded labels\", extra={\"pID\": \"I\"})\n\n    def _import(self, path, onlyInclude):\n        \"\"\"!\n        Imports all .mat files located at path.\n\n        @param path(str): Path to files\n        @param onlyInclude(list[int]): Numbers 
of files to be imported\n \"\"\"\n self._loadClasses(path, onlyInclude)\n self.imageData = []\n self.imageName = []\n for n in range(1, len(self.imageClassification) + 1):\n if ((n in onlyInclude) or (len(onlyInclude) == 0)):\n fileName = str(n) + \".mat\"\n thisPath = os.path.join(path, fileName)\n data = self._loadMATfile(thisPath)\n self.imageData.append(data)\n self.imageName.append(fileName)\n self.imageClassification.append(self.imageClassification[n - 1])\n log.info(\"Loaded image:\" + fileName, extra={\"pID\": \"I\"})\n\n def _loadMATfile(self, filePath):\n \"\"\"!\n Uses scipy to import .mat file.\n\n @param filePath(str): Path to file that should be imported\n \"\"\"\n dataObject = scipy.io.loadmat(filePath)\n return dataObject\n\n # Another medical image format, that can contain multiple images\n def _loadNIIfile(self, filePath):\n \"\"\"!\n Uses scipy to import .nii file.\n\n @param filePath(str): Path to file that should be imported\n \"\"\"\n dataObject = nib.load(filePath)\n return dataObject\n","sub_path":"Code/DataImport.py","file_name":"DataImport.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"4053689","text":"import itertools\nimport matplotlib.pyplot as plt\n\n\ndef plot(dict):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n sample_outcomes = dict.keys()\n probabilities = dict.values()\n ax.bar(sample_outcomes, probabilities)\n plt.ylabel('Probability')\n plt.xlabel('Event outcomes')\n plt.show()\n\n\nif __name__ == '__main__':\n\n # all combinations for 3 coin flips\n # ('H', 'H', 'H'),\n # ('H', 'H', 'T'),\n # ('H', 'T', 'H'),\n # ('H', 'T', 'T'),\n # ('T', 'H', 'H'),\n # ('T', 'H', 'T'),\n # ('T', 'T', 'H'),\n # ('T', 'T', 'T')\n coin_flips = list(itertools.product(\"HT\", repeat=3))\n\n h0, h1, h2, h3 = 0, 0, 0, 0\n for i in coin_flips:\n if i.count('H') == 0: h0 += 1\n if i.count('H') == 1: h1 += 1\n if i.count('H') == 2: h2 += 1\n if i.count('H') == 3: h3 += 1\n\n h0_p, h1_p, h2_p, h3_p = h0 / len(coin_flips), h1 / len(coin_flips), h2 / len(coin_flips), h3 / len(coin_flips)\n\n dict = {'Zero H': h0_p, 'H': h1_p, 'HH': h2_p, 'HHH': h3_p}\n print(dict)\n plot(dict)\n","sub_path":"app/probability_distribution.py","file_name":"probability_distribution.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"88173714","text":"\nfrom http.server import HTTPServer, SimpleHTTPRequestHandler\n\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\n\nfrom sources.sources import count_age_company, transform_excel_into_products_list\n\n\nenv = Environment(\n loader=FileSystemLoader('.'),\n autoescape=select_autoescape(['html', 'xml'])\n)\n\ntemplate = env.get_template('template.html')\npath_excel_file = './sources/wine.xlsx'\n\nrendered_page = template.render(\n age_company=count_age_company(),\n products_list=transform_excel_into_products_list(path_excel_file),\n)\n\nwith open('index.html', 'w', encoding=\"utf8\") as file:\n file.write(rendered_page)\n\nserver = HTTPServer(('0.0.0.0', 8000), SimpleHTTPRequestHandler)\nserver.serve_forever()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"263191951","text":"\nfrom skimage import io\nfrom skimage.feature import (match_descriptors, ORB, plot_matches)\nfrom skimage.color 
import rgb2gray\nimport matplotlib.pyplot as plt\nimport os\n\n\n\"\"\"\nThis example demonstrates the ORB feature detection and binary description algorithm.\nIt uses an oriented FAST detection method and the rotated BRIEF descriptors.\nUnlike BRIEF, ORB is comparatively scale and rotation invariant while still employing the very efficient\nHamming distance metric for matching.\n\"\"\"\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nimg1 = rgb2gray(io.imread(dir_path+'/media/IRIS3.jpg'))\nimg2 = rgb2gray(io.imread(dir_path+'/media/IRIS4.jpg'))\n\ndescriptor_extractor = ORB(n_keypoints=200)\n\ndescriptor_extractor.detect_and_extract(img1)\nkeypoints1 = descriptor_extractor.keypoints\ndescriptors1 = descriptor_extractor.descriptors\n\ndescriptor_extractor.detect_and_extract(img2)\nkeypoints2 = descriptor_extractor.keypoints\ndescriptors2 = descriptor_extractor.descriptors\n\n\nmatches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)\n\nfig, ax = plt.subplots(nrows=2, ncols=1)\n\nplt.gray()\n\nplot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)\nax[0].axis('off')\nax[0].set_title(\"Original Image vs. Transformed Image\")\n\n\nplt.show()","sub_path":"OpenCV/orb.py","file_name":"orb.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"425952685","text":"import csv\nimport json\nimport argparse\n\n\ndef results_to_csv(result_file, output_file, delimiter=';'):\n    with open(result_file, 'r') as f:\n        json_data = json.load(f)\n    headers = ['elapsed', 'epoch', 'turret_name', 'scriptrun_time', 'error']\n    headers_row = {}\n\n    for item in json_data:\n        for k in item['custom_timers'].keys():\n            if k not in headers:\n                headers.append(k)\n                headers_row[k] = k\n\n    with open(output_file, \"w+\") as f:\n        writer = csv.DictWriter(f, fieldnames=headers, delimiter=delimiter)\n        headers_row.update({\n            'elapsed': 'elapsed time',\n            'epoch': 'epoch (in seconds)',\n            'turret_name': 'turret name',\n            'scriptrun_time': 'transaction time',\n            'error': 'error'\n        })\n        writer.writerow(headers_row)\n        for item in json_data:\n            timers = item['custom_timers']\n            del item['custom_timers']\n            item.update(timers)\n            writer.writerow(item)\n\n\ndef main():\n    parser = argparse.ArgumentParser(\"Create a csv file from a json results file\")\n    parser.add_argument('result_file', help=\"The original result file\")\n    parser.add_argument('output_file', help=\"The output path for the csv file\")\n    parser.add_argument('-d', '--delimiter', type=str, help=\"The delimiter for the csv file\", default=';')\n    args = parser.parse_args()\n\n    results_to_csv(args.result_file, args.output_file, args.delimiter)\n","sub_path":"oct/tools/results_to_csv.py","file_name":"results_to_csv.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"164728275","text":"import json\r\nimport requests\r\nimport re\r\nimport time\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nwhile (True):\r\n    t = open (\"Temprature.csv\",'a')\r\n    l = open (\"Light.csv\",'a')\r\n    h = open (\"Humidity1.csv\",'a')\r\n\r\n    url_rec=\"https://api.thingspeak.com/channels/522645/feeds.json?results=2\"\r\n    resp=requests.get(url_rec)\r\n    #print (resp.text)\r\n\r\n    select = repr(resp.text)\r\n    select = select[300:];\r\n\r\n    pick = re.search('field1\":\"(.+?)\",', select)\r\n    pick2 = re.search('field2\":\"(.+?)\",', select)\r\n    pick3 = 
re.search('field3\":\"(.+?)\"', select)\r\n\r\n if (pick):\r\n print (\"Temprature recorded: \"+pick.group(1))\r\n t.write(str(pick.group(1))+ '\\n')\r\n if (pick2):\r\n print (\"Light intensity recorded: \"+pick2.group(1))\r\n l.write(str(pick2.group(1))+ '\\n')\r\n if (pick3):\r\n print (\"Humidity recorded: \"+pick3.group(1))\r\n h.write(str(pick3.group(1))+ '\\n')\r\n\r\n t.close()\r\n l.close()\r\n h.close()\r\n\r\n \r\n print(\"All done\")\r\n time.sleep(10)\r\n \r\n\r\n","sub_path":"Delta IOT/delta iot/Delta Retrive1.py","file_name":"Delta Retrive1.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"616719578","text":"import os\nimport random\nimport json\nfrom contextlib import contextmanager\nfrom http.server import SimpleHTTPRequestHandler\nimport shutil\nimport pathlib\nimport socketserver\nimport socket\nimport logging\nimport threading\nimport watchdog.events, watchdog.observers, time\nfrom lxml import etree as ET\n\nfrom . import static\n\n# Get access to logger\nlog = logging.getLogger('ptxlogger')\n\n@contextmanager\ndef working_directory(path):\n \"\"\"\n Temporarily change the current working directory.\n\n Usage:\n with working_directory(path):\n do_things() # working in the given path\n do_other_things() # back to original path\n \"\"\"\n current_directory=os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(current_directory)\n\ndef linux_path(path):\n # hack to make core ptx and xsl:import happy\n p = pathlib.Path(path)\n return p.as_posix()\n\ndef ensure_directory(path):\n \"\"\"\n If the directory doesn't exist yet, create it.\n \"\"\"\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n\ndef directory_exists(path):\n \"\"\"\n Checks if the directory exists.\n \"\"\"\n return os.path.exists(path)\n\n\n# Grabs project directory based on presence of `project.ptx`\ndef project_path(dirpath=os.getcwd()):\n if os.path.isfile(os.path.join(dirpath,'project.ptx')):\n # we're at the project root\n return dirpath\n parentpath = os.path.dirname(dirpath)\n if parentpath == dirpath:\n # cannot ascend higher, no project found\n return None\n else:\n # check parent instead\n return project_path(dirpath=parentpath)\n\ndef project_xml(dirpath=os.getcwd()):\n if project_path(dirpath) is None:\n project_manifest = static.path('templates','project.ptx')\n else:\n project_manifest = os.path.join(project_path(dirpath), 'project.ptx')\n return ET.parse(project_manifest)\n\ndef target_xml(alias=None,dirpath=os.getcwd()):\n if alias is None:\n return project_xml().find(\"targets/target\")\n xpath = f'targets/target[@name=\"{alias}\"]'\n matches = project_xml().xpath(xpath)\n if len(matches) == 0:\n log.info(f\"No targets with alias {alias} found in project manifest file project.ptx.\")\n return None\n return project_xml().xpath(xpath)[0]\n\ndef text_from_project_xml(xpath,default=None):\n matches = project_xml().xpath(xpath)\n if len(matches) > 0:\n return matches[0].text.strip()\n else:\n return default\n\n#check xml syntax\ndef xml_syntax_is_valid(xmlfile):\n # parse xml\n try:\n source_xml = ET.parse(xmlfile)\n # we need to call xinclude once for each level of nesting (just to check for errors). 25 levels should be more than sufficient\n for i in range(25):\n source_xml.xinclude()\n log.debug('XML syntax appears well formed.')\n if (source_xml.getroot().tag != 'pretext'):\n log.error(f'The file {xmlfile} does not have \"\" as its root element. 
Did you use a subfile as your source? Check the project manifest (project.ptx).')\n return False\n # check for file IO error\n except IOError:\n log.error(f'The file {xmlfile} does not exist')\n return False\n\n # check for XML syntax errors\n except ET.XMLSyntaxError as err:\n log.error('XML Syntax Error, see error_syntax.log. Quitting...')\n with open('error_syntax.log', 'w') as error_log_file:\n error_log_file.write(str(err.error_log))\n return False\n except ET.XIncludeError as err:\n log.error(\n 'XML Syntax Error with instance of xinclude; see error_syntax.log. Quitting...')\n with open('error_syntax.log', 'w') as error_log_file:\n error_log_file.write(str(err.error_log))\n return False\n return True\n\ndef xml_source_validates_against_schema(xmlfile):\n #get path to RelaxNG schema file:\n schemarngfile = static.path('schema','pretext.rng')\n\n # Open schemafile for validation:\n relaxng = ET.RelaxNG(file=schemarngfile)\n\n # Parse xml file:\n source_xml = ET.parse(xmlfile)\n\n ## just for testing:\n # relaxng.validate(source_xml)\n # log = relaxng.error_log\n # print(log)\n\n # validate against schema\n try:\n relaxng.assertValid(source_xml)\n log.info('PreTeXt source passed schema validation.')\n except ET.DocumentInvalid as err:\n log.debug('PreTeXt document did not pass schema validation; unexpected output may result. See .error_schema.log for hints. Continuing with build.')\n with open('.error_schema.log', 'w') as error_log_file:\n error_log_file.write(str(err.error_log))\n return False\n return True\n\n# watchdog handler for watching changes to source\nclass HTMLRebuildHandler(watchdog.events.FileSystemEventHandler):\n def __init__(self,callback):\n self.last_trigger_at = time.time()-5\n self.callback = callback\n def on_any_event(self,event):\n # only trigger at most every 5 seconds\n if time.time() > self.last_trigger_at + 5:\n self.last_trigger_at = time.time()\n log.info(f\"\\nChanges to source detected.\\n\")\n self.callback()\n\n# boilerplate to prevent overzealous caching by preview server, and\n# avoid port issues\ndef binding_for_access(access=\"private\"):\n if os.path.isfile(\"/home/user/.smc/info.json\") or access==\"public\":\n return \"0.0.0.0\"\n else:\n return \"localhost\"\ndef url_for_access(access=\"private\",port=8000):\n if os.path.isfile(\"/home/user/.smc/info.json\"):\n project_id = json.loads(open('/home/user/.smc/info.json').read())['project_id']\n return f\"https://cocalc.com/{project_id}/server/{port}/\"\n elif access=='public':\n return f\"http://{socket.gethostbyname(socket.gethostname())}:{port}\"\n else:\n return f\"http://localhost:{port}\"\ndef serve_forever(directory,access=\"private\",port=8000):\n log.info(f\"Now starting a server to preview directory `{directory}`.\\n\")\n binding = binding_for_access(access)\n class RequestHandler(SimpleHTTPRequestHandler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, directory=directory, **kwargs)\n \"\"\"HTTP request handler with no caching\"\"\"\n def end_headers(self):\n self.send_my_headers()\n SimpleHTTPRequestHandler.end_headers(self)\n def send_my_headers(self):\n self.send_header(\"Cache-Control\", \"no-cache, no-store, must-revalidate\")\n self.send_header(\"Pragma\", \"no-cache\")\n self.send_header(\"Expires\", \"0\")\n class TCPServer(socketserver.TCPServer):\n allow_reuse_address = True\n looking_for_port = True\n while looking_for_port:\n try:\n with TCPServer((binding, port), RequestHandler) as httpd:\n looking_for_port = False\n url = url_for_access(access,port)\n 
log.info(f\"Server started at {url}\\n\")\n log.info(\"Use [Ctrl]+[C] to halt the server.\\n\")\n httpd.serve_forever()\n except OSError:\n log.warning(f\"Port {port} could not be used.\")\n port = random.randint(49152,65535)\n log.warning(f\"Trying port {port} instead.\\n\")\n\ndef run_server(directory,access,port,watch_directory=None,watch_callback=lambda:None):\n binding = binding_for_access(access)\n threading.Thread(target=lambda: serve_forever(directory,access,port),daemon=True).start()\n if watch_directory is not None:\n log.info(f\"\\nWatching for changes in `{watch_directory}` ...\\n\")\n event_handler = HTMLRebuildHandler(watch_callback)\n observer = watchdog.observers.Observer()\n observer.schedule(event_handler, watch_directory, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n log.info(\"\\nClosing server...\")\n if watch_directory is not None: observer.stop()\n if watch_directory is not None: observer.join()\n\n# Info on namespaces: http://lxml.de/tutorial.html#namespaces\nNSMAP = {\n \"xi\": \"http://www.w3.org/2001/XInclude\",\n \"xml\": \"http://www.w3.org/XML/1998/namespace\",\n}\ndef nstag(prefix,suffix):\n return \"{\" + NSMAP[prefix] + \"}\" + suffix\n\ndef expand_pretext_href(lxml_element):\n '''\n Expands @pretext-href attributes to point to the distributed xsl directory.\n '''\n for ele in lxml_element.xpath('//*[@pretext-href]'):\n ele.set('href',str(linux_path(static.core_xsl(ele.get('pretext-href'),as_path=True))))\n\ndef copy_fix_xsl(xsl_path, output_dir):\n xsl_dir = os.path.abspath(os.path.dirname(xsl_path))\n output_dir = os.path.abspath(output_dir)\n with working_directory(xsl_dir):\n for filename in os.listdir('.'):\n if filename.endswith('.xsl'):\n lxml_element = ET.parse(filename)\n expand_pretext_href(lxml_element)\n output_path = os.path.join(output_dir, filename)\n lxml_element.write(output_path)\n elif filename.endswith('.ent'):\n # an author might include a copy of the .ent file which should also be copied.\n shutil.copyfile(filename, os.path.join(output_dir, filename))\n","sub_path":"pretext/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"382713430","text":"import argparse\n\ndef write_column(list_of_dicts, col, file_path):\n with open(file_path, 'w+') as f:\n f.write(col+\"\\n\")\n for item in list_of_dicts:\n f.write(item[col]+'\\n') \n \ndef row_parser(string_input, head_list):\n row={}\n for num in range(len(head_list)):\n row[head_list[num]]=string_input.split(\",\")[num]\n return row\n\ndef head_parser(string_input):\n return string_input.split(\",\")\n\ndef csv_reader(input_path):\n linenum=0\n data=[]\n with open(input_path) as f:\n for line in f:\n linenum+=1\n if linenum==1:\n head=head_parser(line)\n else:\n data.append(row_parser(line, head))\n return data\n\nif __name__ == \"__main__\":\n # Define arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\"inputpath\", help=\"Input csv file path\", type=str)\n parser.add_argument(\"outputpath\", help=\"Output csv file path\", type=str)\n parser.add_argument(\"col\", help=\"Column name to be selected\", type=str)\n parser.set_defaults(keep=True)\n \n # Parse arguments\n args=parser.parse_args()\n \n # Select column \n data=csv_reader(args.inputpath)\n write_column(data,args.col,args.outputpath) \n 
\n","sub_path":"select_column.py","file_name":"select_column.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"241155404","text":"import hashlib\nimport json\nimport pickle\nfrom typing import Any, Dict\n\nimport pandas as pd\nimport redis\nimport requests\nfrom flask import Flask, json, request\nfrom google.cloud import storage\n\n\ndef dict_hash(dictionary: Dict[str, Any]) -> str:\n \"\"\"MD5 hash of a dictionary.\"\"\"\n dhash = hashlib.md5()\n # We need to sort arguments so {'a': 1, 'b': 2} is\n # the same as {'b': 2, 'a': 1}\n encoded = json.dumps(dictionary, sort_keys=True).encode()\n dhash.update(encoded)\n return dhash.hexdigest()\n\n\ndef download_model():\n trained_model_pickle_path = \"trained_model.pkl\"\n download_blob(\n \"trained-titanic-model\",\n \"logistic_regression.pkl\",\n trained_model_pickle_path,\n )\n model = None\n with open(trained_model_pickle_path, 'rb') as f:\n model = pickle.load(f)\n return model\n\n\ndef download_blob(bucket_name, source_blob_name, destination_file_name):\n \"\"\"Downloads a blob from the bucket.\"\"\"\n # The ID of your GCS bucket\n # bucket_name = \"your-bucket-name\"\n\n # The ID of your GCS object\n # source_blob_name = \"storage-object-name\"\n\n # The path to which the file should be downloaded\n # destination_file_name = \"local/path/to/file\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n\n # Construct a client side representation of a blob.\n # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve\n # any content from Google Cloud Storage. As we don't need additional data,\n # using `Bucket.blob` is preferred here.\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Downloaded storage object {} from bucket {} to local file {}.\".format(\n source_blob_name, bucket_name, destination_file_name\n )\n )\n\n\napp = Flask(__name__)\napp.config[\"DEBUG\"] = True\nmodel = download_model()\nredis = redis.Redis(host='redis', port=6379)\n\n\n@app.route('/prediction/results', methods=['POST'])\ndef predict_perf():\n # receive the prediction request data as the message body\n content = request.get_json()\n hash = dict_hash(content)\n cached_response = requests.post(\"http://cache:5003/cache/get\", json={\"key\": hash})\n \n if cached_response.text != \"Not found\":\n return cached_response.text\n \n df = pd.read_json(json.dumps(content), orient='records')\n resp = model.predict(df)\n requests.post(\"http://cache:5003/cache/set\", json={\"key\": hash, \"value\": str(resp)})\n return str(resp)\n\n\napp.run(host='0.0.0.0', port=5002)\n","sub_path":"api/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"112537640","text":"\nclass MaxSumOf3:\n \"\"\"\n https://leetcode.com/problems/maximum-sum-of-3-non-overlapping-subarrays/description/\n \"\"\"\n\n def maxSumOfThreeSubarrays(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: List[int]\n \"\"\"\n\n one = {}\n one_idx = {}\n two = {}\n two_idx = {}\n three = {}\n three_idx = {}\n\n n = len(nums)\n i1 = n - k\n\n s = sum(nums[i1:])\n\n one[i1] = s\n one_idx[i1] = (i1,)\n for i in range(i1 - 1, -1, -1):\n s = s + nums[i] - nums[i + k]\n one[i] = one[i + 1]\n one_idx[i] = one_idx[i + 1]\n if s >= one[i]:\n one[i] = s\n one_idx[i] = (i,)\n\n i2 = i1 
- k\n s = sum(nums[i2:i1])\n two[i2] = s + one[i1]\n two_idx[i2] = (i2,) + one_idx[i1]\n for i in range(i2 - 1, -1, -1):\n s = s + nums[i] - nums[i + k]\n two[i] = two[i + 1]\n two_idx[i] = two_idx[i + 1]\n if s + one[i + k] >= two[i + 1]:\n two[i] = s + one[i + k]\n two_idx[i] = (i,) + one_idx[i + k]\n\n i3 = i2 - k\n s = sum(nums[i3:i2])\n three[i3] = s + two[i2]\n three_idx[i3] = (i3,) + two_idx[i2]\n for i in range(i3 - 1, -1, -1):\n s = s + nums[i] - nums[i + k]\n three[i] = three[i + 1]\n three_idx[i] = three_idx[i + 1]\n if s + two[i + k] >= three[i]:\n three[i] = s + two[i + k]\n three_idx[i] = (i,) + two_idx[i + k]\n\n return list(three_idx[0])\n\n\n# --------------\nms = MaxSumOf3()\nprint([0, 3, 5], ms.maxSumOfThreeSubarrays([1, 2, 1, 2, 6, 7, 5, 1], 2))\nprint([1, 4, 7], ms.maxSumOfThreeSubarrays([7, 13, 20, 19, 19, 2, 10, 1, 1, 19], 3))\n","sub_path":"src/leetcode/MaxSumOf3.py","file_name":"MaxSumOf3.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"20420472","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom douban_movie_top250.items import DoubanMovieTop250Item\n\n\nclass MovieSpider(scrapy.Spider):\n name = 'movie'\n # spider单独配置pipelines\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'douban_movie_top250.pipelines.DuplicatesPipeline': 200,\n 'douban_movie_top250.pipelines.DoubanMovieTop250Pipeline': 300,\n }\n }\n allowed_domains = ['douban.com']\n start_urls = ['https://movie.douban.com/top250']\n\n def parse(self, response):\n item = DoubanMovieTop250Item()\n for it in response.xpath('//div[@class=\"item\"]'):\n item['ranking'] = it.xpath('div[@class=\"pic\"]/em/text()').extract_first()\n item['title'] = it.xpath('div/div/a/span[@class=\"title\"][1]/text()').extract_first()\n item['score'] = it.xpath('div/div/div/span[@class=\"rating_num\"]/text()').extract_first()\n # print(item['ranking'], item['title'], item['score'])\n yield item\n\n for page_num in range(2, 11):\n # 每页25部电影信息,共10页,下面循环2-10页。\n # page 2 : 'https://movie.douban.com/top250?start=25&filter='\n next_url = 'https://movie.douban.com/top250?start={}&filter='.format((page_num - 1) * 25)\n yield scrapy.http.Request(next_url, callback=self.parse)\n","sub_path":"douban_movie_top250/spiders/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"640782868","text":"\"\"\"\nConnects to the Reddit API to get rising submissions details and posts\nthem to a Discord webhook.\n\"\"\"\n\nimport os\nimport requests\nimport random\n\n\nWEBHOOK_URL = os.environ[\"WEBHOOK\"]\n\n\ndef main():\n \"\"\"Start the script.\"\"\"\n list_reddit = ['doodles','DigitalPainting','drawing','Illustration','conceptart','painting','Watercolor']\n print(\"Connecting to Reddit...\")\n message, image_url = get_rising_submissions(random.choice(list_reddit))\n\n print(\"Data received. 
Sending webhook...\")\n post_message(message, image_url)\n\n\ndef get_rising_submissions(subreddit):\n \"\"\"Connects to the Reddit API and queries the top rising submission\n from the specified subreddit.\n \n Parameters\n ----------\n subreddit : str\n The name of the subreddit without forward slashes.\n\n Returns\n -------\n tuple\n A tuple containing a formatted message and an image url.\n \n \"\"\"\n\n endpoint = random.choice(['top','rising'])\n\n url = f\"https://www.reddit.com/r/{subreddit}/{endpoint}.json?limit=1\"\n headers = {\"User-Agent\": \"Reddit Rising Checker v1.0\"}\n\n with requests.get(url, headers=headers) as response:\n\n data = response.json()[\"data\"][\"children\"]\n\n # Iterate over all the children.\n for item in data:\n\n item_data = item[\"data\"]\n\n # We will collect only the fields we are interested in.\n title = item_data[\"title\"]\n permalink = \"https://reddit.com\" + item_data[\"permalink\"]\n author = item_data[\"author\"]\n score = item_data[\"score\"]\n image_url = item_data[\"url\"]\n\n # Compose a Markdown message using string formatting.\n message = f\"[{title}]({permalink})\\nby **{author}**\\n**{score:,}** points\"\n\n return (message, image_url)\n\n\ndef post_message(message, image_url):\n \"\"\"Sends the formatted message to a Discord server.\n \n Parameters\n ----------\n message : str\n The formatted message to post.\n\n image_url : str\n The URL used as the thumbnail.\n \n \"\"\"\n\n payload = {\n \"username\": \"Mapache.Bot\",\n \"embeds\": [\n {\n \"title\": \"Tendencia de Arte en Reddit\",\n \"color\": 102204,\n \"description\": message,\n \"thumbnail\": {\"url\": image_url},\n \"footer\": {\"text\": \"Powered by Trashpandas™\"}\n }\n ]\n }\n\n with requests.post(WEBHOOK_URL, json=payload) as response:\n print(response.status_code)\n\n\nif __name__ == \"__main__\":\n\n main()\n","sub_path":"script_remote.py","file_name":"script_remote.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"461501620","text":"#!/usr/bin/python3\nclass Student():\n \"\"\"Student class with name and age\"\"\"\n def __init__(self, first_name, last_name, age):\n \"\"\"initializes new instance of Student\"\"\"\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n \"\"\"returns dict attributes of Student\"\"\"\n if attrs is None:\n obj_dict = self.__dict__\n return obj_dict\n else:\n o_D = self.__dict__\n D = dict(([k, v] for k, v in o_D.items() if k in attrs))\n return D\n","sub_path":"0x0B-python-input_output/12-student.py","file_name":"12-student.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"75211558","text":"from mgpAPI.erp_services.conncet_database import initconncetion\nfrom products.models import Product, ProductCategory\nfrom units.models import Unit\nimport binascii\n\n\ndef product_sync():\n cnxn = initconncetion()\n cursor = cnxn.cursor()\n\n try:\n product = Product.objects.filter(erp_type=True).order_by('-timestamp')[0]\n timestamp = str(product.timestamp)\n except Exception as e:\n timestamp = str(0)\n\n cursor.execute(\n 'SELECT \"timestamp\",\"No_\",\"Description\",\"Base Unit of Measure\",\"Inventory Posting Group\",\"Description 2\" FROM \"Mahachai Green co_,Ltd$Item\"Where CAST(timestamp as int) >' + timestamp)\n row = cursor.fetchone()\n while row:\n try:\n product = 
Product.objects.get(ext_code=str(row[1].encode('utf-8').strip()))\n hex2 = '0x' + str(binascii.b2a_hex(row[0]))\n time = int(hex2, 16)\n try:\n product_category = ProductCategory.objects.get(ext_code=str(row[4].encode('utf-8').strip()))\n except Exception as E:\n product_category = None\n\n try:\n unit = Unit.objects.get(ext_code=str(row[3].encode('utf-8').strip()))\n except Exception as E:\n unit = None\n\n product.timestamp = time\n product.product_category = product_category\n product.unit = unit\n product.title = str(row[2].encode('utf-8').strip())\n product.title2 = str(row[5].encode('utf-8').strip())\n product.description = str(row[2].encode('utf-8').strip())\n product.save()\n\n except Exception as a:\n hex2 = '0x' + str(binascii.b2a_hex(row[0]))\n time = int(hex2, 16)\n\n if len(ProductCategory.objects.filter(ext_code=str(row[4].encode('utf-8').strip()))) == 1:\n product_category = ProductCategory.objects.get(ext_code=str(row[4].encode('utf-8').strip()))\n else:\n product_category = None\n\n unit = Unit.objects.get(ext_code=str(row[3].encode('utf-8').strip()))\n Product.objects.create(timestamp=time, ext_code=str(row[1].encode('utf-8').strip()),\n title=str(row[2].encode('utf-8').strip()),\n description=str(row[2].encode('utf-8').strip()), uom=unit,\n product_category=product_category, erp_type=True,\n title2=str(row[5].encode('utf-8').strip()))\n\n row = cursor.fetchone()\n","sub_path":"mgpAPI/erp_services/product_sync.py","file_name":"product_sync.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"458696883","text":"#!/usr/bin/env python3\nimport argparse\nimport datetime\nimport os\nfrom pathlib import Path\n\nimport boto3\n\nDEFAULT_STAGING_DIRECTORY = '~/work/DeploymentsToS3Sync'\nDEFAULT_DROPBOX_DIRECTORY = '~/Dropbox (Amplio)'\n\nusage = '''Update latest Deployments from Dropbox to S3.'''\n\nbucket_name = 'amplio-progspecs'\nbucket = None # s3 bucket\nargs = None # argparse object with parsed command line args\n\ns3_client = boto3.client('s3')\n\ndropbox = None\nstaging = None\n\nreport = ['Checking for new deployments at {}'.format(datetime.datetime.now())]\nfound_deployments = {}\n\n@property\ndef s3_client2():\n return None\n\ndef error(msg):\n report.append('ERROR: {}'.format(msg))\n\n\n# Format and send an ses message. 
Options are\n# html - if true, send as html format\n# dry_run - if true, do not actually send email\ndef send_ses(fromaddr='ictnotifications@amplio.org',\n subject='',\n body_text='',\n recipient='ictnotifications@amplio.org'):\n \"\"\"Send an email via the Amazon SES service.\n\n Example:\n send_ses('me@example.com, 'greetings', \"Hi!\", 'you@example.com)\n\n Return:\n If 'ErrorResponse' appears in the return message from SES,\n return the message, otherwise return an empty '' string.\n \"\"\"\n # If we ever want to send as html, here's how.\n html = False\n\n message = {'Subject': {'Data': subject}}\n if html:\n message['Body'] = {'Html': {'Data': body_text}}\n else:\n message['Body'] = {'Text': {'Data': body_text}}\n\n client = boto3.client('ses')\n response = client.send_email(Source=fromaddr, Destination={'ToAddresses': [recipient]}, Message=message)\n\n print(response)\n\n# List the objects with the given prefix.\n# noinspection PyPep8Naming\ndef _list_objects(Bucket=bucket, Prefix='', **kwargs):\n paginator = s3_client.get_paginator(\"list_objects_v2\")\n kwargs = {'Bucket': Bucket, 'Prefix': Prefix, **kwargs}\n for objects in paginator.paginate(**kwargs):\n for obj in objects.get('Contents', []):\n yield obj\n\n\n# Lazy get the s3 bucket; cache for future calls\ndef get_bucket():\n global bucket\n if bucket == None:\n s3 = boto3.resource('s3')\n bucket = s3.Bucket('acm-content-updates')\n return bucket\n\n\n# Given a project or acm name, return the project name.\ndef cannonical_project_name(acmdir):\n project = acmdir.upper()\n if project[0:4] == 'ACM-':\n project = project[4:]\n return project\n\n\n# Given a project or acm name, return the acm name.\ndef cannonical_acm_name(project):\n acmdir = project.upper()\n if acmdir[0:4] != 'ACM-':\n acmdir = 'ACM-' + acmdir\n return acmdir\n\n\ndef get_s3_projects():\n global s3_client\n result = set()\n paginator = s3_client.get_paginator('list_objects_v2')\n kwargs = {'Bucket': bucket_name, 'Delimiter': '/'}\n for objects in paginator.paginate(**kwargs):\n for pref in objects.get('CommonPrefixes', []):\n prj = pref['Prefix'].strip('/')\n result.add(prj)\n return result\n\n\ndef get_db_projects(given_projects):\n global dropbox\n if given_projects is not None:\n # translate project names 'acm-test' => 'TEST'\n result = map(cannonical_project_name, given_projects)\n else:\n # ACM-ish files\n acm_dirs = [x.name for x in os.scandir(dropbox) if x.name[0:4] == 'ACM-']\n # Limit to those with a \"published\" directory\n result = [cannonical_project_name(x) for x in acm_dirs if Path(dropbox, x, 'programspec').exists()]\n print(result)\n return result\n\n\ndef get_projects(given_projects):\n db_projects = set(get_db_projects(given_projects))\n s3_projects = get_s3_projects()\n return s3_projects.intersection(db_projects)\n\n\ndef get_server_etags(project):\n result = {}\n for obj in _list_objects(Bucket=bucket_name, Prefix=project+'/'):\n fn = obj['Key'][len(project)+1:]\n result[fn] = obj['ETag'][1:-1] # Amazon adds bogus quotes around value.\n return result\n\ndef get_local_etags(progspecdir):\n result = {}\n fn = Path(progspecdir, 'etags.properties')\n if fn.exists():\n with open(fn, 'r') as vf:\n for line in vf:\n line = line.strip()\n parts = line.split('=')\n result[parts[0]] = parts[1]\n return result\n\ndef write_local_etags(progspecdir, etags):\n with open(Path(progspecdir, 'etags.properties'), 'w') as vf:\n for fn,local_ver in etags.items():\n line = '{}={}'.format(fn, local_ver)\n print(line, file=vf)\n\n\ndef sync_project(project):\n 
global dropbox, s3_client\n server_etags = get_server_etags(project)\n progspecdir = Path(dropbox, cannonical_acm_name(project), 'programspec')\n local_etags = get_local_etags(progspecdir)\n needed_etags = {}\n for fn,server_etag in server_etags.items():\n local_etag = local_etags.get(fn)\n if local_etag != server_etag:\n needed_etags[fn] = server_etag\n if len(needed_etags) > 0 or True:\n for fn,etag in needed_etags.items():\n try:\n # This should be much easier. And maybe it is, but boto3 \"documentation\" is so thin that one\n # can see through it.\n key = project+'/'+fn\n download_path = str(Path(progspecdir, fn))\n # head_object lets us get the versionid\n obj_head = s3_client.head_object(Bucket=bucket_name, Key=key)\n xtra = {'VersionId': obj_head['VersionId']}\n rslt = s3_client.download_file(Bucket=bucket_name, Key=key, Filename=download_path, ExtraArgs=xtra)\n except Exception as ex:\n return False\n\n # Clean extraneous files. Note: preserves any directories\n with os.scandir(progspecdir) as it:\n for entry in it:\n if entry.is_file() and entry.name != 'etags.properties' and entry.name not in server_etags:\n Path(entry).unlink()\n\n write_local_etags(progspecdir, server_etags) # now they're local as well\n return True\n\ndef sync_projects(projects):\n for proj in projects:\n # Retry twice in case of races\n tries = 3\n while tries > 0:\n if sync_project(proj):\n break\n tries -= 1\n\ndef main():\n global args, dropbox, staging\n arg_parser = argparse.ArgumentParser(description=\"Synchronize published content to S3.\", usage=usage)\n arg_parser.add_argument('--project', nargs='*', help='Project(s) to update. Default: all projects in Dropbox.')\n arg_parser.add_argument('--user', nargs='*', help='Users(s) to update. Default: all users in staging directory.')\n arg_parser.add_argument('--dropbox', default=DEFAULT_DROPBOX_DIRECTORY,\n help='Dropbox directory (default is ~/Dropbox).')\n arg_parser.add_argument('--staging', default=DEFAULT_STAGING_DIRECTORY,\n help='Directory in which latest Deployments are staged.')\n arg_parser.add_argument('--dryrun', '--dry-run', '-n', default=False, action='store_true',\n help='Do not copy or delete anything.')\n arg_parser.add_argument('--nos3', default=False, action='store_true', help='Do not upload to or delete from S3.')\n arg_parser.add_argument('--noemail', '--no-email', default=False, action='store_true',\n help='Do not send email.')\n arg_parser.add_argument('--force', '-f', default=False, action='store_true',\n help='Force updates, even if no changes detected.')\n args = arg_parser.parse_args()\n\n if args.dryrun:\n report.append('DRYRUN: No files will be changed.')\n if args.force:\n report.append('FORCE: All files considered missing or old.')\n\n dropbox = Path(os.path.expanduser(args.dropbox))\n staging = Path(os.path.expanduser(args.staging))\n\n projects = get_projects(args.project)\n sync_projects(projects)\n\n print(projects)\n\n\nif __name__ == \"__main__\":\n exit(main())\n","sub_path":"AWS-LB/cron/DeploymentsToS3Sync/specsync.py","file_name":"specsync.py","file_ext":"py","file_size_in_byte":8001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"601778774","text":"from firebase_admin import firestore\nfrom firebase_admin.firestore import ArrayUnion, ArrayRemove\nimport time\nfrom firebaseAdmin.setup import *\n\nfrom apis.functions.Symbol_Market import *\nfrom apis.functions.Funds import *\n\nstore = firestore.client()\n\ndef calculate_com(data,exitprice):\n 
print(get_symbol_type(data['symbol']))\n com = data['com']\n return com\n\ndef active_trigger(orderid,price):\n print(orderid)\n ref = store.collection(\"orders\").document(orderid).get()\n if(ref._data['status']=='Pending'):\n return active_limit_order(orderid)\n if(ref._data['status']=='Active'):\n exit_order(orderid,price)\n return {\"code\":201}\n\ndef exit_order(id,cur_price):\n ref = store.collection(\"orders\").document(id).get()\n data2 = {}\n data2['side'] = ref._data['side']\n data2['entryprice'] = ref._data['price']\n data2['exitprice'] = cur_price\n data2['entrytime'] = ref._data['ordertime']\n data2['exittime'] = time.strftime('%A, %Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n data2['qty'] = ref._data['qty']\n data2['symbol'] = ref._data['symbol']\n cur_com = calculate_com(ref._data,cur_price)\n data2['com'] = cur_com + ref._data['com']\n\n store.collection(\"orders\").document(id).delete()\n\n username = ref._data['username']\n ref2 = store.collection(\"ordersComplete\").where('username','==',username).get()\n if(len(ref2)==0):\n store.collection(\"ordersComplete\").document().set({\n \"username\": username,\n \"data\" : [data2]\n })\n else:\n for r in ref2:\n ref2 = store.collection(\"ordersComplete\").document(r.id).update({\n \"data\": ArrayUnion([data2])\n }) \n if(data2['side']=='buy'):\n add_eq_fund(username,(ref._data['margin']*data2['qty']) + (data2['exitprice']-data2['entryprice'])*(data2['qty']) - cur_com)\n else:\n add_eq_fund(username, (ref._data['margin']*data2['qty'])- (data2['exitprice']-data2['entryprice'])*(data2['qty']) - cur_com)\n return {\"data\":data2,\"code\":200}\n\ndef active_limit_order(id):\n store.collection(\"orders\").document(id).update({'status':'Active'})\n ref = store.collection(\"orders\").document(id).get()\n add_eq_fund(ref._data[\"username\"],-(ref._data['margin']*ref._data['qty']+ref._data['com']))\n data = ref._data\n data['code'] = 200\n return data\n\n","sub_path":"server/apis/functions/Triggers.py","file_name":"Triggers.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"368308808","text":"from socket import *\nfrom urllib.parse import urlparse\nimport threading\nimport sys\nimport traceback\nimport datetime\nBUFSIZE = 2048\nTIMEOUT = 10\nCRLF = '\\r\\n'\nlock = False\n#invalid usage check\nif len(sys.argv) < 2 or len(sys.argv) > 4:\n print(\"need at least three parameters and require at most five parameters\")\n sys.exit()\n\n#default: only port number given\nhost = \"0.0.0.0\"\nport = int(sys.argv[1])\nMT = False\nPT = False \n\n#port number with one option\nif len(sys.argv) == 3:\n if sys.argv[2] == \"-mt\":\n MT = True\n elif sys.argv[2] == \"-pt\":\n PT = True\n else:\n print(\"wrong inputs\")\n sys.exit()\n\n#port number with two options\nif len(sys.argv) == 4:\n if sys.argv[2] == \"-mt\" and sys.argv[3] == \"-pt\":\n MT = True\n PT = True\n elif sys.argv[2] == \"-pt\" and sys.argv[3] == \"-mt\":\n MT = True\n PT = True\n else:\n print(\"Wrong inputs\")\n sys.exit()\n\n\n# Dissect HTTP header into line(first line), header(second line to end), body\ndef parseHTTP(data): \n #byte to string\n\n dataArr = data.split(b'\\r\\n\\r\\n')\n first = dataArr[0].decode().split(CRLF)\n #line str\n line = first[0]\n\n body = b''\n if len(dataArr[0]) + 4 != len(data):\n body = data[len(dataArr[0]) + 4:]\n \n\n #header dict\n header = {}\n\n for i in range(1,len(first)):\n headerTemp = first[i].split(': ')\n header[headerTemp[0]] = 
headerTemp[1]\n \n return HTTPPacket(line, header, body)\n\n\n# Receive HTTP packet with socket\n# It support seperated packet receive\ndef recvData(conn):\n # Set time out for error or persistent connection end\n try:\n conn.settimeout(TIMEOUT)\n except:\n if PT:\n conn.close()\n pass\n pass\n\n\n #data received from a connected socket\n data = conn.recv(BUFSIZE)\n\n while b'\\r\\n\\r\\n' not in data:\n data += conn.recv(BUFSIZE)\n\n packet = parseHTTP(data)\n \n body = packet.body\n\n # Chunked-Encoding\n if packet.isChunked():\n readed = 0\n while True:\n while b'\\r\\n' not in body[readed:len(body)]:\n d = conn.recv(BUFSIZE)\n body += d\n size_str = body[readed:len(body)].split(b'\\r\\n')[0]\n size = int(size_str, 16)\n readed += len(size_str) + 2\n while len(body) - readed < size + 2:\n d = conn.recv(BUFSIZE)\n body += d\n readed += size + 2\n if size == 0: break\n packet.setHeader('Content-Length',str(len(body))) \n # Content-Length\n elif packet.getHeader('Content-Length'):\n received = 0\n expected = packet.getHeader('Content-Length')\n if expected == None:\n expected = '0'\n expected = int(expected)\n received += len(body)\n \n while received < expected:\n d = conn.recv(BUFSIZE)\n received += len(d)\n body += d\n\n packet.body = body\n\n # return packet.pack()\n return packet.pack()\n\n# HTTP packet class\n# Manage packet data and provide related functions\nclass HTTPPacket:\n # Constructer\n def __init__(self, line, header, body):\n self.line = line # Packet first line(String)\n self.header = header # Headers(Dict.{Field:Value})\n self.body = body # Body(Bytes)\n \n # Make encoded packet data\n def pack(self):\n ret = self.line + CRLF\n for field in self.header:\n ret += field + ': ' + self.header[field] + CRLF\n ret += CRLF\n ret = ret.encode()\n ret += self.body\n return ret\n \n # Get HTTP header value\n # If not exist, return empty string\n def getHeader(self, field):\n if field in self.header:\n return self.header[field]\n else:\n return ''\n \n # Set HTTP header value\n # If not exist, add new field\n # If value is empty string, remove field\n def setHeader(self, field, value):\n if field in self.header:\n if value == '':\n del self.header[field]\n else:\n self.header[field] = value\n else:\n if value != '':\n self.header[field] = value\n\n # Get URL from request packet line\n def getURL(self):\n lineArr = self.line.split(' ')\n return lineArr[1]\n \n def isChunked(self):\n if self.getHeader('Transfer-Encoding'):\n if self.getHeader('Transfer-Encoding') == 'chunked':\n return True\n return False\n #return 'chunked' in self.getHeader('Transfer-Encoding')\n #empty handle\n\n# Proxy handler thread class\nclass ProxyThread(threading.Thread):\n def __init__(self, conn, addr, counter):\n super().__init__()\n self.conn = conn # Client socket\n self.addr = addr # Client address\n self.counter = counter\n # Thread Routine\n def run(self):\n while True:\n try:\n #global lock\n #lock = True\n\n stringBuffer = ''\n connNow = datetime.datetime.now()\n conndate = connNow.strftime(\"%d/%b/%Y %H:%M:%S.%f\")\n stringBuffer += '[{}] {}\\n'.format(self.counter,conndate)\n stringBuffer += '[{}] > Connection from {} {}\\n'.format(self.counter,self.addr[0],self.addr[1])\n #when occurs erros from recvData\n try:\n data = recvData(self.conn)\n except:\n self.conn.close()\n pass\n break\n #when receive none\n if self.conn.fileno() == -1 :\n self.conn.close()\n pass\n break \n req = parseHTTP(data)\n url = urlparse(req.getURL())\n\n # Do I have to do if it is not persistent connection?\n if not 
PT:\n req.setHeader('Proxy-Connection', 'close')\n # Remove proxy infomation\n req.setHeader('Connection', req.getHeader('Proxy-Connection'))\n req.setHeader('Proxy-Connection', '')\n\n\n # socket created for a Server connection\n svr = socket(AF_INET, SOCK_STREAM)\n svr.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n # and so on...\n #check port number\n port_ = 80\n \n if url.port == None:\n port_ = 80\n else:\n port_ = url.port \n \n #try connect, handle connection fail\n try:\n svr.connect((url.hostname, port_))\n except Exception as e:\n # tb = traceback.format_exc()\n # print(tb)\n print (e)\n svr.close() \n sys.exit()\n \n # send a client's request to the server\n svr.sendall(req.pack())\n # receive data from the server\n\n data = recvData(svr)\n res = parseHTTP(data)\n\n\n res.setHeader('Proxy-Connection', res.getHeader('Connection'))\n res.setHeader('Connection', '') \n if not PT:\n res.setHeader('Proxy-Connection', res.getHeader('close'))\n #send back data to connected socket(client)\n #lock = False\n\n self.conn.sendall(res.pack())\n\n stringBuffer +='[{}] < {} {}\\n'.format(self.counter,res.getHeader('Content-Type'), res.getHeader('Content-Length'))\n stringBuffer +='[{}] < {}\\n'.format(self.counter,res.line)\n print(stringBuffer)\n\n # Set content length header\n \n # If support pc, how to do socket and keep-alive?\n if not PT:\n self.conn.close()\n except Exception as e:\n # tb = traceback.format_exc()\n # print(tb)\n print (e)\n except KeyboardInterrupt:\n self.conn.close()\n raise KeyboardInterrupt\n \ndef main():\n try:\n global lock\n lock = False\n counter = 0\n #create a socket objcet\n sock = socket(AF_INET, SOCK_STREAM)\n #prevent Address already in use error\n sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n \n #binds socket to specific host and port in case of fail close\n try:\n sock.bind((host, port))\n except Exception as e:\n print('Bind failed.')\n sock.close()\n sys.exit()\n #socket listen upto 20\n sock.listen(20)\n now = datetime.datetime.now()\n date = now.strftime(\"%d/%B/%Y %H:%M:%S.\")\n print('Proxy Server started on port %d at %s' % (port, date))\n if MT:\n print('* Multithreading – [ON]')\n elif not MT:\n print('* Multithreading – [OFF]')\n if PT:\n print('* Persistent Connection – [ON]')\n elif not PT:\n print('* Persistent Connection – [OFF]')\n print(\"\")\n #lock = False\n ptOnce = False\n\n #live server on\n while True:\n try:\n\n # Client connect\n if MT:\n conn, addr = sock.accept()\n counter += 1\n if not MT and not lock:\n if not ptOnce:\n conn, addr = sock.accept()\n counter += 1\n if PT:\n ptOnce = True\n # Start Handling\n if MT:\n pt = ProxyThread(conn, addr, counter)\n pt.start()\n if not MT and not PT:\n if not lock:\n lock = True\n pt = ProxyThread(conn, addr, counter)\n pt.start()\n pt.join()\n lock = False\n if not MT and PT:\n pt = ProxyThread(conn, addr, counter)\n pt.start()\n pt.join() \n\n\n \n except KeyboardInterrupt:\n print('\\nKeyboardInterrupt')\n sock.close()\n sys.exit()\n except:\n # tb = traceback.format_exc()\n # print(tb)\n pass\n\n\nif __name__ == '__main__':\n main()","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":10653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"85701702","text":"from sklearn.linear_model import LogisticRegression\r\nfrom sklearn.datasets.samples_generator import make_blobs\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn import linear_model\r\n#separating into labels and features\r\ndf = 
pd.read_csv('train.csv', delimiter=\",\")\r\nlabel = df['class']\r\nfeatures = df.drop('class', axis=1)\r\n#defining our logistic regression estimator and training\r\nregr = linear_model.LogisticRegression()\r\nregr.fit(features, label)\r\n# new instances where we do not know the answer\r\ndf2 = pd.read_csv('test.csv', delimiter=\",\")\r\ndf2=df2.values\r\n# make a prediction\r\nynew = regr.predict_proba(df2)\r\n# show the inputs and predicted outputs\r\nthislist = []\r\nfor i in range(len(ynew)):\r\n\t#print(\"Predicted=%s\" % ( ynew[i][1]))\r\n\t#print(ynew[i][1])\r\n\tthislist.append(ynew[i][1])\r\n#print(thislist)\r\n#probabilities for class=1\r\ndf = pd.DataFrame(thislist)\r\ndf.to_csv('ronaldsrundans.csv', index=False,header=False)\r\n\r\n\r\n","sub_path":"Sample/model4.py","file_name":"model4.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"292981125","text":"from django.urls import path\n\nfrom clients.views import *\n\nurlpatterns = [\n path(\"\", homeView, name=\"home\"),\n path(\"clients/\", clientsView, name=\"getClients\"),\n path(\"professions/\", professionsView, name=\"getProfessions\"),\n path(\"users/\", UsersFormView.as_view(), name=\"getUsers\"),\n\n path(\"users/list///\", usersView, name=\"usersList\"),\n path(\"users/list////\", usersView, name=\"usersList\"),\n\n path(\"schedule/\", ScheduleFormView.as_view(), name=\"getSchedule\"),\n path(\"schedule/list//////\",\n scheduleView, name=\"scheduleList\"),\n]\n","sub_path":"clients/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"159589531","text":"import os\nimport sys\nimport pyperclip\nfrom QuickProject import menu_output, get_config, dir_char, QproDefaultConsole, QproErrorString\n\nconfig = get_config()\nargv = []\nretain_arg = ['-br', '-b', '-r', '-h', '-i']\nhas_recog = {i: False for i in retain_arg}\n\n\ndef run(use_txt=False, executable_file=str(config['executable_filename'])):\n if os.path.exists(executable_file):\n cmd = executable_file.replace(' ', '\\ ') + ' '\n else:\n cmd = executable_file + ' '\n if argv:\n cmd += ' '.join(argv)\n if cmd.strip():\n cmd += (' < \"' + config['input_file'] + '\"' if use_txt else '')\n os.system(cmd)\n\n\ndef main():\n to_build = '-b' in sys.argv or '-br' in sys.argv\n to_run = '-r' in sys.argv or '-b' not in sys.argv\n filename = config['compile_filename']\n flag = False\n if '-debug' in sys.argv:\n raise ImportError\n if '-h' in sys.argv:\n return menu_output({'title': 'qrun usage\\n',\n 'lines': [\n ('-b', 'build'),\n ('qrun [bold green][-r]', 'run'),\n ('-br', 'build and run'),\n ('-h', 'help'),\n ('-i', 'use input.txt as input'),\n ('-if [bold magenta]', 'set file as input'),\n ('-if [bold magenta]-paste', 'use Clipboard content as input'),\n ('-f [bold magenta]', 'set file as build file'),\n ('*', 'add parameters for program')],\n 'prefix': 'qrun '})\n if '-f' in sys.argv:\n index = sys.argv.index('-f')\n if index == len(sys.argv) - 1:\n return QproDefaultConsole.print(QproErrorString, 'No file with -f')\n filename = sys.argv[index + 1]\n if not os.path.exists(filename):\n return QproDefaultConsole.print(QproErrorString, f'No such file: {filename}')\n flag = True\n if '-if' in sys.argv:\n index = sys.argv.index('-if')\n if index == len(sys.argv) - 1:\n QproDefaultConsole.print(QproErrorString, 'No file with -if')\n tmp_file = sys.argv[index + 
1]\n if tmp_file == '-paste':\n with open('cmake-build-debug' + dir_char + 'input.txt', 'w') as file:\n file.write(pyperclip.paste())\n else:\n __input_file__ = tmp_file\n if not os.path.exists(__input_file__):\n return QproDefaultConsole.print(QproErrorString, f'No such file: {__input_file__}')\n config['input_file'] = __input_file__\n o_file = config['executable_filename']\n record_file_name = os.path.basename(filename).split('.')[0]\n if config['compile_tool'] and to_build:\n os.system(config['compile_tool'].replace(config['compile_filename'], filename))\n if to_run:\n add_flag = True\n for i in sys.argv[1:]:\n if not add_flag:\n add_flag = True\n continue\n if i in retain_arg:\n if has_recog[i]:\n argv.append(i)\n else:\n has_recog[i] = True\n elif i in ['-if', '-f']:\n add_flag = False\n else:\n argv.append(i)\n run('-i' in sys.argv or '-if' in sys.argv, o_file)\n if config['compile_tool'] and flag:\n if config['compile_tool'].split()[0] == 'javac':\n os.remove('dist' + dir_char + record_file_name + '.class')\n else:\n os.remove(o_file)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"QuickProject/qrun.py","file_name":"qrun.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"68805372","text":"# coding=utf8\n\n# Copyright 2018 JDCLOUD.COM\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# NOTE: This class is auto generated by the jdcloud code generator program.\n\n\nclass UserAttachment(object):\n\n def __init__(self, remark=None, industry=None, subIndustry=None, business=None, website=None, cpState=None, cpProvince=None, cpCity=None, cpCountry=None, cpAddress=None, cpTelphone=None):\n \"\"\"\n :param remark: (Optional) 公司名称\n :param industry: (Optional) 主营行业\n :param subIndustry: (Optional) 主营行业子选项\n :param business: (Optional) 主营业务\n :param website: (Optional) 网站\n :param cpState: (Optional) 国家\n :param cpProvince: (Optional) 省\n :param cpCity: (Optional) 市\n :param cpCountry: (Optional) 区县\n :param cpAddress: (Optional) 详细地址\n :param cpTelphone: (Optional) 联系手机\n \"\"\"\n\n self.remark = remark\n self.industry = industry\n self.subIndustry = subIndustry\n self.business = business\n self.website = website\n self.cpState = cpState\n self.cpProvince = cpProvince\n self.cpCity = cpCity\n self.cpCountry = cpCountry\n self.cpAddress = cpAddress\n self.cpTelphone = cpTelphone\n","sub_path":"jdcloud_sdk/services/iam/models/UserAttachment.py","file_name":"UserAttachment.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"242025367","text":"import pandas as pd\nimport numpy as np\nimport json\n\ndf = pd.read_json('data.json', lines=True)\ndf_new = df.rename(columns={\"a\": \"address\", \"liid\": \"profile_url\",\n \"linkedin\": \"linkedin_id\", \"n\": \"name\", \"t\": \"mob_number\", \"e\": \"email\"})\n\n# data_main.csv\ndf_1 = df_new.drop(['mob_number', 'email'], 
axis=1)\ndf_1.to_csv('data_main.csv', index=False)\n\n# data_email.csv\ndf_2 = df_new.drop(['address', 'profile_url', 'name', 'mob_number'], axis=1)\ndf_2.to_csv('data_email.csv', index=False)\n\n# data_number.csv\ndf_3 = df_new.drop(['address', 'profile_url', 'name', 'email'], axis=1)\ndf_3.to_csv('data_number.csv', index=False)\n","sub_path":"Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"314718890","text":"import ai_assignments\nfrom ai_assignments.utils.visualization import plot_field_and_costs\nimport argparse\nimport textwrap\nimport os\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description=textwrap.dedent('''\n            this script lets you view a problem instance in JSON format,\n            and will optionally display solution paths superimposed on\n            the problem instance. the simple 2D environment for most of\n            these assignments consists of two boards that encode\n\n            SPACES in white color\n            WALLS in black color\n\n            in the upper display, and the costs for 'transitioning to a new state',\n            which means stepping onto a new position on the board, in the lower\n            display.\n        '''),\n        epilog=textwrap.dedent('''\n            example usage:\n\n            $ python view.py test/board.json test/bfs.path\n            this will view the problem instance 'test/board.json', and superimpose\n            the path 'test/bfs.path' on top of both views.\n\n            $ python view.py test/board.json test/*.path\n            this will view the problem instance 'test/board.json', and superimpose\n            ALL the files in the directory 'test', that end in '.path'\n        '''),\n        formatter_class=argparse.RawDescriptionHelpFormatter\n    )\n    parser.add_argument('problem_instance_name', type=str)\n    parser.add_argument('paths', nargs='*')\n    parser.add_argument('--coords', default=False, action='store_true')\n    parser.add_argument('--grid', default=False, action='store_true')\n    args = parser.parse_args()\n\n    problem = ai_assignments.load_problem_instance(args.problem_instance_name)\n\n    sequences = []\n    for path_filename in args.paths:\n        name = os.path.splitext(os.path.split(path_filename)[-1])[0]\n        with open(path_filename, 'r') as fh:\n            sequence_string = fh.read()\n        if sequence_string == '':\n            print('path file {} is empty'.format(path_filename))\n        else:\n            sequence = sequence_string.split(',')\n            sequences.append(\n                (name, problem.get_start_node(), sequence)\n            )\n\n    start_and_end = [\n        ('start', 'o', [problem.get_start_node()]),\n        ('end', 'o', [problem.get_end_node()])\n    ]\n\n    plot_field_and_costs(\n        problem,\n        sequences=sequences,\n        nodes=start_and_end,\n        show_coordinates=args.coords,\n        show_grid=args.grid\n    )\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Artificial_Intelligence_UE/ai_assignment_4/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":2477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"349056827","text":"import json\nimport sys\nfrom pyspark import SparkContext\nfrom pyspark import SparkConf\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\nfrom pyspark.sql.context import SQLContext\nimport pysolr\nfrom datetime import datetime\n\nclass BlankDict(dict):\n    def __missing__(self, key):\n        return ''\n\ndef send2solr(data):\n    tweet=json.loads(data)\n    try:\n        created_at=str(datetime.strptime(str(tweet[\"created_at\"].decode('utf-8')), \"%a %b %d %H:%M:%S %z %Y\").strftime(\"%Y-%m-%dT%H:%M:%SZ\"))\n        index = [{\n            \"created_at\": created_at,\n            \"id\": tweet[\"id_str\"],\n            \"text\": tweet[\"text\"],\n            \"user_name\": tweet[\"user\"][\"screen_name\"],\n            \"longitude\":(tweet[\"place\"][\"bounding_box\"][\"coordinates\"][0][0][0]+tweet[\"place\"][\"bounding_box\"][\"coordinates\"][0][2][0])/2, \n            \"latitude\":(tweet[\"place\"][\"bounding_box\"][\"coordinates\"][0][0][1]+tweet[\"place\"][\"bounding_box\"][\"coordinates\"][0][1][1])/2,\n            \"city\":tweet[\"place\"][\"full_name\"],\n            \"country_code\":tweet[\"place\"][\"country_code\"],\n            \"country\":tweet[\"place\"][\"country\"]\n\n        }]\n        solr = pysolr.Solr('http://192.168.36.131:8886/solr/geoBasedTweets')\n        solr.add(index, commit=True)\n        solr.commit()\n        return index\n    except Exception as e:\n        print(e)\n        return tweet\n\nif __name__ == '__main__':\n    if len(sys.argv) != 3:\n        print(\"Usage: GeoBasedProcessor.py \", file=sys.stderr)\n        exit(-1)\n\n    sc = SparkContext(appName=\"GeoBasedTweets\")\n    ssc = StreamingContext(sc, 20)\n    ssc.checkpoint(\"GeoBasedTweets-Checkpoint\")\n\n    zkQuorum, topic = sys.argv[1:]\n    twitterStream = KafkaUtils.createStream(ssc, zkQuorum, \"GeoBasedTweets\", {topic: 1}, {\"auto.offset.reset\": \"largest\"})\n    docs = twitterStream.map(lambda x: send2solr(x[1])).count()\n    docs.pprint()\n\n    ssc.start()\n    ssc.awaitTermination()","sub_path":"GeoBasedProcessor.py","file_name":"GeoBasedProcessor.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"368862073","text":"########################\n# Author: Te-Yuan Liu\n########################\n\nimport socket\nimport time \n\nhost = \"164.67.226.118\"\nport = 80\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((host, port))\nrequest = \"GET\"\n\ntry:\n    while True:\n        s.send(str.encode(request))\n        reply = s.recv(1024)\n        cmd = reply.decode(\"utf-8\")\n        print('command %s' % cmd)\n        \n        if cmd == 's':\n            raise KeyboardInterrupt('keyboard interrupt')\n        time.sleep(1)\n\nexcept KeyboardInterrupt:\n    print(\"keyboard interrupt\")\ns.close()\n\n\n","sub_path":"sample_client.py","file_name":"sample_client.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"351814314","text":"#Faça um Programa que \n#peça o raio de um círculo, \n#calcule e mostre sua área.\nimport math\n\ndef main():\n\traio = float(input(\"Entre com o raio: \"))\n\n\tarea = (raio ** 2) * math.pi\n\tprint(\"Area:\",area)\nmain()\n\n","sub_path":"python_exerc/Lista_De_Exercicios/Estrutura_Sequencial/estrutura_sequencial_6.py","file_name":"estrutura_sequencial_6.py","file_ext":"py","file_size_in_byte":212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"123199850","text":"# Copyright 2018-2021 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Contains transforms and helpers functions for decomposing arbitrary unitary\noperations into elementary gates.\n\"\"\"\nimport 
pennylane as qml\nfrom pennylane import math\n\n\ndef _convert_to_su2(U):\n r\"\"\"Check unitarity of a matrix and convert it to :math:`SU(2)` if possible.\n\n Args:\n U (array[complex]): A matrix, presumed to be :math:`2 \\times 2` and unitary.\n\n Returns:\n array[complex]: A :math:`2 \\times 2` matrix in :math:`SU(2)` that is\n equivalent to U up to a global phase.\n \"\"\"\n # Check unitarity\n if not math.allclose(math.dot(U, math.T(math.conj(U))), math.eye(2), atol=1e-7):\n raise ValueError(\"Operator must be unitary.\")\n\n # Compute the determinant\n det = U[0, 0] * U[1, 1] - U[0, 1] * U[1, 0]\n\n # Convert to SU(2) if it's not close to 1\n if not math.allclose(det, [1.0]):\n exp_angle = -1j * math.cast_like(math.angle(det), 1j) / 2\n U = math.cast_like(U, exp_angle) * math.exp(exp_angle)\n\n return U\n\n\ndef zyz_decomposition(U, wire):\n r\"\"\"Recover the decomposition of a single-qubit matrix :math:`U` in terms of\n elementary operations.\n\n Diagonal operations will be converted to a single :class:`.RZ` gate, while non-diagonal\n operations will be converted to a :class:`.Rot` gate that implements the original operation\n up to a global phase in the form :math:`RZ(\\omega) RY(\\theta) RZ(\\phi)`.\n\n Args:\n U (tensor): A 2 x 2 unitary matrix.\n wire (Union[Wires, Sequence[int] or int]): The wire on which to apply the operation.\n\n Returns:\n list[qml.Operation]: A ``Rot`` gate on the specified wire that implements ``U``\n up to a global phase, or an equivalent ``RZ`` gate if ``U`` is diagonal.\n\n **Example**\n\n Suppose we would like to apply the following unitary operation:\n\n .. code-block:: python3\n\n U = np.array([\n [-0.28829348-0.78829734j, 0.30364367+0.45085995j],\n [ 0.53396245-0.10177564j, 0.76279558-0.35024096j]\n ])\n\n For PennyLane devices that cannot natively implement ``QubitUnitary``, we\n can instead recover a ``Rot`` gate that implements the same operation, up\n to a global phase:\n\n >>> decomp = zyz_decomposition(U, 0)\n >>> decomp\n [Rot(-0.24209529417800013, 1.14938178234275, 1.7330581433950871, wires=[0])]\n \"\"\"\n U = _convert_to_su2(U)\n\n # Check if the matrix is diagonal; only need to check one corner.\n # If it is diagonal, we don't need a full Rot, just return an RZ.\n if math.allclose(U[0, 1], [0.0]):\n omega = 2 * math.angle(U[1, 1])\n return [qml.RZ(omega, wires=wire)]\n\n # If not diagonal, compute the angle of the RY\n cos2_theta_over_2 = math.abs(U[0, 0] * U[1, 1])\n theta = 2 * math.arccos(math.sqrt(cos2_theta_over_2))\n\n # If the top left element is 0, can only use the off-diagonal elements We\n # have to be very careful with the math here to ensure things that get\n # multiplied together are of the correct type in the different interfaces.\n if math.allclose(U[0, 0], [0.0]):\n phi = 1j * math.log(U[0, 1] / U[1, 0])\n omega = -phi - math.cast_like(2 * math.angle(U[1, 0]), phi)\n else:\n el_division = U[0, 0] / U[1, 0]\n tan_part = math.cast_like(math.tan(theta / 2), el_division)\n omega = 1j * math.log(tan_part * el_division)\n phi = -omega - math.cast_like(2 * math.angle(U[0, 0]), omega)\n\n return [qml.Rot(math.real(phi), math.real(theta), math.real(omega), wires=wire)]\n","sub_path":"pennylane/transforms/decompositions/single_qubit_unitary.py","file_name":"single_qubit_unitary.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"250557112","text":"import discord\r\nimport time\r\nimport openpyxl\r\nfrom captcha.image import 
ImageCaptcha\r\nimport captcha\r\nimport random\r\nimport asyncio\r\nimport os\r\n\r\nclient = discord.Client()\r\nrole_id = 835647291628322837\r\ncome_id = 835474138881327134\r\nexit_id = 835474138881327134\r\ngun_id = 835474138881327134\r\nteauk = 835714663102546030\r\ninj = 835647291628322837\r\nnotice_id = 831683438603010129\r\ndev = ''  # footer text; referenced below but never defined in this file, so a placeholder is used\r\n\r\n@client.event\r\nasync def on_member_join(member):\r\n embed = discord.Embed(description=f'{member.mention}님 {member.guild.name}에 오신걸 환영합니다!', colour=0x2F3136)\r\n embed.set_footer(text=dev)\r\n await client.get_channel(come_id).send(embed=embed)\r\n await member.send(embed=embed)\r\n\r\n@client.event\r\nasync def on_member_remove(member):\r\n embed = discord.Embed(description=f'{member.mention}님 {member.guild.name}에 다시 오실때까지 기다릴게요!', colour=0x2F3136)\r\n embed.set_footer(text=dev)\r\n await client.get_channel(exit_id).send(embed=embed)\r\n await member.send(embed=embed)\r\n\r\n@client.event\r\nasync def on_ready():\r\n await client.change_presence(activity=discord.Streaming(name=f\"{len(client.users)} 분들과 FM 봉준 라디오\", url='https://www.twitch.tv/bongradio'))\r\n #await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name='FM 봉준 라디오'))\r\n #await client.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='FM 봉준 라디오'))\r\n \r\n print('봇에 연결되었습니다: {}'.format(client.user.name))\r\n print('봇 아이디: {}'.format(client.user.id))\r\n\r\n@client.event\r\nasync def on_message(message):\r\n if message.content.startswith(\"!유저인증\"):\r\n if message.author.guild_permissions.ban_members:\r\n target = message.mentions[0]\r\n embed=discord.Embed(title='인증되었습니다', description=f'{target.mention} 님이 {message.guild.name} 서버 에서 인증되었습니다')\r\n await message.channel.send(embed=embed)\r\n await message.mentions[0].send(embed=embed)\r\n role = discord.utils.get(message.guild.roles, id=role_id)\r\n await message.mentions[0].add_roles(role)\r\n\r\n if message.content.startswith(\"!권한지급\"):\r\n if message.author.guild_permissions.ban_members:\r\n target = message.mentions[0]\r\n embed=discord.Embed(title='권한추가', description=f'{target.mention} 님에게 권한이 추가되었습니다')\r\n embed1=discord.Embed(title='확인하십시오', description=f'{target.mention} 님에게 주신 권한이 제대로 된 권한인지 한번더 확인해주십시오')\r\n await message.author.send(embed=embed1)\r\n await message.channel.send(embed=embed)\r\n await message.mentions[0].send(embed=embed)\r\n role = message.role_mentions[0]\r\n await message.mentions[0].add_roles(role)\r\n\r\n if message.content.startswith(\"!도움말\"):\r\n await message.channel.send(f'{message.author.mention} 님 DM을 확인해주세요')\r\n embed=discord.Embed(title='봇 도움말', description='봇을 많이 사용 해주세요')\r\n embed.add_field(name='사연 보내기', value='<#836241957435998239> 방에서 !사연 <할말> 로 사연을 보내보세요!')\r\n embed.add_field(name='건의사항 전송하기', value='봇의 DM 채널에서 !건의 <건의사항> 으로 건의사항을 보내주세요!')\r\n embed.set_footer(text='<@837876715983994901><<각종 문의')\r\n await message.author.send(embed=embed)\r\n\r\n if message.content.startswith('!전송'):\r\n await message.delete()\r\n if message.author.guild_permissions.manage_messages:\r\n msg = message.content[26:]\r\n await message.mentions[0].send(embed=discord.Embed(title=f\"**{message.author.name}** 님이 전송하신 메시지: {msg}\", colour=discord.Colour.blurple()))\r\n await message.channel.send(embed=discord.Embed(title=f'`{message.mentions[0]}`에게 DM을 보냈습니다', colour=discord.Colour.blue()))\r\n \r\n else:\r\n await message.channel.send(f'{message.author.mention}')\r\n message = await message.channel.send(embed=discord.Embed(title='⚠️ `명령어 
사용권한이 없습니다` ⚠️', colour=discord.Colour.red()))\r\n return\r\n\r\n if message.content.startswith(\"!건의\"):\r\n if message.channel.type is discord.ChannelType.private:\r\n await message.channel.send(\"정상적으로 건의가 전송되었습니다\")\r\n ss = message.content[4:]\r\n embed1=discord.Embed(title='건의가 왔습니다', description='관리자님 내용을 검토해주세요')\r\n embed1.add_field(name='전송자', value=f'{message.author.mention}')\r\n embed1.add_field(name='전송된 건의사항 : ', value=ss, inline=False)\r\n embed1.add_field(name=f'전송자 아이디 : {message.author.id}', value='내용이 부적절할시 이에따른 처벌을 내려주세요')\r\n embed1.set_footer(text='!전송 <@아이디> 로 답변 부탁드릴게요!')\r\n await client.get_channel(837914082463055892).send(embed=embed1)\r\n else:\r\n await message.channel.send(\"건의사항은 DM으로 부탁드릴게요\")\r\n\r\n if message.content.startswith('!폭파') or message.content.startswith('!채널삭제'):\r\n try:\r\n if message.author.guild_permissions.ban_members:\r\n await message.channel.delete()\r\n else:\r\n await message.channel.send(f'{message.author.mention}')\r\n await message.delete()\r\n message = await message.channel.send(embed=discord.Embed(title='⚠️ `명령어 사용권한이 없습니다` ⚠️', colour=discord.Colour.red()))\r\n except:\r\n pass\r\n\r\n if message.content == (\"!인증\"):\r\n await message.channel.send(\"디엠을 확인해주세요\")\r\n Image_captcha = ImageCaptcha()\r\n msg = \"\"\r\n a = \"\"\r\n for i in range(6):\r\n a += str(random.randint(0, 9))\r\n\r\n name = str(message.author.id) + \".png\"\r\n Image_captcha.write(a, name)\r\n\r\n embed=discord.Embed(title='위 사진에 보이는 숫자를 작성하셔야 인증이 완료됩니다.', colour=0x2F3136)\r\n embed.set_footer(text='제한시간 10초')\r\n await message.author.send(embed=embed, file=discord.File(name))\r\n def check(msg):\r\n return msg.author == message.author\r\n\r\n try:\r\n msg = await client.wait_for(\"message\", timeout=10, check=check)\r\n except:\r\n await message.author.send(embed=discord.Embed(title='시간이 초과되었습니다', colour=0x2F3136))\r\n return\r\n\r\n if msg.content == a:\r\n await message.author.send(embed=discord.Embed(title='인증되었습니다', colour=0x2F3136))\r\n role = discord.utils.get(message.guild.roles, id=inj)\r\n await message.author.add_roles(role)\r\n else:\r\n await message.author.send(embed=discord.Embed(title='재시도 해주세요', colour=0x2F3136))\r\n\r\n if message.content.startswith('!밴'):\r\n if message.author.guild_permissions.ban_members:\r\n try:\r\n target = message.mentions[0]\r\n except:\r\n await message.channel.send('유저가 지정되지 않았습니다')\r\n return\r\n \r\n j = message.content.split(\" \")\r\n try:\r\n reason = j[2]\r\n except IndexError:\r\n reason = 'None'\r\n\r\n \r\n\r\n embed = discord.Embed(title='차단', description=f'🚫**{message.guild.name}**에서 차단되었습니다.\\n사유: {reason}🚫', colour=0x2F3136)\r\n try:\r\n await target.send(embed=embed)\r\n except:\r\n pass\r\n await target.ban(reason=reason)\r\n\r\n embed = discord.Embed(title='✅ 차단 성공', description=f'🚫**{target}**이 차단되었습니다.\\n사유: {reason}🚫', colour=0x2F3136)\r\n await client.get_channel(835474138881327134).send(embed=embed)\r\n else:\r\n embed = discord.Embed(description=f'{message.author.mention}님 ⚠️ 명령어 사용권한이 없습니다 관리자만 사용이 가능합니다 ⚠️', colour=0x2F3136)\r\n await message.channel.send(embed=embed)\r\n\r\n if message.content.startswith('!킥'):\r\n if message.author.guild_permissions.ban_members:\r\n await message.delete()\r\n try:\r\n target = message.mentions[0]\r\n except:\r\n await message.channel.send('유저가 지정되지 않았습니다')\r\n return\r\n \r\n j = message.content.split(\" \")\r\n try:\r\n reason = j[2]\r\n except IndexError:\r\n reason = 'None'\r\n\r\n \r\n\r\n embed = 
discord.Embed(title='추방', description=f'🚫**{message.guild.name}**에서 추방되었습니다.\\n사유: {reason}🚫', colour=0x2F3136)\r\n try:\r\n await target.send(embed=embed)\r\n except:\r\n pass\r\n await target.kick(reason=reason)\r\n\r\n embed = discord.Embed(title='✅ 추방 성공', description=f'🚫**{target}**이 추방되었습니다.\\n사유: {reason}🚫', colour=0x2F3136)\r\n await client.get_channel(835474138881327134).send(embed=embed)\r\n else:\r\n embed = discord.Embed(description=f'{message.author.mention}님 ⚠️ 명령어 사용권한이 없습니다 관리자만 사용이 가능합니다 ⚠️', colour=0x2F3136)\r\n await message.channel.send(embed=embed)\r\n\r\n if message.content.startswith('!청소'):\r\n try:\r\n \r\n if message.author.guild_permissions.manage_messages:\r\n amount = message.content[4:]\r\n await message.delete()\r\n await message.channel.purge(limit=int(amount))\r\n embed = discord.Embed(description='🧹 메시지 ' + str(amount) + '개가 삭제되었습니다! 쓰레기 청소 전문은 고냥이~!', colour=0x2F3136)\r\n embed.set_footer(text=dev)\r\n await message.channel.send(embed=embed)\r\n await asyncio.sleep(2)\r\n await message.delete()\r\n else:\r\n embed = discord.Embed(description=f'{message.author.mention}님 ⚠️ 명령어 사용권한이 없습니다 관리자만 사용이 가능합니다 ⚠️', colour=0x2F3136)\r\n await message.channel.send(embed=embed)\r\n except:\r\n pass\r\n\r\n if message.channel.id == 836241957435998239:\r\n if message.author.bot:\r\n return\r\n await message.delete()\r\n embed2=discord.Embed(title='다음 아래중 골라주세요', description='선택해주세요')\r\n embed2.add_field(name='비공개 익명 전송', value='1️⃣ 를 선택해주세요')\r\n embed2.add_field(name='공개 전송', value='2️⃣ 를 선택해주세요')\r\n embed2.add_field(name='사연 취소 ', value='3️⃣ 를 선택해주세요')\r\n msg = await message.channel.send(embed=embed2)\r\n ss = message.content[4:]\r\n\r\n embed = discord.Embed(colour=discord.Colour.blue(), timestamp=message.created_at)\r\n embed.add_field(name='전송자', value='익명으로 전송된 사연 입니다', inline=False)\r\n embed.add_field(name='사연 메시지', value=ss, inline=False)\r\n embed.set_footer(text='봇에게 사연을 보내 저희 서버를 높이 올라가도록 해주세요!')\r\n\r\n embed1=discord.Embed(title='사연이 전송됨', description='관리자님 내용을 검토해주세요')\r\n embed1.add_field(name='전송자', value=f'{message.author.mention}')\r\n embed1.add_field(name='전송된 사연 메시지', value=ss, inline=False)\r\n embed1.set_footer(text=f'전송자 아이디 : {message.author.id} / 내용이 부적절할시 이에따른 처벌을 내려주세요')\r\n\r\n embed3 = discord.Embed(colour=discord.Colour.blue(), timestamp=message.created_at)\r\n embed3.add_field(name='전송자', value=f'{message.author.mention} 님께서 전송하신 사연입니다', inline=False)\r\n embed3.add_field(name='사연 메시지', value=ss, inline=False)\r\n embed3.set_footer(text='봇에게 사연을 보내 저희 서버를 높이 올라가도록 해주세요!')\r\n await msg.add_reaction(\"1️⃣\")\r\n await msg.add_reaction(\"2️⃣\")\r\n await msg.add_reaction(\"3️⃣\")\r\n\r\n while True:\r\n def check(reaction, user):\r\n return str(reaction.emoji) in ['1️⃣' , '2️⃣' , '3️⃣'] and user == message.author\r\n\r\n try:\r\n reaction, user = await client.wait_for('reaction_add', timeout=60.0, check=check)\r\n\r\n except asyncio.TimeoutError:\r\n await client.get_channel(837879770644480030).send(f\"{message.guild.name} 에서 시간이 초과됨\")\r\n break  # stop waiting; 'reaction' would otherwise be unbound below\r\n\r\n if (str(reaction.emoji) == '1️⃣'):\r\n await msg.delete()\r\n await client.get_channel(836242030954414150).send(embed=embed)\r\n await client.get_channel(836242313733996596).send(embed=embed1)\r\n await message.author.send(\"익명으로 메시지가 전송되었습니다\")\r\n break\r\n \r\n elif (str(reaction.emoji) == '2️⃣'):\r\n await msg.delete()\r\n await client.get_channel(836242030954414150).send(embed=embed3)\r\n await message.author.send(\"공개로 메시지가 전송되었습니다\")\r\n break\r\n\r\n elif (str(reaction.emoji) == 
'3️⃣'):\r\n await message.author.send(\"성공적으로 취소되었습니다\")\r\n await msg.delete()\r\n break\r\n\r\n if message.content.startswith('!공지'):\r\n try:\r\n if message.author.guild_permissions.manage_messages:\r\n msg = message.content[4:]\r\n await message.delete()\r\n message = await message.channel.send(embed=discord.Embed(title='✔️ `공지가 제대로 등록되었습니다` ✔️', colour=discord.Colour.blue())) \r\n embed = discord.Embed(colour=discord.Colour.blue(), timestamp=message.created_at)\r\n embed.add_field(name=\"공지사항 안내 \", value=msg , inline=False)\r\n embed.set_footer(text=message.author.name)\r\n await client.get_channel(notice_id).send('@everyone', embed=embed)\r\n else:\r\n await message.channel.send(f'{message.author.mention}')\r\n await message.delete()\r\n message = await message.channel.send(embed=discord.Embed(title='⚠️ `명령어 사용권한이 없습니다` ⚠️', colour=discord.Colour.red())) \r\n except:\r\n pass\r\n\r\naccess_token = os.environ[\"BOT_TOKEN\"]\r\nclient.run(access_token)\r\n","sub_path":"Radio.py","file_name":"Radio.py","file_ext":"py","file_size_in_byte":15219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"451296164","text":"from influxdb import InfluxDBClient\r\nimport constants as CS\r\nimport time\r\nimport random\r\n\r\nif __name__ == \"__main__\":\r\n client = InfluxDBClient(host = 'localhost', port = 8086, database = \"tsdb\")\r\n fields = {x: random.randint(0, 100) for x in CS.VARS_LIST}\r\n while True:\r\n client.write_points([\r\n {\r\n \"measurement\": \"measurement_tsdb\",\r\n \"fields\": fields\r\n }\r\n ])\r\n for element in fields:\r\n fields[element] += -1 + random.randint(0, 1)*2\r\n time.sleep(1)\r\n","sub_path":"InfluxeDB/imitator.py","file_name":"imitator.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"56177924","text":"# Thanks to visanciprian for this file!\n# http://forum.xda-developers.com/showpost.php?p=49218687&postcount=1733\n\nfrom multiboot.fileinfo import FileInfo\nimport multiboot.autopatcher as autopatcher\nimport re\n\nfile_info = FileInfo()\n\nfilename_regex = r\"^mahdi-.*jflte.*\\.zip$\"\nfile_info.name = 'Mahdi'\nfile_info.ramdisk = 'jflte/AOSP/AOSP.def'\nfile_info.patch = autopatcher.auto_patch\nfile_info.extract = autopatcher.files_to_auto_patch\n\ndef matches(filename):\n if re.search(filename_regex, filename):\n return True\n else:\n return False\n\ndef get_file_info():\n return file_info\n","sub_path":"patchinfo/jflte/ROMs/AOSP/mahdi.py","file_name":"mahdi.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"421355564","text":"def A_n_k(a, n, k, depth, used, curr, ans):\n\t\"\"\"\n\tdepth: start from 0, or the depth of the search\n\tused: track what items are in partial solution from the set n\n\tcurr: the current partial solution\n\tans: collect all the valid solutions\n\t\"\"\"\n\tprint('depth:', depth)\n\tif depth == k:\n\t\tprint('Append curr to ans')\n\t\tprint('Append curr: ', curr)\n\t\tans.append(curr[::])\n\t\treturn \n\n\tfor i in range(n):\n\t\tprint('i:',i)\n\t\tif not used[i]:\n\t\t\tcurr.append(a[i])\n\t\t\tused[i] = True \n\t\t\tprint('curr: ', curr)\n\t\t\tA_n_k(a, n, k, depth+1, used, curr, ans)\n\n\t\t\tcurr.pop()\n\t\t\tprint('backtrack: ', curr)\n\t\t\tused[i] = False\n\n\treturn \n\n\ndef should_swap(ls, start, end):\n\tfor i in range(start, end):\n\t\tif ls[i] == ls[end]:\n\t\t\treturn 
False\n\n\treturn True\n\n\ndef distinct_permutation(ls, start, end):\n\t\"\"\"\n\tprints all the permutations of a list \n\tInput: a list, start and end indices\n\tOutput: print all permutations\n\t\"\"\"\n\t#print('Interim list: ', ls)\n\tif (start >= end):\n\t\tprint(ls)\n\t\treturn \n\tfor i in range(start, end):\n\t\tcheck = should_swap(ls, start, i)\n\t\tif check == True:\n\t\t\t\n\t\t#print('i: {} start {} end {}'.format(i, start, end))\n\t\t#print('swap {} and {}'.format(ls[start], ls[i]))\n\t\t\n\t\t\tls[start], ls[i] = ls[i], ls[start] #swapping\n\t\t\n\t\t\tdistinct_permutation(ls, start+1, end)\n\t\t\n\t\t#print('backtrack {} and {}'.format(ls[start], ls[i]))\n\t\t\tls[start], ls[i] = ls[i], ls[start] #backtracking\n\n\ndistinct_permutation(list('aabb'), 0, 4)\n#a = [1,2,3]\n#n = len(a)\n#ans = [[None]]\n#used = [False] * n\n#ans = []\n#A_n_k(a, n, n, 0, used, [], ans)\n#print(ans)\n","sub_path":"Lecture3/distinct_permutation.py","file_name":"distinct_permutation.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"476342287","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Standard library imports.\nimport json\nimport os\n\n# Constant definitions.\n_MODULE_PATH = os.path.dirname(os.path.realpath(__file__))\nPACKAGE_NAME = \"app_icon_creator\"\nVERSION_FILE = os.path.join(_MODULE_PATH, \"version.json\")\n\nwith open(VERSION_FILE, \"r\") as file:\n AUTHOR = \"Behron Georgantas\"\n VERSION = json.load(file)\n\nDEFAULT_ENCODING = \"utf-8\"\n\n# Module dunder definitions.\n__author__ = AUTHOR\n__version__ = (\n f\"{VERSION['letter']}.\"\n f\"{VERSION['major']}.\"\n f\"{VERSION['minor']}.\"\n f\"{VERSION['patch']}.\"\n)\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"498801069","text":"import numpy as np\n\n\ndef codelength(links, tree, entropy_function=np.log, state_network=True, directed=False):\n if directed:\n source, target, weight = zip(*links)\n else:\n source, target, weight = [], [], []\n for s, t, w in links:\n source.append(s)\n source.append(t)\n target.append(t)\n target.append(s)\n weight.extend((w, w))\n\n if state_network:\n modules, _, state_ids, physical_ids = zip(*tree)\n else:\n modules, _, state_ids, physical_ids = zip(*((path, flow, phys_id, phys_id)\n for path, flow, _, phys_id in tree))\n\n level = [len(path) - 2 for path in modules] # top level is level 0\n level = np.array(level)\n\n physical_ids = np.array(physical_ids)\n\n module_per_level = {\":\".join(str(l) for l in path[:(j + 1)])\n for path in modules\n for j in range(len(path) - 1)}\n\n module_index = {module: i for i, module in enumerate(module_per_level)}\n\n # convert state node labels to integers from range(0, num_states)\n node_id = {int(n): i for i, n in enumerate(state_ids)}\n\n source_id, target_id = list(zip(*((node_id[n1], node_id[n2])\n for n1, n2 in zip(source, target))))\n\n # convert module label to integer (for each level)\n num_states = len(state_ids)\n num_levels = np.max(level) + 1\n module_per_level = -1 * np.ones(num_states * num_levels).reshape(num_states, num_levels).astype(int)\n for i, path in enumerate(modules):\n for j in range(len(path) - 1):\n mod_per_level = \":\".join(str(l) for l in path[:(j + 1)])\n module_per_level[i][j] = module_index[mod_per_level]\n\n module_id_per_level = -1 * np.ones(num_states * 
num_levels).reshape(num_states, num_levels).astype(int)\n for j in range(num_levels):\n module_per_level_j = module_per_level[:, j]\n modules = np.unique(module_per_level_j[module_per_level_j > -1])\n D = {module: i for i, module in enumerate(modules)}\n for i in range(num_states):\n if module_per_level_j[i] > -1:\n module_id_per_level[i][j] = D[module_per_level_j[i]]\n\n k_in = np.zeros(num_states) # node degree\n for alpha, w in zip(target_id, weight):\n k_in[alpha] += w\n norm_M = 1.0 / np.float(np.sum(k_in))\n\n enter, exit_ = {}, {}\n for l in range(num_levels):\n module_id = module_id_per_level[:, l]\n modules = np.unique(module_id[module_id > -1]) # list of unique module ids\n num_modules = len(modules)\n enter[l], exit_[l] = np.zeros(num_modules), np.zeros(num_modules) # number of links to/from module\n for alpha, beta, w in zip(source_id, target_id, weight):\n i = module_id[alpha]\n j = module_id[beta]\n if i != j:\n if i > -1:\n exit_[l][i] += w\n if j > -1:\n enter[l][j] += w\n\n L_M = 0.0 # average codelength\n\n # ---------------------------- the finest level ----------------------------\n out_modules = []\n for l in range(num_levels):\n module_id = module_id_per_level[:, l]\n modules = np.unique(module_id[level == l])\n num_modules = len(modules)\n n_i_exit = exit_[l]\n\n # H_Pi: the average length of codewords in module codebook i\n H_Pi, p_i = np.zeros(num_modules), np.zeros(num_modules)\n for i, mod_id in enumerate(modules):\n k_in_m = k_in[module_id == mod_id]\n physical_id = physical_ids[module_id == mod_id]\n n_i = [n_i_exit[mod_id]]\n for unique_phys_id in np.unique(physical_id):\n n_i.append(np.sum(k_in_m[physical_id == unique_phys_id]))\n n_i = np.array(n_i)\n p_i[i] = np.sum(n_i) * norm_M\n n_i = n_i[n_i > 0]\n if len(n_i) > 0:\n H_Pi[i] = (np.log(np.sum(n_i)) - (1.0 / np.sum(n_i)) * np.sum(n_i * entropy_function(n_i))) / np.log(2)\n\n L_m = np.sum(p_i * H_Pi) # module codebook\n L_M += L_m\n out_modules.append(L_m)\n\n # ---------------------------- intermediate level ----------------------------\n out_index = []\n for l in range(1, num_levels):\n module_mn_id = module_id_per_level[:, l]\n module_mn_id = module_mn_id[level >= l]\n module_m_id = module_id_per_level[:, l - 1]\n module_m_id = module_m_id[level >= l]\n modules = np.unique(module_m_id)\n num_modules = len(modules)\n n_m_exit = exit_[l - 1]\n n_mn_enter = enter[l]\n\n # H_Qm: the average length of codewords in submodule m\n H_Qm = np.zeros(num_modules)\n q_m_total = np.zeros(num_modules)\n for i in range(num_modules):\n n_i = np.insert(n_mn_enter[np.unique(module_mn_id[module_m_id == modules[i]])], 0, n_m_exit[modules[i]])\n n_i = n_i[n_i > 0]\n if len(n_i) > 0:\n H_Qm[i] = (np.log(np.sum(n_i)) - (1.0 / np.sum(n_i)) * np.sum(\n n_i * entropy_function(n_i))) / np.log(2)\n q_m_total[i] = np.sum(n_i) * norm_M\n\n L_i = np.sum(q_m_total * H_Qm) # index codebook\n L_M += L_i\n out_index.append(L_i)\n\n # ---------------------------- the coarsest level ----------------------------\n n_m_enter = enter[0]\n module_per_level_j = n_m_enter[n_m_enter > 0]\n\n if len(module_per_level_j) > 1:\n H_Q = (np.log(np.sum(module_per_level_j)) - (1.0 / np.sum(module_per_level_j)) * np.sum(\n module_per_level_j * entropy_function(module_per_level_j))) / np.log(2)\n else:\n H_Q = 0.0\n\n q_m_enter = n_m_enter * norm_M\n L_i = np.sum(q_m_enter) * H_Q\n L_M += L_i\n\n return L_M\n\n\nif __name__ == \"__main__\":\n from grassberger import entropy_function, coefficients\n from read_file import read_links, read_tree\n\n train_links 
= read_links(\"test/training_seed0_order2_1.net\", weight_type=int)\n validation_links = read_links(\"test/validation_seed0_order2_1.net\", weight_type=int)\n tree = read_tree(\"test/training_seed0_order2_1_states.tree\")\n\n Lm = codelength(train_links, tree)\n print(f\"Naive: training {Lm} bits\")\n\n Lm = codelength(validation_links, tree)\n print(f\"Naive: validation: {Lm} bits\")\n\n Gn = coefficients(1000000)\n\n Lm = codelength(train_links, tree, entropy_function(Gn))\n print(f\"Grassberger: training {Lm} bits\")\n\n Lm = codelength(validation_links, tree, entropy_function(Gn))\n print(f\"Grassberger: validation {Lm} bits\")\n","sub_path":"mapequation/codelength.py","file_name":"codelength.py","file_ext":"py","file_size_in_byte":6588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"340492242","text":"s = input()\nn = len(s)\n\nans = 'Yes'\n\nfor i in range(n):\n if i%2 == 0 and s[i] == 'L': ans = 'No'\n if i%2 != 0 and s[i] == 'R': ans = 'No'\n\nprint(ans)","sub_path":"Python_codes/p02910/s114264188.py","file_name":"s114264188.py","file_ext":"py","file_size_in_byte":155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"103432532","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\n\nplt.clf()\n\n# Pie chart, slices will be ordered, \n# plotted counter-clockwise:\n\nlabels = 'Frogs', 'Hogs', 'Dogs', 'Cats'\nsizes = [15, 30, 45, 10]\n\n# only \"explode\" the 2nd slice(i.e. 'Hogs')\nexplode = (0, 0.1, 0, 0)\n\nplt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%')\n\n\n","sub_path":"piechart_example.py","file_name":"piechart_example.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"397358490","text":"#!/usr/bin/env python\n# _*_ coding: utf-8_*_\n#\n# Copyright 2016 planc2c.com\n# thomas@time2box.com\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport pymongo\nimport sys\nimport os\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), \"../\"))\nfrom comm import singleton\nfrom global_const import MONGO_HOST, MONGO_PORT, MONGO_USR, MONGO_PWD, MONGO_DB\n\n\n# vendor mp.weixin.qq.com\n# {_id(vendor_id), content}\nclass vendor_hha_dao(singleton):\n __vendor_hha_collection = None;\n\n\n def __init__(self):\n if self.__vendor_hha_collection is None:\n conn = pymongo.MongoClient(MONGO_HOST, MONGO_PORT);\n db = conn[MONGO_DB];\n db.authenticate(MONGO_USR, MONGO_PWD);\n self.__vendor_hha_collection = db.vendor_hha;\n else:\n logging.info(\"vendor_hha_collection has inited......\");\n\n\n def create(self,json):\n self.__vendor_hha_collection.insert(json);\n logging.info(\"create __vendor_hha_collection success......\");\n\n\n # _id = vendor_id\n def query_not_safe(self, _id):\n cursor = self.__vendor_hha_collection.find({\"_id\":_id})\n data = None\n for i in cursor:\n data = i\n return data\n\n\n # _id = vendor_id\n def query(self, _id):\n data = self.query_not_safe(_id)\n if not data:\n data = {\"_id\":_id, \"content\":\"\"}\n else:\n try:\n data['content']\n except:\n data['content'] = ''\n return data\n\n\n def update(self, json):\n _id = json[\"_id\"];\n self.__vendor_hha_collection.update({\"_id\":_id},{\"$set\":json});\n logging.info(\"update __vendor_hha_collection success......\")\n\n\n def delete(self, _id):\n self.__vendor_hha_collection.remove({\"_id\":_id});\n logging.info(\"delete __vendor_hha_collection success......\");\n","sub_path":"foo/dao/vendor_hha_dao.py","file_name":"vendor_hha_dao.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"175542116","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport imghdr\nimport os\n\nfrom rasa_core import evaluate\nfrom rasa_core.evaluate import (\n run_story_evaluation,\n collect_story_predictions)\nfrom tests.conftest import DEFAULT_STORIES_FILE\n\n\ndef test_evaluation_image_creation(tmpdir, default_agent):\n stories_path = tmpdir.join(\"failed_stories.md\").strpath\n img_path = tmpdir.join(\"evaluation.png\").strpath\n\n run_story_evaluation(\n resource_name=DEFAULT_STORIES_FILE,\n agent=default_agent,\n out_file_plot=img_path,\n max_stories=None,\n out_file_stories=stories_path\n )\n\n assert os.path.isfile(img_path)\n assert imghdr.what(img_path) == \"png\"\n\n assert os.path.isfile(stories_path)\n\n\ndef test_evaluation_script(tmpdir, default_agent):\n completed_trackers = evaluate._generate_trackers(\n DEFAULT_STORIES_FILE, default_agent)\n\n golds, predictions, failed_stories = collect_story_predictions(\n completed_trackers, default_agent)\n\n assert len(golds) == 14\n assert len(predictions) == 14\n assert len(failed_stories) == 0\n","sub_path":"tests/test_evaluation.py","file_name":"test_evaluation.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"206970380","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.5/dist-packages/megdata/bti_userblocks.py\n# Compiled at: 2018-10-24 06:01:48\n# Size of 
source mod 2**32: 29047 bytes\nimport os, time\nfrom .common import *\nfrom numpy import zeros, int16, float\nBTI_USERBLOCKS = {}\n\nclass BTIMagInfoBlockHeader(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.name = megdata_read_str(fd, 16)\n ret.transform = bti_read_xfm(fd)\n ret.units_per_bit = megdata_read_float(fd)\n os.lseek(fd, 20, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIMagInfoBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.version = megdata_read_int32(fd)\n os.lseek(fd, 20, os.SEEK_CUR)\n ret.headers = []\n for j in range(6):\n ret.headers.append(BTIMagInfoBlockHeader.from_fd(fd))\n\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_Mag_Info'] = BTIMagInfoBlock\n\nclass BTICOHBlockPoint(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.pos = megdata_read_vec3(fd)\n ret.direction = megdata_read_vec3(fd)\n ret.error = megdata_read_double(fd)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTICOHBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.num_points = megdata_read_int32(fd)\n ret.status = megdata_read_int32(fd)\n ret.points = []\n for p in range(16):\n ret.points.append(BTICOHBlockPoint.from_fd(fd))\n\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_COH_Points'] = BTICOHBlock\n\nclass BTICCPXFMBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.method = megdata_read_int32(fd)\n os.lseek(fd, 4, os.SEEK_CUR)\n ret.transform = bti_read_xfm(fd)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['b_ccp_xfm_block'] = BTICCPXFMBlock\n\nclass BTIElectrodeEntry(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.label = megdata_read_str(fd, 16)\n ret.location = megdata_read_vec3(fd)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIElectrodeBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.electrodes = []\n while True:\n e = BTIElectrodeEntry.from_fd(fd)\n if e.label == '':\n break\n ret.electrodes.append(e)\n\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['b_eeg_elec_locs'] = BTIElectrodeBlock\n\nclass BTIChanConfig(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.subsys_type = megdata_read_int16(fd)\n ret.subsys_num = megdata_read_int16(fd)\n ret.card_num = megdata_read_int16(fd)\n ret.chan_num = megdata_read_int16(fd)\n ret.recdspnum = megdata_read_int16(fd)\n os.lseek(fd, 8, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIHardwareStructVersionBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.version = megdata_read_int16(fd)\n ret.struct_size = megdata_read_int16(fd)\n 
ret.entries = megdata_read_int16(fd)\n os.lseek(fd, 8, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_WHChanMapVer'] = BTIHardwareStructVersionBlock\nBTI_USERBLOCKS['B_WHSubsysVer'] = BTIHardwareStructVersionBlock\n\nclass BTIChannelMapBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n num_channels = None\n for block in blocks:\n if block.hdr.blocktype == 'B_WHChanMapVer':\n num_channels = block.data.entries\n break\n\n if num_channels is None:\n raise ValueError('Cannot find B_WHChanMapVer to determine number of channels')\n ret = cls()\n ret.channels = []\n for c in range(num_channels):\n ch = BTIChanConfig.from_fd(fd)\n ret.channels.append(ch)\n\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_WHChanMap'] = BTIChannelMapBlock\n\nclass BTIHardwareSubsysConfig(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.subsys_type = megdata_read_int16(fd)\n ret.subsys_num = megdata_read_int16(fd)\n ret.cards_per_sys = megdata_read_int16(fd)\n ret.channels_per_card = megdata_read_int16(fd)\n ret.card_version = megdata_read_int16(fd)\n os.lseek(fd, 2, os.SEEK_CUR)\n ret.offsetdacgain = megdata_read_float(fd)\n ret.squid_type = megdata_read_int32(fd)\n ret.timesliceoffset = megdata_read_int16(fd)\n ret.padding = megdata_read_int16(fd)\n ret.volts_per_bit = megdata_read_float(fd)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTISubsysMapBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n num_subsys = None\n for block in blocks:\n if block.hdr.blocktype == 'B_WHSubsysVer':\n num_subsys = block.data.entries\n break\n\n if num_subsys is None:\n raise ValueError('Cannot find B_WHSubsysVer to determine number of subsystems')\n ret = cls()\n ret.subsys = []\n for s in range(num_subsys):\n su = BTIHardwareSubsysConfig.from_fd(fd)\n ret.subsys.append(su)\n\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_WHSubsys'] = BTISubsysMapBlock\n\nclass BTIChannelLabelBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.version = megdata_read_int32(fd)\n ret.entries = megdata_read_int32(fd)\n os.lseek(fd, 16, os.SEEK_CUR)\n ret.labels = []\n for l in range(ret.entries):\n lb = megdata_read_str(fd, 16)\n ret.labels.append(lb)\n\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_ch_labels'] = BTIChannelLabelBlock\n\nclass BTICalibrationBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.sensor_no = megdata_read_int16(fd)\n os.lseek(fd, 2, os.SEEK_CUR)\n ret.timestamp = megdata_read_int32(fd)\n ret.logdir = megdata_read_str(fd, 256)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_Calibration'] = BTICalibrationBlock\n\nclass BTISysConfigTimeBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.sysconfig_name = megdata_read_str(fd, 512)\n ret.timestamp = megdata_read_int32(fd)\n return ret\n\n def 
str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIDeltaEnabledBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.delta_enabled = megdata_read_int16(fd)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_DELTA_ENABLED'] = BTIDeltaEnabledBlock\n\nclass BTIETableHeader(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.version = megdata_read_int32(fd)\n ret.entry_size = megdata_read_int32(fd)\n ret.num_entries = megdata_read_int32(fd)\n ret.filtername = megdata_read_str(fd, 16)\n ret.num_E_values = megdata_read_int32(fd)\n ret.reserved = megdata_read_str(fd, 28)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIETableBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.hdr = BTIETableHeader.from_fd(fd)\n if ret.hdr.version == 2:\n ret.chan_names = []\n for e in range(ret.hdr.num_entries):\n val = megdata_read_str(fd, 16)\n ret.chan_names.append(val)\n\n ret.e_chan_names = []\n for e in range(ret.hdr.num_E_values):\n val = megdata_read_str(fd, 16)\n ret.e_chan_names.append(val)\n\n ret.etable = megdata_read_float_matrix(fd, ret.hdr.num_entries, ret.hdr.num_E_values)\n else:\n ret.chan_names = [\n 'WH2500'] * ret.hdr.num_entries\n ret.hdr.num_E_values = 6\n ret.e_chan_names = ['MxA', 'MyA', 'MzA', 'MxaA', 'MyaA', 'MzaA']\n ret.etable = megdata_read_float_matrix(fd, ret.hdr.num_entries, ret.hdr.num_E_values)\n curpos = os.lseek(fd, 0, os.SEEK_CUR)\n if curpos % 8 != 0:\n os.lseek(fd, 8 - curpos % 8, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_E_table_used'] = BTIETableBlock\nBTI_USERBLOCKS['B_E_TABLE'] = BTIETableBlock\n\nclass BTIWeightTableHeader(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.version = megdata_read_int32(fd)\n ret.entry_size = megdata_read_int32(fd)\n ret.num_entries = megdata_read_int32(fd)\n ret.name = megdata_read_str(fd, 32)\n ret.description = megdata_read_str(fd, 80)\n ret.num_analog = megdata_read_int32(fd)\n ret.num_dsp = megdata_read_int32(fd)\n ret.reserved = megdata_read_str(fd, 72)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIWeightTableBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.hdr = BTIWeightTableHeader.from_fd(fd)\n if ret.hdr.version == 2:\n ret.chan_names = []\n for e in range(ret.hdr.num_entries):\n val = megdata_read_str(fd, 16)\n ret.chan_names.append(val)\n\n ret.analog_chan_names = []\n for e in range(ret.hdr.num_analog):\n val = megdata_read_str(fd, 16)\n ret.analog_chan_names.append(val)\n\n ret.dsp_chan_names = []\n for e in range(ret.hdr.num_dsp):\n val = megdata_read_str(fd, 16)\n ret.dsp_chan_names.append(val)\n\n ret.dsp_wts = megdata_read_float_matrix(fd, ret.hdr.num_entries, ret.hdr.num_dsp)\n ret.analog_wts = megdata_read_int16_matrix(fd, ret.hdr.num_entries, ret.hdr.num_analog)\n else:\n ret.chan_names = [\n 'WH2500'] * ret.hdr.num_entries\n ret.analog_chan_names = ['MxA', 'MyA', 'MzA']\n ret.hdr.num_analog = len(ret.analog_chan_names)\n ret.dsp_chan_names = ['MxA', 
'MyA', 'MzA', 'MxaA', 'MyaA', 'MzaA', 'GxxA', 'GyyA', 'GyxA', 'GzaA', 'GzyA']\n ret.hdr.num_dsp = len(ret.dsp_chan_names)\n ret.analog_wts = zeros((ret.hdr.num_entries, ret.hdr.num_analog), dtype=int16)\n ret.dsp_wts = zeros((ret.hdr.num_entries, ret.hdr.num_dsp), dtype=float)\n for w in range(ret.hdr.num_entries):\n ret.analog_wts[w, :] = megdata_read_int16_matrix(fd, 1, ret.hdr.num_analog)\n megdata_read_int16(fd)\n ret.dsp_wts[w, :] = megdata_read_float_matrix(fd, 1, ret.hdr.num_dsp)\n\n curpos = os.lseek(fd, 0, os.SEEK_CUR)\n if curpos % 8 != 0:\n os.lseek(fd, 8 - curpos % 8, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_weights_used'] = BTIWeightTableBlock\n\nclass BTITrigMaskEntry(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.name = megdata_read_str(fd, 20)\n ret.nbits = megdata_read_uint16(fd)\n ret.shift = megdata_read_uint16(fd)\n ret.mask = megdata_read_uint32(fd)\n os.lseek(fd, 8, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTITrigMaskBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.version = megdata_read_int32(fd)\n ret.entries = megdata_read_int32(fd)\n os.lseek(fd, 16, os.SEEK_CUR)\n ret.masks = []\n for e in range(ret.entries):\n m = BTITrigMaskEntry.from_fd(fd)\n ret.masks.append(m)\n\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nBTI_USERBLOCKS['B_trig_mask'] = BTITrigMaskBlock\n\nclass BTIUnknownUserBlock(object):\n\n def __init__(self, data):\n self.data = data\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIUserBlockHeader(object):\n\n @classmethod\n def from_fd(cls, fd):\n ret = cls()\n ret.nbytes = megdata_read_int32(fd)\n ret.blocktype = megdata_read_str(fd, 20)\n ret.checksum = megdata_read_int32(fd)\n ret.username = megdata_read_str(fd, 32)\n ret.timestamp = megdata_read_int32(fd)\n ret.user_space_size = megdata_read_int32(fd)\n ret.reserved = megdata_read_char(fd, 32)\n curpos = os.lseek(fd, 0, os.SEEK_CUR)\n if curpos % 8 != 0:\n os.lseek(fd, 8 - curpos % 8, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()\n\n\nclass BTIUserBlock(object):\n\n @classmethod\n def from_fd(cls, fd, blocks=None):\n ret = cls()\n ret.hdr = BTIUserBlockHeader.from_fd(fd)\n if ret.hdr.blocktype in list(BTI_USERBLOCKS.keys()):\n ret.data = BTI_USERBLOCKS[ret.hdr.blocktype].from_fd(fd, blocks)\n else:\n if ret.hdr.blocktype.startswith('BWT_'):\n ret.data = BTIWeightTableBlock.from_fd(fd, blocks)\n else:\n data = megdata_read_char(fd, ret.hdr.user_space_size)\n ret.data = BTIUnknownUserBlock(data)\n curpos = os.lseek(fd, 0, os.SEEK_CUR)\n if curpos % 8 != 0:\n os.lseek(fd, 8 - curpos % 8, os.SEEK_CUR)\n return ret\n\n def str_indent(self, indent=0):\n s = ' ' * indent + '\\n'\n return s\n\n def __str__(self):\n return self.str_indent()","sub_path":"pycfiles/megdata-1.0.3.linux-x86_64.tar/bti_userblocks.cpython-35.py","file_name":"bti_userblocks.cpython-35.py","file_ext":"py","file_size_in_byte":24224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"163127442","text":"def increment_by_value(d, l, index, value):\n if index in d:\n d[index] += value\n else:\n d[index] = value\n l.append(index)\n l.sort()\n\n\ndef occupy(s):\n num_stalls, num_people = [int(x) for x in s.split(\" \")]\n free, free_index = dict(), []\n free[num_stalls] = 1\n free_index.append(num_stalls)\n\n count = 0\n while 1:\n #print(\"free \", free)\n #print(\"free index \", free_index)\n length = free_index[-1]\n\n num_served = free[length]\n free[length] = 0\n free_index.remove(length)\n\n #print(\"serving \", num_served, \" people\")\n if length % 2 == 0:\n increment_by_value(free, free_index, length // 2, num_served)\n increment_by_value(free, free_index, length // 2 - 1, num_served)\n max, min = length // 2, length // 2 - 1\n else:\n increment_by_value(free, free_index, length // 2, num_served * 2)\n #free[length // 2] += 1\n max, min = length // 2, length // 2\n\n count += num_served\n\n if count >= num_people:\n return max, min\n\n\ndef main():\n l = int(input())\n for i in range(l):\n max, min= occupy(input())\n print(\"Case #{}: {} {}\".format(i + 1, max, min))\n\nmain()","sub_path":"solutions_python/Problem_201/2402.py","file_name":"2402.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"586973421","text":"import pytest\nfrom django.test import TestCase\nfrom django.urls import reverse\nfrom django.utils.http import urlencode\n\nfrom car_store.store.factories import (\n CarFactory,\n CarMakeFactory,\n CarModelFactory,\n CarSubmodelFactory,\n)\nfrom car_store.store.models import Car, CarMake, CarModel, CarSubmodel\n\npytestmark = pytest.mark.django_db\n\n\nclass TestView(TestCase):\n @classmethod\n def setUpTestData(cls) -> None:\n for i in range(100):\n make = CarMakeFactory()\n model = CarModelFactory(make=make)\n submodel = CarSubmodelFactory(model=model)\n CarFactory(make=make, model=model, submodel=submodel)\n\n def test_submodule_get_list_success(self):\n \"\"\"\n List all makes, models, and submodels\n \"\"\"\n url = reverse(\"v1-store:submodule-list\")\n res = self.client.get(url)\n self.assertEqual(res.status_code, 200)\n data = res.json()\n self.assertListEqual(list(data.keys()), [\"makes\", \"models\", \"submodels\"])\n self.assertEqual(len(data[\"makes\"]), CarMake.objects.count())\n self.assertEqual(len(data[\"models\"]), CarModel.objects.count())\n self.assertEqual(len(data[\"submodels\"]), CarSubmodel.objects.count())\n\n def test_car_list_success(self):\n url = reverse(\"v1-store:car-list\")\n res = self.client.get(url)\n self.assertEqual(res.status_code, 200)\n data = res.json()\n self.assertEqual(len(data), Car.objects.count())\n self.assertListEqual(list(data[0].keys()), [\"id\", \"make\", \"model\", \"submodel\"])\n\n def test_car_create_success(self):\n url = reverse(\"v1-store:car-list\")\n data = {\n \"year\": 2000,\n \"mileage\": 12345,\n \"price\": 12345,\n \"make\": \"make 0\",\n \"model\": \"model 0\",\n \"submodel\": \"submodel 0\",\n \"body_type\": \"\",\n \"transmission\": \"\",\n \"fuel_type\": \"\",\n \"exterior_color\": \"\",\n }\n res = self.client.post(\n url, urlencode(data), content_type=\"application/x-www-form-urlencoded\"\n )\n self.assertEqual(res.status_code, 200)\n\n def test_car_create_fail(self):\n url = reverse(\"v1-store:car-list\")\n data = {\n \"year\": 1800,\n \"mileage\": -1,\n \"price\": \"\",\n \"make\": \"X\",\n \"model\": \"X\",\n \"submodel\": \"X\",\n \"body_type\": \"\",\n \"transmission\": 
\"\",\n \"fuel_type\": \"\",\n \"exterior_color\": \"\",\n }\n res = self.client.post(url, data)\n self.assertEqual(res.status_code, 400)\n\n # It is a very basic check. This part can be moved\n # Will be beter to extract this part into a separate test\n self.assertEqual(\n res.json(),\n {\n \"mileage\": [\"Ensure this value is greater than or equal to 0.\"],\n \"make\": [\n \"Select a valid choice. That choice is not one of the available choices.\"\n ],\n \"model\": [\n \"Select a valid choice. That choice is not one of the available choices.\"\n ],\n \"submodel\": [\n \"Select a valid choice. That choice is not one of the available choices.\"\n ],\n \"year\": [\"Ensure this value is greater than or equal to 1900.\"],\n },\n )\n","sub_path":"car_store/store/tests/test_store.py","file_name":"test_store.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"71695820","text":"#\n# Copyright 2020 Yiwenlong(wlong.yi#gmail.com)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport os\nimport time\nimport yaml\n\nfrom orgconfig import config_organizations, find_node, KEY_ORGANIZATIONS\nfrom channel import config_sys_channel, config_user_channels, KEY_SYS_CHANNEL, KEY_USER_CHANNELS\nfrom chiancode import config_chaincodes, KEY_USER_CHAINCODES\nfrom api import support as api_support\nfrom utils.fileutil import mkdir_if_need\n\n\nclass Network:\n\n def __init__(self, config_file, target_dir):\n\n if not os.path.exists(config_file):\n raise ValueError(\"Config file not exists: %s\" % config_file)\n with open(config_file, 'r') as conf:\n raw_conf = yaml.load(conf, yaml.CLoader)\n\n self.Dir = target_dir\n mkdir_if_need(self.Dir)\n\n if KEY_ORGANIZATIONS not in raw_conf:\n raise Exception(\"No organization found in config file: %s\" % config_file)\n self.orgs_map = config_organizations(raw_conf[KEY_ORGANIZATIONS], target_dir)\n\n if KEY_SYS_CHANNEL not in raw_conf:\n raise Exception(\"No system channel found in config file: %s\" % config_file)\n self.sys_channel = config_sys_channel(self.orgs_map, raw_conf[KEY_SYS_CHANNEL])\n self.sys_channel_cache_dir = os.path.join(target_dir, self.sys_channel.Name)\n mkdir_if_need(self.sys_channel_cache_dir)\n\n self.channel_cache_dir = os.path.join(target_dir, \"user-channels\")\n mkdir_if_need(self.channel_cache_dir)\n\n if KEY_USER_CHANNELS in raw_conf:\n self.channels = config_user_channels(self.orgs_map, raw_conf[KEY_USER_CHANNELS])\n\n self.chaincode_cache_dir = os.path.join(target_dir, \"user-chaincodes\")\n mkdir_if_need(self.chaincode_cache_dir)\n\n if KEY_USER_CHAINCODES in raw_conf:\n self.chaincodes = config_chaincodes(raw_conf[KEY_USER_CHAINCODES])\n\n self.api_cache_dir = os.path.join(target_dir, \"api\")\n mkdir_if_need(self.api_cache_dir)\n\n def echo_hosts(self, ip=\"127.0.0.1\"):\n hosts_cache = \"\"\n for org in self.orgs_map.values():\n hosts_cache += \"\\n\"\n hosts_cache += \"# fabric network host configs for organization: %s\\n\" % org.Name\n 
for p in org.PeerNodes.values():\n hosts_cache += \"%s\\t%s\\n\" % (ip, p.Domain)\n for o in org.OrdererNodes.values():\n hosts_cache += \"%s\\t%s\\n\" % (ip, o.Domain)\n print(hosts_cache)\n\n def deploy(self):\n self.sys_channel.deploy(self.sys_channel_cache_dir)\n\n def boot(self):\n self.sys_channel.boot()\n\n def stop(self):\n self.sys_channel.stop()\n\n def clear(self):\n self.sys_channel.clear()\n\n def up(self):\n self.sys_channel.deploy(self.sys_channel_cache_dir)\n self.sys_channel.boot()\n\n time.sleep(15)\n\n orderer = self.sys_channel.Ords[0]\n for ch_name in self.channels:\n support = api_support.cli_api_support(orderer.Org.admin(), self.__channel_cache_dir__(ch_name))\n channel = self.__channel__(ch_name)\n channel.create(support, orderer)\n\n for org in channel.Orgs.values():\n support = api_support.cli_api_support(org.admin(), self.__channel_cache_dir__(ch_name))\n for peer in org.PeerNodes.values():\n channel.join(support, peer, orderer)\n\n def down(self):\n self.clear()\n os.system(\"rm -fr %s\" % self.Dir)\n\n def status(self, node_name=None):\n if node_name is None:\n self.sys_channel.status()\n else:\n node = find_node(self.orgs_map, node_name)\n node.deploy_handler.display()\n\n def __channel_cache_dir__(self, ch_name):\n return os.path.join(self.channel_cache_dir, ch_name)\n\n def __channel__(self, ch_name):\n if ch_name not in self.channels:\n raise Exception(\"No channel configuration found: %s\" % ch_name)\n return self.channels[ch_name]\n\n def channel_create(self, ch_name, orderer_name):\n orderer = find_node(self.orgs_map, orderer_name)\n support = api_support.cli_api_support(orderer.Org.admin(), self.__channel_cache_dir__(ch_name))\n self.__channel__(ch_name).create(support, orderer)\n\n def channel_join(self, ch_name, peer_name, orderer_name):\n peer = find_node(self.orgs_map, peer_name)\n orderer = find_node(self.orgs_map, orderer_name)\n support = api_support.cli_api_support(peer.Org.admin(), self.__channel_cache_dir__(ch_name))\n self.__channel__(ch_name).join(support, peer, orderer)\n\n def channel_list(self, peer_name):\n peer = find_node(self.orgs_map, peer_name)\n support = api_support.cli_api_support(peer.Org.admin(), self.api_cache_dir)\n support.peer(peer.deploy_handler.Address).channel_list()\n\n def chaincode_list_installed(self, peer_name):\n peer = find_node(self.orgs_map, peer_name)\n support = api_support.cli_api_support(peer.Org.admin(), self.api_cache_dir)\n support.peer(peer.deploy_handler.Address).chaincode_installed()\n\n def chaincode_package(self, peer_name, cc_name):\n peer = find_node(self.orgs_map, peer_name)\n support = api_support.cli_api_support(peer.Org.admin(), self.api_cache_dir)\n support.peer(peer.deploy_handler.Address).chaincode_package(self.chaincodes[cc_name], self.chaincode_cache_dir)\n","sub_path":"network/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"195289924","text":"import csv\nimport os\nimport regex\nimport winsound\nfrom collections import Counter, defaultdict\nfrom datetime import datetime\nfrom pprint import pprint, pformat\n\nimport time\nimport cv2\nimport numpy\nimport pytesseract\nfrom PIL import ImageGrab, Image\n\n## CONFIGURABLE VARIABLES\n# name of stats file - must be in same dir as this file\nstats_file = 'stats.csv'\n# top half of a 1920x1080 monitor\nmon = (0, 0, 1920, 1080 / 2)\n\n##\nstats_headers = ['Datetime', 'Damage Done', 'Kills', 'Time Survived', 
'Respawned Allies', 'Revived Allies',\n 'Killed Champion', 'Squad Placed']\nreplacements = [('x', ''), ('d', '0'), ('D', '0'), ('o', '0'), ('O', '0'), ('!', '1'), ('l', '1'), ('I', '1'),\n ('}', ')'), ('{', '('), (']', ')'), ('[', '('), ('$', ''), ('\\'', ''), ('\\\"', '')]\n# This doesn't seem to actually be doing anything, but leaving it in because it's working and I'm scared to change it\ntesseract_config = '-c tessedit_char_whitelist=()#01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz --psm 11'\nheaders_matcher_map = {\n 'Damage Done': regex.compile('(?:damagedone\\(){e<=2}(.*?)(?:\\]|\\))'),\n 'Killed Champion': regex.compile('(?:killedchampion\\(){e<=2}(.*?)(?:\\]|\\))'),\n 'Kills': regex.compile('(?:kills\\(){e<=1}(.*?)(?:\\]|\\))'),\n 'Respawned Allies': regex.compile('(?:respawnally\\(){e<=2}(.*?)(?:\\]|\\))'),\n 'Revived Allies': regex.compile('(?:reviveally\\(){e<=2}(.*?)(?:\\]|\\))'),\n 'Squad Placed': regex.compile('#([0-9]{1,2})'),\n 'Time Survived': regex.compile('(?:timesurvived\\(){e<=2}(.*?)(?:\\]|\\))')\n}\n\n\ndef process_squad_placed(text_list):\n # for deciphering single-digit squad placement from multi-digit squad placement\n squad_placed_list = []\n for text in text_list:\n try:\n numeric_place = int(text)\n if numeric_place == 2 or numeric_place == 20:\n squad_placed_list.append(20)\n elif numeric_place == 1 or numeric_place == 10:\n squad_placed_list.append(10)\n elif numeric_place > 20:\n squad_placed_list.append(int(text[0]))\n else:\n squad_placed_list.append(numeric_place)\n except:\n squad_placed_list.append(0)\n return squad_placed_list\n\n\ndef preprocess_image(img, blur_amount):\n img = img.convert('RGB')\n opencv_img = cv2.cvtColor(numpy.array(img), cv2.COLOR_RGB2GRAY)\n opencv_thr_img = cv2.threshold(opencv_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n opencv_blur_img = cv2.GaussianBlur(opencv_thr_img, (blur_amount, blur_amount), 0)\n return opencv_blur_img\n\n\ndef replace_nondigits(parsed_string):\n # making sure the fields that should be numeric are numeric\n return_list = []\n for s in parsed_string:\n for old, new in replacements:\n s = s.replace(old, new)\n try:\n return_list.append(int(s))\n except:\n continue\n return return_list\n\n\ndef write_to_file(filename, data):\n value_list = [data[header] for header in stats_headers]\n filepath = os.path.join(os.getcwd(), filename)\n if os.path.isfile(filepath):\n # if a stats file already exists, just append the game data\n write_method = 'a'\n rows_to_write = [value_list]\n else:\n # if file doesn't exist, create it, write header row, then game data\n write_method = 'w'\n rows_to_write = [stats_headers, value_list]\n\n with open(filename, write_method, newline='') as f:\n writer = csv.writer(f)\n for row in rows_to_write:\n writer.writerow(row)\n\n\ndef log_and_beep(print_text, beep_freq):\n pprint('[{}] {}'.format(datetime.now(), print_text))\n if beep_freq:\n winsound.Beep(beep_freq, 500)\n\n\nif __name__ == '__main__':\n print('Watching screen...')\n while True:\n # continuously grab screenshots and interpret them to identify the match summary screen\n img = preprocess_image(ImageGrab.grab(bbox=mon), 3)\n text = pytesseract.image_to_string(img, config=tesseract_config)\n text = text.replace(\"\\n\", \"\").replace(\" \", \"\").lower()\n\n if 'breakdown' in text or 'summary' in text:\n time.sleep(1)\n log_and_beep('Match Summary screen detected.', 2000)\n\n # takes 20 duplicate images immediately to get the most common (mode) interpretation later. 
should take ~2 secs\n dup_images = [ImageGrab.grab(bbox=mon) for _ in range(20)]\n\n mode_interpretation = defaultdict(None)\n mode_interpretation['Datetime'] = datetime.now()\n matches = defaultdict(list)\n\n log_and_beep('Finished taking backup screengrabs. Processing images -> text', 1500)\n # OCR for all the images captured, then assign interpretation to the associated stat\n blurs = [1,1,1,1,3,3,3,3,5,5,5,5,7,7,7,7,9,9,9,9]\n for image, blur_amount in zip(dup_images, blurs):\n img = preprocess_image(image, blur_amount)\n text = pytesseract.image_to_string(img, config=tesseract_config)\n text = text.replace(\"\\n\", \"\").replace(\" \", \"\").lower()\n\n print(text)\n for header, matcher in headers_matcher_map.items():\n if header == 'Squad Placed':\n parsed_text = process_squad_placed(matcher.findall(text))\n elif header == 'Time Survived':\n parsed_text = matcher.findall(text)\n else:\n parsed_text = replace_nondigits(matcher.findall(text))\n matches[header].extend(parsed_text)\n\n # for each of the 21 images, find the most common OCR text interpretation for each stat. If there are no\n # available interpretations of the stat, assign the value 'Not Captured' instead\n for k, v in matches.items():\n counts = Counter(v)\n most_common = counts.most_common(1)\n print(k, counts)\n if len(most_common) > 0:\n mode_interpretation[k] = most_common[0][0]\n else:\n mode_interpretation[k] = 'Not Captured'\n\n log_and_beep(\n 'Finished processing images. Image interpretations:\\n{}'.format(pformat(dict(mode_interpretation))),\n 1000)\n\n # writing to local file\n write_to_file(stats_file, mode_interpretation)\n log_and_beep('Finished writing interpretations to {} file.\\nWatching screen...'.format(stats_file), None)\n","sub_path":"apex_ocr.py","file_name":"apex_ocr.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"188210699","text":"# Mark of the Squad(2083004) | Cave of Life, Entrance to Horntail's Cave\nfrom net.swordie.ms.constants import BossConstants\nfrom net.swordie.ms.constants import GameConstants\nif sm.getFieldID() == 271040100 or sm.getFieldID() == 211070102:\n if sm.sendAskYesNo(\"Would you like to leave the fight?\"):\n sm.warpInstanceOut(271040000, 0)\nelse:\n if sm.isPartyLeader():\n sm.sendNext(\"#e#n \\r\\n Are you ready to fight Empress Cygnus?#b\\r\\n \\r\\n\"\n \"#L0#Request to join a Cygnus Expedition.#l\\r\\n\")\n selection = sm.sendNext(\"#e#n \\r\\n Select a mode. 
\\r\\n \\r\\n\"\n \"#L0#Easy(Level 150+) #l \\r\\n\"\n \"#L1#Normal (Level 175+) #l \\r\\n\")\n if selection == 0:\n sm.warpInstanceIn(271040100, True)\n sm.setInstanceTime(BossConstants.CYGNUS_TIME)\n elif selection == 1:\n sm.warpInstanceIn(211070102, True)\n sm.setInstanceTime(BossConstants.CYGNUS_TIME)\n else:\n sm.sendSayOkay(\"Please have your party leader speak to me.\")","sub_path":"scripts/npc/cygnus_accept.py","file_name":"cygnus_accept.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"575948278","text":"#coding=utf8\nimport random\nfrom PIL import Image\n\n'''\n图像缩放操作\n'''\n\nim = Image.open('tccc.jpeg') #打开一个图像文件\n\nw, h = im.size #获得图像尺寸\n\nprint('Original image size: %sx%s' % (w, h))\n\nim.thumbnail((w//2, h//2)) #缩放到50%\n\nprint('Resize image to: %sx%s' % (w//2, h//2))\n\nim.save('tcc_1.jpg', 'jpeg') # 把缩放后的图像用jpeg格式保存\n\n","sub_path":"Python/test_img/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"210948802","text":"# Problem No.: 13537\r\n# Solver: Jinmin Goh\r\n# Date: 20220823\r\n# URL: https://www.acmicpc.net/problem/13537\r\n\r\nimport sys\r\n\r\ndef update(arr, idx, val, n, l, r):\r\n if idx <= l and r <= idx:\r\n arr[n].append(val)\r\n return\r\n elif idx < l or r < idx:\r\n return\r\n arr[n].append(val)\r\n m = (l + r) // 2\r\n update(arr, idx, val, 2 * n, l, m)\r\n update(arr, idx, val, 2 * n + 1, m + 1, r)\r\n return\r\n\r\ndef search(arr, ql, qr, val, n, l, r):\r\n if ql <= l and r <= qr:\r\n p1 = 0\r\n p2 = len(arr[n]) - 1\r\n while p1 < p2:\r\n m = (p1 + p2) // 2\r\n if arr[n][m] <= val:\r\n p1 = m + 1\r\n else:\r\n p2 = m\r\n if arr[n][p2] <= val:\r\n p2 += 1\r\n return len(arr[n]) - p2\r\n elif qr < l or r < ql:\r\n return 0\r\n m = (l + r) // 2\r\n l_val = search(arr, ql, qr, val, 2 * n, l, m)\r\n r_val = search(arr, ql, qr, val, 2 * n + 1, m + 1, r)\r\n return l_val + r_val\r\n\r\ndef main():\r\n n = int(sys.stdin.readline().rstrip())\r\n nums = list(map(int, sys.stdin.readline().split()))\r\n q = int(sys.stdin.readline().rstrip())\r\n arr = [[] for _ in range(400010)]\r\n for i in range(n):\r\n update(arr, i + 1, nums[i], 1, 1, n)\r\n for i in range(1, 400010):\r\n if not arr[i]:\r\n break\r\n arr[i].sort()\r\n for _ in range(q):\r\n i, j, k = map(int, sys.stdin.readline().split())\r\n print(search(arr, i, j, k, 1, 1, n))\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"Solved/13537/13537.py","file_name":"13537.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"241508908","text":"lst=[1,2,3,4,5]\nlow=0\nupp=len(lst)-1\npair_list=[]\nelement=int(input(\"enter element\"))\nwhile(lowtot):\n low+=1\n else:\n upp-=1\nprint(pair_list)\n\n\n\n#for i in range(0,len(lst)):\n # for j in range(i+1,len(lst)):\n # if(lst[i]+lst[j]==num):\n # print(lst[i],lst[j])","sub_path":"LanguageFundamentals/pythoncollection/listprograms/pairsprint.py","file_name":"pairsprint.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"535893919","text":"from django.conf.urls import include, url\n#from django.conf.urls.defaults import *\nfrom django.contrib import admin\nfrom book.views import *\nurlpatterns = [\n \n url(r'^$',home),\n 
url(r'^search/$',search),\n url(r'^delete/(?P\\d{1,20})/(?P\\d{1,2})/$',delete),\n url(r'^add/$',add),\n url(r'^details/(?P\\d{1,20})/$',details),\n url(r'^update/(?P\\d{1,20})/$',update),\n]\n","sub_path":"gmmbook/gmmbook/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"470423105","text":"\"\"\" Compiled: 2020-09-18 10:38:52 \"\"\"\n\n#__src_file__ = \"extensions/export/./etc/FFileTransporter.py\"\n\"\"\"-------------------------------------------------------------------------------------------------------\nMODULE\n FFileTransporter - File transporter methods for Asset Management integrations\n\n (c) Copyright 2012 SunGard FRONT ARENA. All rights reserved.\n\nDESCRIPTION\n See ExportBaseReadMe.py for more information about this module\n\n-------------------------------------------------------------------------------------------------------\"\"\"\nimport collections\nimport os\nimport time\n\nimport FAssetManagementUtils\nimport FBusinessProcessUtils\n\nlogger = FAssetManagementUtils.GetLogger()\n\n\nclass FFileExporter(object):\n \"\"\"An engine for the transfer of export files to remote parties.\n\n Given an export process and pre-generated files containing exportable elements, this\n engine is responsible for delivering the files to the remote parties and updating the\n export-based business processes to reflect the success state of the transfer.\n\n \"\"\"\n def __init__(self, exportProcess):\n self._exportProcess = exportProcess\n\n def Execute(self, retries=0, retryDelay=1):\n \"\"\"Export all files in the export process.\n\n The method of transfer used is determined by a callback for each export file\n defined by the export processes' integration. This callback may optionally provide\n a collection of FFileTransfer instances to send the file to multiple recipients.\n\n Files defined by the export process must have been generated prior to performing\n this export process. 
If a number of retries is provided, each file transfer will be\n re-attempted on failure after retryDelay seconds.\n\n \"\"\"\n if not self._exportProcess.TestMode().IsFileTransferEnabled():\n if not self._exportProcess.TestMode().IsEnabled():\n for singleExport in self._exportProcess.SingleExportsAsList():\n if singleExport.IsExportable() and not self._exportProcess.GenerateEmptyFile():\n self._SetBusinessProcessesExported(singleExport.ExportableBusinessProcesses())\n logger.info('Skipping transferring of file(s).')\n return\n for singleExport in self._exportProcess.SingleExportsAsList():\n sheetTemplateId = singleExport.SheetTemplateId()\n if not singleExport.IsExportable() and not self._exportProcess.GenerateEmptyFile():\n continue\n try:\n fileTransfers = self._exportProcess.Integration().FileTransferFinderFunction()(singleExport.SingleExportIdentifier())\n if not isinstance(fileTransfers, collections.Iterable):\n fileTransfers = [fileTransfers, ]\n except Exception as e:\n msg = 'Failed to get a file transporter for export ''%s'': %s' % (sheetTemplateId, e)\n logger.error(msg)\n singleExport.Failed(msg)\n else:\n sourceFile = os.path.join(singleExport.FilePath(), singleExport.Filename())\n\n try:\n for ft in fileTransfers:\n self._ExportFile(ft, sourceFile, retries, retryDelay)\n self._SetBusinessProcessesExported(singleExport.ExportableBusinessProcesses())\n except (FFileTransfer.TransferError, ImportError, ValueError) as e:\n msg = 'Failed to transfer export file ''%s'': %s' % (sourceFile, str(e))\n logger.error(msg)\n singleExport.Failed(msg)\n\n def ExportProcess(self):\n return self._exportProcess\n\n @staticmethod\n def _ExportFile(fileTransfer, sourceFileName, retries, retryDelay):\n fileTransfer.SendWithRetries(sourceFileName, retries, retryDelay)\n logger.info('Successfully exported file ''%s''.', sourceFileName)\n\n def _SetBusinessProcessesExported(self, businessProcesses):\n count = len(businessProcesses)\n exportEvent = self._exportProcess.Integration().ExportEventId()\n if self._exportProcess.TestMode().IsEnabled():\n logger.info('[TEST MODE] Business processes would have transitioned with event \"%s\".', exportEvent)\n return\n for bp in businessProcesses:\n try:\n # Revalidate that the business process is still in the required state for an export\n if FBusinessProcessUtils.IsValidEvent(bp, exportEvent):\n bp.HandleEvent(exportEvent)\n bp.Commit()\n else:\n logger.error('Business process %d does not support export event \"%s\" (current state=\"%s\").',\n bp.Oid(), exportEvent, bp.CurrentStep().State().Name())\n count -= 1\n except RuntimeError as e:\n logger.error('Business process %d failed to handle event \"%s\": %s', bp.Oid(), exportEvent, e)\n count -= 1\n if count > 0:\n logger.info('Updated %d business process%s as having successfully exported.',\n count, 'es' if count > 1 else '')\n\n\nclass FFileTransfer(object):\n \"\"\"A base class for the transfer of a local file to a (possibly remote) destination.\"\"\"\n\n class TransferError(Exception):\n \"\"\"Exception class for all file transfer errors.\"\"\"\n pass\n\n def Send(self, sourceFileName):\n \"\"\"Attempt to send the source file to the specified destination. The transfer method\n used will depend on the selected base class.\n\n \"\"\"\n raise NotImplementedError('Base class does not support file transfer')\n\n def SendWithRetries(self, sourceFileName, retries=3, retryDelay=5):\n \"\"\"Send the file over a number of retries. 
If all retries are exhausted, a\n TransferError exception is raised.\n\n \"\"\"\n assert(retries >= 0 and retryDelay >= 0)\n attempt = 1\n while True:\n try:\n self.Send(sourceFileName)\n except FFileTransfer.TransferError as e:\n if attempt >= retries:\n if retries > 0:\n logger.error('Transfer failed after %d attempt(s), aborting.', retries)\n raise\n logger.warn('Failed to transfer file: %s', e)\n logger.info('Waiting %d second(s) before retrying transfer (attempt %d of %d)',\n retryDelay, attempt, retries)\n time.sleep(retryDelay)\n attempt += 1\n else:\n break\n\n @staticmethod\n def _ValidateSourceFile(sourceFileName):\n if not os.path.exists(sourceFileName):\n raise ValueError('Source file to transfer \"' + sourceFileName + '\" does not exist.')\n if os.path.getsize(sourceFileName) == 0:\n raise ValueError('Source file to transfer \"' + sourceFileName + '\" is empty.')\n\n\nclass FFileSystemTransfer(FFileTransfer):\n \"\"\"Represents the transfer of a file on the local file system.\"\"\"\n\n def __init__(self, destinationPath, overwriteExistingFiles=False, keepSourceFile=True):\n super(FFileSystemTransfer, self).__init__()\n self._destinationPath = destinationPath\n self._overwriteExistingFiles = overwriteExistingFiles\n self._keepSourceFile = keepSourceFile\n\n def Send(self, sourceFileName):\n import shutil\n self._ValidateSourceFile(sourceFileName)\n try:\n self._ValidateDestinationPath(self._destinationPath)\n destination_file = os.path.join(self._destinationPath, os.path.basename(sourceFileName))\n if not self._overwriteExistingFiles:\n destination_file = self._GetUniqueFilename(destination_file)\n transfer_file_func = shutil.copy2 if self._keepSourceFile else shutil.move\n\n logger.info('%s file ''%s'' to ''%s''.', 'Copying' if self._keepSourceFile else 'Moving',\n sourceFileName, destination_file)\n transfer_file_func(sourceFileName, destination_file)\n except IOError as e:\n raise FFileTransfer.TransferError(e)\n\n @staticmethod\n def _ValidateDestinationPath(path):\n if not os.path.lexists(path):\n os.makedirs(path)\n\n @staticmethod\n def _GetUniqueFilename(filename):\n count = 1\n base, ext = os.path.splitext(filename)\n while os.path.lexists(filename):\n filename = base + '_' + str(count) + ext\n count += 1\n return filename\n\n\nclass FFTPTransfer(FFileTransfer):\n \"\"\"Represents the transfer of a file to a remote FTP server.\"\"\"\n\n class FTPServer(object):\n \"\"\"Stores FTP server details.\"\"\"\n def __init__(self, hostname, port=21, username='anonymous', password='anonymous@', path=''):\n self.hostname = hostname\n self.port = port or 21\n self.username = username\n self.password = password\n self.path = path or ''\n\n @classmethod\n def createFromPartyOrContact(cls, obj):\n \"\"\"Given an FParty or FContact object, return an FTPServer instance with party/contact's\n FTP server details.\n\n This function is dependent on the following AdditionalInfo attributes being defined for\n the party/contact:\n 'FTP Hostname', 'FTP Port', 'FTP Username', 'FTP Password' & 'FTP Path'\n\n \"\"\"\n ai = obj.AdditionalInfo()\n return cls(hostname=ai.FTP_Hostname(), port=ai.FTP_Port(), username=ai.FTP_Username(),\n password=ai.FTP_Password(), path=ai.FTP_Path())\n\n def __init__(self, server):\n super(FFTPTransfer, self).__init__()\n self._ValidateFTPServer(server)\n self._server = server\n\n def Server(self, server):\n if server is None:\n return self._server\n self._ValidateFTPServer(server)\n self._server = server\n\n def Send(self, sourceFileName):\n import ftplib\n 
self._ValidateSourceFile(sourceFileName)\n try:\n logger.info('Sending ''%s'' to %s:%i%s via FTP.', sourceFileName, self._server.hostname,\n self._server.port, self._server.path)\n\n ftp = ftplib.FTP()\n ftp.connect(self._server.hostname, self._server.port)\n ftp.login(self._server.username, self._server.password)\n ftp.cwd(self._server.path)\n with open(sourceFileName, 'rb') as f:\n ftp.storbinary('STOR ' + os.path.basename(sourceFileName), f, 1024)\n ftp.close()\n except ftplib.all_errors as e:\n raise FFileTransfer.TransferError(e)\n\n @staticmethod\n def _ValidateFTPServer(server):\n if (not server or\n server.hostname is None or\n not isinstance(server.port, int) or\n server.path is None):\n raise ValueError('Invalid FTP server: ' + str(vars(server)))\n\n\nclass FSFTPTransfer(FFTPTransfer):\n \"\"\"Represents the transfer of a file to a remote SFTP server.\n\n NOTE: This class has a dependency on the 3rd Party Python module 'paramiko'\n for SFTP functionality (see http://www.lag.net/paramiko/).\n\n \"\"\"\n def __init__(self, server):\n super(FSFTPTransfer, self).__init__(server)\n\n def Send(self, sourceFileName):\n import paramiko\n self._ValidateSourceFile(sourceFileName)\n try:\n logger.info('Sending ''%s'' to %s:%i%s via SFTP.', sourceFileName, self._server.hostname,\n self._server.port, self._server.path)\n\n transport = paramiko.Transport((self._server.hostname, self._server.port))\n transport.connect(username = self._server.username, password = self._server.password)\n\n sftp = paramiko.SFTPClient.from_transport(transport)\n destinationFileName = os.path.basename(sourceFileName)\n if self._server.path != \"\":\n sftp.chdir(self._server.path)\n self._TransferFile(sftp, sourceFileName, destinationFileName)\n sftp.close()\n transport.close()\n except (paramiko.SSHException, paramiko.SFTPError, IOError) as e:\n raise FFileTransfer.TransferError(e)\n\n @staticmethod\n def _TransferFile(sftp, sourceFileName, remoteFileName):\n # This is a reimplementation of the built-in 'put' method in paramiko, without the subsequent\n # local-remote file size check after transfer. 
This check has been seen to have failed on some\n # SFTP servers, despite the transfer being successful.\n with file(sourceFileName, 'rb') as local:\n remote = sftp.file(remoteFileName, 'wb')\n try:\n remote.set_pipelined(True)\n while True:\n data = local.read(32768)\n if len(data) == 0:\n break\n remote.write(data)\n finally:\n remote.close()\n\n\nclass FEmailTransfer(FFileTransfer):\n \"\"\"Represents the transfer of a file as an attachment in an email message.\"\"\"\n\n class SMTPServer(object):\n \"\"\"Stores SMTP server details.\"\"\"\n def __init__(self, hostname, port=25, username=None, password=None, tls_mode=False):\n self.hostname = hostname\n self.port = port\n self.username = username\n self.password = password\n self.tls_mode = tls_mode\n\n class Message(object):\n \"\"\"Stores common email message details.\"\"\"\n def __init__(self, recipients, subject, sender = 'Front Arena', body = ''):\n self.recipients = recipients\n self.subject = subject\n self.sender = sender\n self.body = body\n\n def __init__(self, server, message):\n super(FEmailTransfer, self).__init__()\n self._ValidateSMTPServer(server)\n self._server = server\n self._ValidateMessage(message)\n self._message = message\n if not isinstance(self._message.recipients, collections.Iterable):\n self._message.recipients = [self._message.recipients, ]\n\n def Send(self, sourceFileName):\n import smtplib\n self._ValidateSourceFile(sourceFileName)\n try:\n logger.info('Emailing ''%s'' to %s via SMTP server %s.', sourceFileName,\n self._message.recipients, self._server.hostname)\n\n server = smtplib.SMTP()\n server.connect(self._server.hostname, self._server.port)\n if self._server.tls_mode:\n server.starttls()\n if self._server.username:\n server.login(self._server.username, self._server.password)\n server.verify(self._message.recipients)\n msg = self._GetEmailMessage(self._message.recipients, self._message, sourceFileName)\n server.sendmail(self._message.sender, self._message.recipients, msg.as_string())\n server.quit()\n except (smtplib.SMTPException, smtplib.socket.error, IOError) as e:\n raise FFileTransfer.TransferError(e)\n\n @staticmethod\n def _ValidateSMTPServer(server):\n if (not server or\n server.hostname is None or\n not isinstance(server.port, int)):\n raise ValueError('Invalid SMTP server: ' + str(vars(server)))\n\n @staticmethod\n def _ValidateMessage(message):\n if (not message or\n message.recipients is None or\n message.sender is None):\n raise ValueError('Invalid email message: ' + str(vars(message)))\n\n @classmethod\n def _GetEmailMessage(cls, recipients, message, filename=None):\n from email.mime.multipart import MIMEMultipart\n from email.mime.text import MIMEText\n\n msg = MIMEMultipart()\n msg['To'] = ', '.join(recipients)\n msg['Subject'] = message.subject\n msg['From'] = message.sender\n msg.attach(MIMEText(message.body, 'plain'))\n if filename:\n msg.attach(cls._GetEmailAttachment(filename))\n return msg\n\n @classmethod\n def _GetEmailAttachment(cls, filename):\n import mimetypes\n ctype, encoding = mimetypes.guess_type(filename)\n if not ctype or not encoding:\n ctype = 'application/octet-stream'\n maintype, subtype = ctype.split('/', 1)\n if maintype == 'text':\n from email.mime.text import MIMEText\n with open(filename) as f:\n attachment = MIMEText(f.read(), _subtype = subtype)\n else:\n from email.mime.base import MIMEBase\n from email import encoders\n attachment = MIMEBase(maintype, subtype)\n with open(filename, 'rb') as f:\n attachment.set_payload(f.read())\n 
encoders.encode_base64(attachment)\n        attachment.add_header('Content-Disposition', 'attachment',\n                              filename=os.path.basename(filename))\n        return attachment\n","sub_path":"Extensions/_export_base_py/FPythonCode/FFileTransporter.py","file_name":"FFileTransporter.py","file_ext":"py","file_size_in_byte":16989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"558803129","text":"logbook = {\n    'Bill Gates': [500, 1000, 1500],\n    'George Washington': [700, 750, 7000],\n    'Abraham Lincoln': [500, 50]\n}\n\n\nmain_menu = '''\nWelcome to the mailroom\nChoose a command:\nT - Create a Thank You Message\nR - Generate a Report\nquit - Quit Program\n'''\nmessage_prompt = '''\nEnter a name, or choose from the following:\nlist - Print a list of previous donors\nquit - Return to main menu.\n'''\nmessage_prompt2 = '''\nEnter donation amount or 'quit':\n'''\nt_letter = '''\nDear {name},\nThank you so much for your generous donation of {amount}. Your funds will go\nto maintaining our habitat for toothless ant eaters in Scandinavia. Grinner,\nour 50 year old ant eater is smiling at you.\n\nThanks again,\nChris Kistner\nDirector, PTA.\n'''\nenter_cont = '\\n\\nPress enter to continue.'\n\ncells = [20, 8, 3, 8]\n\n\ndef repl(prompt, validator=None):\n    while True:\n        user_input = input(prompt)\n        user_input = user_input.lower()\n        if user_input in ('q', 'quit'):\n            print('Quitting Mailroom')\n            return\n        if validator:\n            result = validator(user_input)\n            if result:\n                if 'Invalid' in str(result):\n                    print(result)\n                else:\n                    return result\n            else:\n                return\n\n\ndef main_menu():\n    user_input = input('Welcome to the mailroom Choose a command: \\\n    T - Create a Thank You Message \\\n    R - Generate a Report \\\n    quit - Quit Program')\n    if user_input in ('t'):\n        repl(message_prompt, t_menu)\n    elif user_input in ('r'):\n        report()\n    else:\n        return 'Invalid Command.'\n\n\ndef list_donor_names():\n    # returns a list of all donor names in the logbook, one per line.\n    # global logbook\n    return '\\n'.join(logbook.keys())\n\n\ndef t_menu(user_input):\n    '''\n    Validator for \"Send a Thank You Letter\" menu\n    redirects through to name and amount validators.\n    Eventually returns a formatted letter or an invalid message.\n    '''\n    if user_input == 'list':\n        print(list_donor_names())\n        return repl(enter_cont)\n    else:\n        name = user_input\n        if valid_name(name):\n            amount = repl(message_prompt2, valid_amount)\n            if amount:\n                add_to_data(name, amount)\n                letter = t_letter.format(name=name, amount=format_amount(amount\n                    ))\n                print(letter)\n                return repl(enter_cont)\n        else:\n            return 'Invalid Name'\n\n\ndef valid_name(name):\n    # Checks for valid name with at least two characters\n    names = name.split(' ')\n    if len(names) < 2:\n        return False\n    return True\n\n\ndef valid_amount(user_input):\n    '''\n    Validator for donation amount.\n    Returns the float of the given number, or an invalid message\n    '''\n    try:\n        return round(float(user_input), 2)\n    except ValueError:\n        return 'Invalid Amount'\n\n\ndef format_amount(amount):\n    '''\n    Returns a neatly formatted dollar amount string from a given int\n    or float.\n    '''\n    return '$%.2f' % amount\n\n\ndef report():\n    # Gathers the current logbook and formats it into neat rows, then prints.\n    global logbook\n\n    donor_list = logbook.keys()\n    # donor_list.sort()\n    row_list = get_report_header()\n\n    for d in donor_list:\n        donations = logbook[d]\n        total = sum(donations)\n        num = len(donations)\n        avg = total / num\n        row = [d, 
format_amount(total), str(num), format_amount(avg)]\n row_list.append(get_report_cells(row))\n\n print('\\n'.join(row_list))\n return repl(enter_cont)\n\n\ndef get_report_header():\n # Returns neatly formatted column headers for the data report.\n header = ['\\n']\n header.append(get_report_cells(['Name', 'Total', '#', 'Average']))\n header.append('-' * (sum(cells) + (3 * len(cells))))\n return header\n\n\ndef get_report_cells(row):\n '''\n Formats each row of donor data into spaced cells.\n returns a single string formatted row\n '''\n formatted_row = []\n for i, c in enumerate(row):\n spaces = ' ' * (cells[i] - len(c))\n formatted_row.append('{}{}'.format(spaces, c))\n return ' | '.join(formatted_row)\n\n\ndef add_to_data(name, amount):\n # Adds a given name and amount to the logbook. Returns None.\n global logbook\n if name not in logbook.keys():\n logbook[name] = []\n logbook[name].append(amount)\n\nif __name__ == '__main__':\n main_menu()\n","sub_path":"hw/hw11/mailroom_madness.py","file_name":"mailroom_madness.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"75139223","text":"#!/usr/bin/env python\n\ncounts = int(input())\ndata = list(map(int, input().split(\" \")))\ndata_weights = list(map(int, input().split(\" \")))\n\n\ndef data_times_weights(data_list, weights):\n if len(data_list) == len(weights):\n numerators = 0\n for i in range(len(data_list)):\n numerators += data_list[i] * weights[i]\n return numerators\n else:\n raise ValueError('Size of data and weights not equal')\n\n\ndef weighted_mean(data_list, weights):\n numerators = data_times_weights(data_list, weights)\n return numerators / sum(weights)\n\n\nprint(round(weighted_mean(data, data_weights),1))\n","sub_path":"10-days-of-stats/exercise-02/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"523487477","text":"from bs4 import BeautifulSoup\nimport sys\nimport re\nimport os\n\n\ndef merge(file_path):\n conll = open(file_path + '.conll', 'r').readlines()\n rst = open(file_path + '.rst', 'r').read()\n rst = '' + rst + ''\n soup = BeautifulSoup(rst, features='lxml')\n seg_ind = 0\n text_ind = 0\n segments = soup.find_all('segment')\n\n for tok_ind in range(len(conll)):\n word = conll[tok_ind].split('\\t')[2].strip()\n segment = segments[seg_ind].text.strip()\n\n \n text_ind = segment.find(word, text_ind)\n if text_ind < 0:\n print('###',word)\n print('$$$', segment)\n raise IOError(f'Word <{word}> was not found in segment {seg_ind + 1}')\n\n conll[tok_ind] = conll[tok_ind].strip() + '\\t' + str(seg_ind + 1)\n text_ind += len(word)\n\n if text_ind >= len(segment):\n seg_ind += 1\n text_ind = 0\n\n\n text = '\\n'.join(conll)\n f_merge = open(file_path + '.merge', 'w')\n f_merge.write(text)\n\n\n\nif __name__ == \"__main__\":\n print('====================== conll 2 merge =================')\n path = sys.argv[1]\n if path.endswith('/'):\n path = path[:len(path) - 1]\n all_files = os.listdir(path)\n for filename in sorted(all_files):\n if not filename.endswith('.conll'):\n continue\n filename = filename.split('.conll')[0]\n print (filename)\n merge(f'{path}/{filename}')\n","sub_path":"fa_7_conll_rst2merge.py","file_name":"fa_7_conll_rst2merge.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"111993303","text":"N = int(input())\r\nlst = []\r\nfor _ in range(N):\r\n lst.append(list(map(int,input().split())))\r\nlst.sort(key = lambda x: (x[1],x[0]))\r\n\r\nresult = 1\r\ntime = lst[0][1]\r\nind = 0\r\n\r\nwhile True:\r\n end = True\r\n for i in range(ind+1, N):\r\n if lst[i][0] >= time:\r\n result += 1\r\n time = lst[i][1]\r\n ind = i\r\n end = False\r\n break\r\n if end:\r\n print(result)\r\n break \r\n \r\n\r\n\r\n\r\n","sub_path":"python_algorithm/Baekjoon/silver/1931.py","file_name":"1931.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"124179078","text":"from os.path import join\nfrom tempfile import mkdtemp\nfrom shutil import rmtree\nfrom time import sleep\n\nfrom mock import patch\nfrom git import Git, Repo\nfrom git.exc import GitCommandError\n\nfrom jig.tests.testcase import JigTestCase\nfrom jig.exc import GitCloneError\nfrom jig.gitutils.checks import is_git_repo\nfrom jig.gitutils.remote import clone, remote_has_updates\n\n\nclass TestClone(JigTestCase):\n\n \"\"\"\n Git utils clone method can clone a repository.\n\n \"\"\"\n def setUp(self):\n self.workingdir = mkdtemp()\n\n def tearDown(self):\n rmtree(self.workingdir)\n\n def test_clone_valid_repo(self):\n \"\"\"\n Valid repo can be cloned.\n \"\"\"\n with patch.object(Git, 'execute'):\n to_dir = join(self.workingdir, 'a')\n\n Git.execute.return_value = 'Cloning into X'\n\n gitobj = clone('http://github.com/user/repo', to_dir)\n\n Git.execute.assert_called_with([\n 'git', 'clone', 'http://github.com/user/repo', to_dir\n ])\n\n self.assertIsInstance(gitobj, Git)\n\n def test_clone_invalid_repo(self):\n \"\"\"\n Invalid repo raises error.\n \"\"\"\n with patch.object(Git, 'execute'):\n to_dir = join(self.workingdir, 'a')\n\n Git.execute.side_effect = GitCommandError(\n ['command'], 128, stderr='bad command'\n )\n\n with self.assertRaises(GitCloneError) as gce:\n clone('http://github.com/user/repo', to_dir) # pragma: no branch\n\n self.assertIn(\n \"'command' returned exit status 128: bad command\",\n gce.exception\n )\n\n def test_local_directory_clone(self):\n \"\"\"\n Clones a local file-based Git repository.\n \"\"\"\n to_dir = join(self.workingdir, 'a')\n\n clone(self.gitrepodir, to_dir)\n\n self.assertTrue(is_git_repo(to_dir))\n\n def test_clone_branch(self):\n \"\"\"\n Clone a specific branch of a repository.\n \"\"\"\n with patch.object(Git, 'execute'):\n to_dir = join(self.workingdir, 'a')\n\n Git.execute.return_value = 'Cloning into X'\n\n clone(\n 'http://github.com/user/repo',\n to_dir,\n branch='alternate'\n )\n\n Git.execute.assert_called_with([\n 'git', 'clone', '--branch', 'alternate',\n 'http://github.com/user/repo', to_dir\n ])\n\n\nclass TestRemoteHasUpdates(JigTestCase):\n\n \"\"\"\n Git utils check if the active branch is older than the remote.\n\n \"\"\"\n def setUp(self):\n super(TestRemoteHasUpdates, self).setUp()\n\n repo, working_dir, diffs = self.repo_from_fixture('repo01')\n\n self.remote_repo = repo\n self.remote_workingdir = working_dir\n\n self.local_workingdir = mkdtemp()\n\n clone(self.remote_workingdir, self.local_workingdir)\n\n self.local_repo = Repo(self.local_workingdir)\n\n def tearDown(self):\n rmtree(self.local_workingdir)\n\n def test_no_updates(self):\n \"\"\"\n If the remote and local are the same, return False.\n \"\"\"\n self.assertFalse(remote_has_updates(self.local_workingdir))\n\n def test_has_updates(self):\n \"\"\"\n If the remote is newer than the local, 
returns True.\n \"\"\"\n # Wait a second so the date is different than our original commit\n sleep(1.0)\n\n self.commit(self.remote_workingdir, 'a.txt', 'aaa')\n\n self.assertTrue(remote_has_updates(self.local_workingdir))\n\n def test_handles_git_python_exceptions(self):\n \"\"\"\n If the fetch to retrieve new information results in an exception.\n \"\"\"\n with patch('jig.gitutils.remote.git') as git:\n git.Repo.side_effect = AttributeError\n\n self.assertTrue(remote_has_updates(self.local_workingdir))\n\n with patch('jig.gitutils.remote.git') as git:\n git.Repo.side_effect = GitCommandError(None, None)\n\n self.assertTrue(remote_has_updates(self.local_workingdir))\n\n with patch('jig.gitutils.remote.git') as git:\n git.Repo.side_effect = AssertionError\n\n self.assertTrue(remote_has_updates(self.local_workingdir))\n\n def test_has_updates_in_local(self):\n \"\"\"\n If the updates are in the local branch, return False.\n \"\"\"\n sleep(1.0)\n\n self.commit(self.local_workingdir, 'a.txt', 'aaa')\n\n self.assertFalse(remote_has_updates(self.local_workingdir))\n\n def test_remote_different_branch_has_updates(self):\n \"\"\"\n If the remote has a non-\"master\" branch as the default.\n \"\"\"\n # Create a new branch on the remote\n alternate = self.remote_repo.create_head('alternate')\n self.remote_repo.head.reference = alternate\n self.remote_repo.head.reset(index=True, working_tree=True)\n\n # Clone the remote branch locally\n self.local_workingdir = mkdtemp()\n clone(self.remote_workingdir, self.local_workingdir,\n branch='alternate')\n self.local_repo = Repo(self.local_workingdir)\n\n # If we check now, no new updates have been made\n self.assertFalse(remote_has_updates(self.local_workingdir))\n\n # Let the clock rollover\n sleep(1.0)\n\n # Commit to the 'alternate' branch on the remote\n self.commit(self.remote_workingdir, 'a.txt', 'aaa')\n\n self.assertTrue(remote_has_updates(self.local_workingdir))\n","sub_path":"src/jig/gitutils/tests/test_remote.py","file_name":"test_remote.py","file_ext":"py","file_size_in_byte":5510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"176433197","text":"class Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n if not grid:\n return 0\n count = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == \"1\":\n self.dfs(grid, i, j)\n count += 1\n return count\n \n def dfs(self, grid, i, j):\n if not 0<=i')\n context.bot.send_photo(chat_id=chat_id, photo=payload)\n else:\n logging.info(f'bot said:\\n')\n context.bot.send_document(chat_id=chat_id, document=payload)","sub_path":"utils/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"315370030","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 17:46:33 2019\n\n@author: badhe\n\"\"\"\n\nimport sys\n\nfilename = str(sys.argv[1])\ninfile = open(filename, \"r\")\n\nbase_out = \"FASTA/Prediction_source/\"\n\nlines = infile.readlines()\nitr = iter(lines)\ncount = 0\n\noutfile = None\n\nnextline = next(itr)\n\nwhile(1):\n if(nextline.startswith('>')):\n try:\n outfile.close()\n except AttributeError:\n pass\n count+=1\n outfile = open(base_out+str(count)+\".txt\", \"w+\")\n outfile.write(nextline)\n else:\n outfile.write(nextline)\n \n try:\n nextline = next(itr)\n except StopIteration:\n 
break\n\noutfile.close()\ninfile.close()\n\n","sub_path":"Prediction/FASTA_splitting.py","file_name":"FASTA_splitting.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"625804065","text":"class Solution:\n \"\"\"\n @param n: An integer\n @return: A list of strings.\n \"\"\"\n def fizzBuzz(self, n):\n i = 1\n trans_dict = {3: \"fizz buzz\", 2: \"fizz\", 1: \"buzz\"}\n out = []\n while i <= n:\n status = (i % 3 == 0) * 2 + (i % 5 == 0)\n out.append(trans_dict.get(status, str(i)))\n i += 1\n return out\n","sub_path":"lc0009_fizz_buzz.py","file_name":"lc0009_fizz_buzz.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"544743985","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# License: 3 Clause BSD\n# http://scikit-criteria.org/\n\n\n# =============================================================================\n# FUTURE\n# =============================================================================\n\nfrom __future__ import unicode_literals\n\n\n# =============================================================================\n# DOC\n# =============================================================================\n\n__doc__ = \"\"\"test moora methods\"\"\"\n\n\n# =============================================================================\n# IMPORTS\n# =============================================================================\n\nimport random\n\nfrom . import core\n\nfrom .. import moora\nfrom ..common import util\n\n\n# =============================================================================\n# BASE CLASS\n# =============================================================================\n\nclass MooraTest(core.SKCriteriaTestCase):\n\n def setUp(self):\n \"\"\"Data from Kracka et al, 2010 [KRACKA2010]_\n\n References\n ----------\n\n .. [KRACKA2010] KRACKA, M; BRAUERS, W. K. M.; ZAVADSKAS, E. K. Ranking\n Heating Losses in a Building by Applying the MULTIMOORA . 
-\n ISSN 1392 – 2785 Inzinerine Ekonomika-Engineering Economics, 2010,\n 21(4), 352-359.\n\n \"\"\"\n self.mtx = [\n [33.95, 23.78, 11.45, 39.97, 29.44, 167.10, 3.852],\n [38.9, 4.17, 6.32, 0.01, 4.29, 132.52, 25.184],\n [37.59, 9.36, 8.23, 4.35, 10.22, 136.71, 10.845],\n [30.44, 37.59, 13.91, 74.08, 45.10, 198.34, 2.186],\n [36.21, 14.79, 9.17, 17.77, 17.06, 148.3, 6.610],\n [37.8, 8.55, 7.97, 2.35, 9.25, 134.83, 11.935]\n ]\n self.criteria = [\n util.MIN, util.MIN, util.MIN, util.MIN,\n util.MAX, util.MIN, util.MAX\n ]\n self.rows = len(self.mtx)\n self.columns = len(self.mtx[0]) if self.rows else 0\n\n def test_ratio_with_weights(self):\n weights = [1, 1, 1, 1, 1, 1, 1]\n\n result = [5, 1, 3, 6, 4, 2]\n points = [-0.23206838, -0.03604841, -0.1209072,\n -0.31909074, -0.16956892, -0.11065173]\n\n rank_result, points_result = moora.ratio(\n self.mtx, self.criteria, weights\n )\n\n self.assertAllClose(points_result, points, atol=1.e-4)\n self.assertAllClose(rank_result, result)\n\n def test_ratio(self):\n result = [5, 1, 3, 6, 4, 2]\n points = [-1.6245, -0.2523, -0.8464, -2.2336, -1.1870, -0.7746]\n\n rank_result, points_result = moora.ratio(self.mtx, self.criteria)\n\n self.assertAllClose(points_result, points, atol=1.e-4)\n self.assertAllClose(rank_result, result)\n\n def test_refpoint(self):\n result = [4, 5, 1, 6, 2, 3]\n points = [0.6893, 0.6999, 0.5982, 0.8597, 0.6002, 0.6148]\n\n rank_result, points_result = moora.refpoint(self.mtx, self.criteria)\n\n self.assertAllClose(points_result, points, atol=1.e-3)\n self.assertAllClose(rank_result, result)\n\n def test_refpoint_with_weights(self):\n weights = [1, 1, 1, 1, 1, 1, 1]\n result = [4, 5, 1, 6, 2, 3]\n points = [0.09847, 0.0999, 0.0854, 0.1227, 0.0857, 0.0878]\n\n rank_result, points_result = moora.refpoint(\n self.mtx, self.criteria, weights\n )\n\n self.assertAllClose(points_result, points, atol=1.e-3)\n self.assertAllClose(rank_result, result)\n\n def test_fmf(self):\n result = [5, 1, 3, 6, 4, 2]\n points = [3.4343, 148689.356, 120.3441, 0.7882, 16.2917, 252.9155]\n\n rank_result, points_result = moora.fmf(self.mtx, self.criteria)\n\n self.assertAllClose(points_result, points, atol=1.e-4)\n self.assertAllClose(rank_result, result)\n\n # some zeroes\n zeros = set()\n while len(zeros) < 3:\n zero = (\n random.randint(0, self.rows-1),\n random.randint(0, self.columns-1)\n )\n zeros.add(zero)\n for row, column in zeros:\n self.mtx[row][column] = 0\n\n moora.fmf(self.mtx, self.criteria)\n\n def test_fmf_only_max(self):\n self.criteria = [util.MAX] * len(self.criteria)\n\n result = [2, 6, 4, 1, 3, 5]\n points = [0.0011, 2.411e-08, 3.135e-05, 0.0037, 0.0002, 1.48e-05]\n\n rank_result, points_result = moora.fmf(self.mtx, self.criteria)\n\n self.assertAllClose(points_result, points, atol=1.e-4)\n self.assertAllClose(rank_result, result)\n\n # some zeroes\n zeros = set()\n while len(zeros) < 3:\n zero = (\n random.randint(0, self.rows-1),\n random.randint(0, self.columns-1)\n )\n zeros.add(zero)\n for row, column in zeros:\n self.mtx[row][column] = 0\n\n moora.fmf(self.mtx, self.criteria)\n\n def test_fmf_only_min(self):\n self.criteria = [util.MIN] * len(self.criteria)\n\n result = [5, 1, 3, 6, 4, 2]\n points = [\n 869.5146, 41476540.2, 31897.0622, 264.0502, 4171.5128, 67566.8851\n ]\n\n rank_result, points_result = moora.fmf(self.mtx, self.criteria)\n\n self.assertAllClose(points_result, points, atol=1.e-4)\n self.assertAllClose(rank_result, result)\n\n # some zeroes\n zeros = set()\n while len(zeros) < 3:\n zero = (\n random.randint(0, 
self.rows-1),\n random.randint(0, self.columns-1)\n )\n zeros.add(zero)\n for row, column in zeros:\n self.mtx[row][column] = 0\n\n moora.fmf(self.mtx, self.criteria)\n\n def test_multimoora(self):\n result = [5, 1, 3, 6, 4, 2]\n mmora_mtx = [\n [5, 4, 5],\n [1, 5, 1],\n [3, 1, 3],\n [6, 6, 6],\n [4, 2, 4],\n [2, 3, 2]\n ]\n\n rank_result, mmora_mtx_result = moora.multimoora(\n self.mtx, self.criteria\n )\n\n self.assertAllClose(mmora_mtx_result, mmora_mtx, atol=1.e-4)\n self.assertAllClose(rank_result, result)\n","sub_path":"skcriteria/tests/test_moora.py","file_name":"test_moora.py","file_ext":"py","file_size_in_byte":6151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"83278768","text":"import json\nfrom Puzzle import Puzzle\nfrom Generator import*\nfrom Model import*\n\ndef createMoreSudoku(starting_board, emptied_board):\n empty_squares = rememberEmptySquares(emptied_board)\n starting_board = shuffle(starting_board)\n board = putBack(empty_squares, starting_board)\n return board\ndef putBack(empty_squares, board):\n for pair in empty_squares:\n board[pair[0]][pair[1]] = 0\n return board\ndef rememberEmptySquares(board):\n empty_squares = []\n for i in range(9):\n for j in range(9):\n if board[i][j] == 0:\n empty_squares.append((i,j))\n return empty_squares\n\ndef main():\n nSolutions = int(input(\"How many solutions should the puzzle have? \"))\n nEmpty = int(input(\"How many empty cells do you want? \"))\n if (nEmpty <= 45):\n print(\"Generating and saving initial puzzle...\")\n p = Puzzle(nSolutions, nEmpty)\n p.empty()\n\n filNam = str(p.puzzleID()[0]) + \"-\" + str(p.puzzleID()[1]) + \".txt\"\n out = open(filNam, 'a')\n\n json.dump(p.getPuzzle(), out)\n print(\"Now adding more puzzles...\")\n n = 0\n i = 1\n while(n < 50):\n print(\"Trying new puzzle \" + str(i))\n fullBoard = copyListOfLists(p.getOriginalBoard())\n emptyBoard = copyListOfLists(p.getPuzzle())\n board = createMoreSudoku(fullBoard, emptyBoard)\n F = createSudoku(board)\n if (exactly_n_models(F, p.puzzleID()[0])):\n json.dump(board, out)\n n+= 1\n print(\"Another puzzle added: \" + str(n) + \" puzzles this session out of \"\n\n + str(i) + \" tried\")\n i += 1\n \n else:\n print(\"Generating and saving 50 puzzles with your preferences...\")\n for i in range(50):\n print(\"Generating puzzle \" + str(i + 1) + \" of 50...\")\n p = Puzzle(nSolutions, nEmpty)\n p.empty()\n\n filNam = str(p.puzzleID()[0]) + \"-\" + str(p.puzzleID()[1]) + \".txt\"\n out = open(filNam, 'a')\n \n json.dump(p.getPuzzle(), out)\n \n\nmain()\n","sub_path":"9x9, 16x16, 25x25/Saver.py","file_name":"Saver.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"169095801","text":"def pattern(number):\n for i in range(0,number):\n for j in range(1,i+1):\n print(\"* \",end=\" \")\n print(\"\\r\")\n for i in range(number,0,-1):\n for j in range(0,i-1):\n print(\"* \",end=\" \")\n print(\"\\r\")\n \n\nnumber=int(input(\"enter the number :-\"))\npattern(number)\n ","sub_path":"Day02/pattern.py","file_name":"pattern.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"157385602","text":"import sys\nreader = (s.rstrip() for s in sys.stdin)\ninput = reader.__next__\n\n\nn = int(input())\narry = list(map(int,input().split()))\n\narry.sort()\n\nans = [0]*n\n\nif n%2:\n rangen = n//2 + 1\nelse:\n rangen = n//2\nfor i 
in range(rangen):\n if n//2 + i < n:\n ans[i*2] = arry[n//2 + i]\n if i * 2 + 1 < n:\n ans[i*2+1] = arry[i]\n\nprint((n-1)//2)\nprint(\" \".join([str(x) for x in ans]))\n\n#\"{} {} {}\".format(maxele,minele,minele)\n# 4 1 3 2 5 \n","sub_path":"round671/icephe.py","file_name":"icephe.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"635355458","text":"import numpy as np\nimport pandas as pd\nfrom moodscores.helper_functions import tokenizer\nfrom moodscores import _ROOT,get_data\n\nAROUSAL_DATA = get_data('Anew_arousal.txt')\nVALENCE_DATA = get_data('Anew_valence.txt')\nDOMINANCE_DATA = get_data('Anew_dominance.txt')\n\nclass Anew(object):\n\n def __init__(self):\n\n self.words, self.Arousal, self.Dominance, self.Valence = self.Setup()\n\n def Setup(self):\n\n with open(AROUSAL_DATA) as f:\n Arousal = [x.strip().split('\\t') for x in f.readlines()]\n Arousal = {x:float(y) for x,y in Arousal}\n\n with open(DOMINANCE_DATA) as f:\n Dominance = [x.strip().split('\\t') for x in f.readlines()]\n Dominance = {x:float(y) for x,y in Dominance}\n\n with open(VALENCE_DATA) as f:\n Valence = [x.strip().split('\\t') for x in f.readlines()]\n Valence = {x:float(y) for x,y in Valence}\n\n words = Arousal.keys()\n\n return words, Arousal, Dominance, Valence\n\n def Score(self,tweet, calculation_type):\n\n if calculation_type != 'Sum' and calculation_type != 'Average':\n raise ValueError(\"calculation_type return be 'Sum' or 'Average'\")\n\n total = 0\n results = {'Valence':0, 'Arousal':0, 'Dominance':0}\n tokenized_list = tokenizer(tweet)\n tokens_in_wordlist = []\n\n for word in tokenized_list:\n if word in self.words:\n tokens_in_wordlist.append(word)\n total += 1\n results['Valence'] += self.Valence[word]\n results['Arousal'] += self.Arousal[word]\n results['Dominance'] += self.Dominance[word]\n\n if total == 0: return [{'Valence':None, 'Arousal':None, 'Dominance':None}, tokens_in_wordlist]\n\n if calculation_type == 'Sum': return [results, tokens_in_wordlist]\n elif calculation_type == 'Average':\n results['Valence'] = results['Valence'] / total\n results['Arousal'] = results['Arousal'] / total\n results['Dominance'] = results['Dominance'] / total\n return [results, tokens_in_wordlist]\n\n","sub_path":"build/lib/moodscores/anew.py","file_name":"anew.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"459921140","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.product_home),\n url(r'^product_detail/(?P\\d+)/$', views.product_detail),\n url(r'^phone/$', views.phone),\n url(r'^payment_detail/(?P\\d+)/$', views.payment_detail),\n url(r'^test/$', views.test),\n url(r'^create_order/$', views.create_order),\n url(r'^thanks/$', views.thanks),\n url(r'^contacts/$', views.contacts),\n]\n","sub_path":"app/pay/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"285719877","text":"import os\nimport numpy as np\nfrom collections import OrderedDict\n\nfrom target import SampleRandomVector\nfrom srom import SROM, SROMSurrogate\nfrom postprocess import Postprocessor\n\n'''\nCompare piecewise linear SROM approximations to the EOL for m=5,10,20\nProduces Figure 5(b) in the paper\n'''\n\n#Target Monte Carlo input samples for comparison\ntargetsamples = \"mc_data/eol_samples_MC.txt\"\n\n#SElect 3 SROM sizes\nsromsizes = [5,10,20]\nsrom_dir = \"srom_data\"\n\n#Plotting specs:\nvarz = [r'EOL (Cycles)']\nxlimits = [[1.0e6, 2.0e6]]\n#xlimits = None\n#xlimits = [[9.e5, 2.0e6]]\nylimits = [[-0.01, 1.1]]\nxticks = [[ r'$1.0 \\times 10^6$','',r'$1.4 \\times 10^6$','',\n r'$1.8 \\times 10^6$','']]\n\nxaxispadding = 5\naxisfontsize = 28\nlabelfontsize = 24\nlegendfontsize = 24\ncdfylabel = True #Label y axis as \"CDF\"\nplot_dir = \"plots\"\nplot_suffix = \"SROM_pwlin_eol_CDF_m\"\nfor m in sromsizes:\n plot_suffix += \"_\" + str(m)\n\n#Load / initialize target random variable from samples:\nsamples = np.genfromtxt(targetsamples)\ntarget = SampleRandomVector(samples)\n\n#Build up sromsize-to-SROM object map for plotting routine\nsroms = OrderedDict()\n\nfor sromsize in sromsizes:\n\n #Get EOL SROM Surrogate samples to make SampleRandomVector representation of CDF\n eolsamplefile = \"srom_eol_samples_m\" + str(sromsize) + \".txt\"\n eolsamplefile = os.path.join(srom_dir, eolsamplefile)\n eolsamples = np.genfromtxt(eolsamplefile)\n\n sroms[sromsize] = SampleRandomVector(eolsamples)\n \nPostprocessor.compare_srom_CDFs(sroms, target, plotdir=\"plots\",\n plotsuffix=plot_suffix, variablenames=varz, xlimits=xlimits, ylimits=ylimits, xticks=xticks,\n cdfylabel=True, xaxispadding=xaxispadding,\n axisfontsize=axisfontsize,\n labelfontsize=labelfontsize,\n legendfontsize=legendfontsize)\n\n","sub_path":"examples/phm18/compare_pwlin_srom_eol_CDFs.py","file_name":"compare_pwlin_srom_eol_CDFs.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"558803129","text":"# (C) Datadog, Inc. 
2018-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\nimport json\nimport os\nfrom io import StringIO\n\nimport click\n\nfrom ....fs import read_file, write_file\nfrom ...constants import get_agent_changelog, get_root\nfrom ..console import CONTEXT_SETTINGS, abort, echo_info\nfrom .common import get_changes_per_agent\n\n\n@click.command(\n context_settings=CONTEXT_SETTINGS,\n short_help=\"Provide a list of updated checks on a given Datadog Agent version, in changelog form\",\n)\n@click.option('--since', help=\"Initial Agent version\", default='6.3.0')\n@click.option('--to', help=\"Final Agent version\")\n@click.option(\n '--write', '-w', is_flag=True, help=\"Write to the changelog file, if omitted contents will be printed to stdout\"\n)\n@click.option('--force', '-f', is_flag=True, default=False, help=\"Replace an existing file\")\ndef changelog(since, to, write, force):\n \"\"\"\n Generates a markdown file containing the list of checks that changed for a\n given Agent release. Agent version numbers are derived inspecting tags on\n `integrations-core` so running this tool might provide unexpected results\n if the repo is not up to date with the Agent release process.\n\n If neither `--since` or `--to` are passed (the most common use case), the\n tool will generate the whole changelog since Agent version 6.3.0\n (before that point we don't have enough information to build the log).\n \"\"\"\n\n changes_per_agent = get_changes_per_agent(since, to)\n\n # store the changelog in memory\n changelog_contents = StringIO()\n\n # prepare the links\n agent_changelog_url = 'https://github.com/DataDog/datadog-agent/blob/master/CHANGELOG.rst#{}'\n check_changelog_url = 'https://github.com/DataDog/integrations-core/blob/master/{}/CHANGELOG.md'\n\n # go through all the agent releases\n for agent, version_changes in changes_per_agent.items():\n url = agent_changelog_url.format(agent.replace('.', '')) # Github removes dots from the anchor\n changelog_contents.write(f'## Datadog Agent version [{agent}]({url})\\n\\n')\n\n if not version_changes:\n changelog_contents.write('* There were no integration updates for this version of the Agent.\\n\\n')\n else:\n for name, ver in version_changes.items():\n # get the \"display name\" for the check\n manifest_file = os.path.join(get_root(), name, 'manifest.json')\n if os.path.exists(manifest_file):\n decoded = json.loads(read_file(manifest_file).strip())\n display_name = decoded.get('display_name')\n else:\n display_name = name\n\n breaking_notice = \" **BREAKING CHANGE**\" if ver[1] else \"\"\n changelog_url = check_changelog_url.format(name)\n changelog_contents.write(f'* {display_name} [{ver[0]}]({changelog_url}){breaking_notice}\\n')\n # add an extra line to separate the release block\n changelog_contents.write('\\n')\n\n # save the changelog on disk if --write was passed\n if write:\n dest = get_agent_changelog()\n # don't overwrite an existing file\n if os.path.exists(dest) and not force:\n msg = \"Output file {} already exists, run the command again with --force to overwrite\"\n abort(msg.format(dest))\n\n write_file(dest, changelog_contents.getvalue())\n else:\n echo_info(changelog_contents.getvalue())\n","sub_path":"datadog_checks_dev/datadog_checks/dev/tooling/commands/agent/changelog.py","file_name":"changelog.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"120728048","text":"import numpy as np\nimport 
matplotlib\nfrom matplotlib import pyplot as plt\nfrom scipy import stats\n\nmatplotlib.rcParams.update({'font.size': 20})\n\ndatapath = \"G:\\\\Dev\\\\Data\\\\Family Experiments\\\\\"\n\nvariables = [\"Mibig Fingerprint-Family\"]\ndata = []\n\nfor i in range(10):\n filepath = datapath + variables[0] + \" \" + str(i) + \".txt\"\n stats_one = np.loadtxt(filepath, dtype=float)\n print(stats_one[:, 2])\n data.append(stats_one[:, 2])\n\nfilepath_one = datapath + variables[0] + \" 0.txt\"\nstats_one = np.loadtxt(filepath_one, dtype=float)\nori_copy_one = stats_one\nfilter = np.where(stats_one[:,2]>0.5)\nstats_one = stats_one[filter]\n\n# filepath_two = datapath + variables[1] + \" 0.txt\"\n# stats_two = np.loadtxt(filepath_two, dtype=float)\n# ori_copy_two = stats_two\n# filter = np.where(stats_two[:,2]>0.5)\n# stats_two = stats_two[filter]\n#\n# filepath_three = datapath + variables[2] + \" 0.txt\"\n# stats_three = np.loadtxt(filepath_three, dtype=float)\n# ori_copy_three = stats_three\n# filter = np.where(stats_three[:,2]>0.5)\n# stats_three = stats_three[filter]\n#\n# filepath_four = datapath + variables[3] + \" 0.txt\"\n# stats_four = np.loadtxt(filepath_four, dtype=float)\n# ori_copy_four = stats_four\n# filter = np.where(stats_four[:,2]>0.5)\n# stats_four = stats_four[filter]\n#\n# filepath_five = datapath + variables[4] + \" 0.txt\"\n# stats_five = np.loadtxt(filepath_five, dtype=float)\n# filter = np.where(stats_five[:,2]>0.5)\n# stats_five = stats_five[filter]\n\nfig, ax = plt.subplots(figsize=(8, 4))\n\nn_bins = 1000\n\nn, bins, patches = ax.hist(stats_one[:,2], n_bins, normed=1, histtype ='step', cumulative=-1, label='Mibig Fingerprint-Family')\n\n# ax.hist(stats_two[:,2], n_bins, normed=1, histtype='step', cumulative=-1, label='10000')\n#\n# ax.hist(stats_three[:,2], n_bins, normed=1, histtype='step', cumulative=-1, label='1000_Triangle')\n#\n# ax.hist(stats_four[:,2], n_bins, normed=1, histtype='step', cumulative=-1, label='10000_Triangle')\n#\n# ax.hist(stats_five[:,2], n_bins, normed=1, histtype='step', cumulative=-1, label='60')\n\nax.grid(True)\nax.legend(loc='right')\nax.set_title('Cumulative AUC Scores')\nax.set_xlabel('AUC Score')\nax.set_ylabel('Proportion of Families')\n\n# print(stats.ttest_rel(ori_copy_one[:, 2], ori_copy_two[:, 2]))\n# print(stats.ttest_rel(ori_copy_one[:, 2], ori_copy_three[:, 2]))\n\nplt.show()\n\n\n\n","sub_path":"Code/Python/CompareBySubstructure.py","file_name":"CompareBySubstructure.py","file_ext":"py","file_size_in_byte":2321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"462104481","text":"#!/usr/bin/python\n\nimport glob\nimport MySQLdb\nimport os\n\nfrom config import mysql\nfrom errors import CreateTableError, InvalidDataTypeError\n\nDATA_PATH = '../data/'\nSPECS_PATH = '../specs/'\n\ndef get_data_specs(spec_file_name):\n '''Return a dictionary of table column specs and data file formatting.'''\n data_specs = []\n spec_file_path = SPECS_PATH + spec_file_name\n with open(spec_file_path) as f:\n first_line = f.readline()\n for line in f:\n specs = line.rstrip().split(',')\n if len(specs) == 3:\n data_specs.append(\n {'column_name': specs[0], 'width': specs[1], 'data_type': specs[2]})\n return data_specs\n\ndef get_create_table_statement(table_name, data_specs):\n '''Return string that creates a table when executed by MySQLdb.'''\n create_statement = 'CREATE TABLE ' + table_name + ' ('\n column_statements = []\n for column in data_specs:\n if column['data_type'] == 'TEXT':\n 
column_statement = column['column_name'] + ' ' + 'VARCHAR('+ column['width'] + ')'\n        else:\n            column_statement = column['column_name'] + ' ' + column['data_type']\n        column_statements.append(column_statement)\n    create_statement += ', '.join(column_statements)\n    create_statement = create_statement + ', date VARCHAR(10)'\n    create_statement += ');'\n    return create_statement\n\ndef get_and_load_data():\n    '''Retrieve specifications and use them to load data into db.'''\n    sql_statements = []\n    for spec_file in os.listdir(SPECS_PATH):\n        if spec_file.endswith('.csv'):\n            # slice off the extension: str.rstrip strips a character set, not a suffix\n            file_name = spec_file[:-len('.csv')]\n            data_specs = get_data_specs(spec_file)\n            create_table_statement = get_create_table_statement(file_name, data_specs)\n            sql_statements.extend(data_file_parser(file_name, data_specs))\n            load_data(file_name, create_table_statement, sql_statements)\n\ndef convert_to_data_type(value, data_type):\n    '''Return the appropriate string to append to a sql statement.'''\n    if data_type == 'TEXT':\n        return '\"' + value.rstrip().lstrip() + '\"'\n    elif data_type == 'INTEGER':\n        try:\n            int(value)\n            return value.rstrip().lstrip()\n        except ValueError as e:\n            raise\n    elif data_type == 'BOOLEAN':\n        if value == '1' or value == '0':\n            return value\n        else:\n            raise ValueError('Invalid value for type BOOLEAN: ' + value + '. Expected 1 or 0.')\n    else:\n        raise InvalidDataTypeError('Data file contains invalid data type: ' + data_type)\n\ndef get_insert_statement(data_line, data_specs, table_name, date):\n    '''Return string that can insert into a table when executed by MySQLdb.'''\n    column_names = []\n    values = []\n    index = 0\n    for column in data_specs:\n        column_names.append(column['column_name'])\n        value = data_line[index:index+int(column['width'])]\n        value = convert_to_data_type(value, column['data_type'])\n        values.append(value)\n        index += int(column['width'])\n    column_names.append('date')\n    values.append('\"' + date + '\"')\n    insert_statement = 'INSERT INTO ' + table_name + ' (' + \\\n        ', '.join(column_names) + ') VALUES (' + ', '.join(values) + ');'\n    return insert_statement\n\ndef data_file_parser(specification_name, data_specs):\n    '''Return a list of all the insert statement strings for data files of the same type.'''\n    data_files_path = DATA_PATH + specification_name + '_*.txt'\n    data_files = glob.glob(data_files_path)\n    insert_statements = []\n    for data_file in data_files:\n        # slice off the '.txt' suffix; rstrip would also eat trailing 't'/'x' characters of the date\n        file_date = data_file.split('_')[-1][:-len('.txt')]\n        with open(data_file) as f:\n            for line in f:\n                insert_statements.append(get_insert_statement(line, data_specs, specification_name, file_date))\n    return insert_statements\n\ndef load_data(table_name, create_table_statement, insert_statements):\n    '''Run all create table and insert sql statements to load data into db.'''\n    db = MySQLdb.connect(mysql['host'], mysql['user'], mysql['password'], mysql['db'])\n    cursor = db.cursor()\n    try:\n        cursor.execute('DROP TABLE IF EXISTS ' + table_name)\n        cursor.execute(create_table_statement)\n    except:\n        raise CreateTableError('Unable to create table')\n    try:\n        for statement in insert_statements:\n            cursor.execute(statement)\n        db.commit()\n    except:\n        db.rollback()\n    db.close()\n\ndef main():\n    get_and_load_data()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"file_parser/file_parser.py","file_name":"file_parser.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"113995471","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time    : 2019/9/18 10:10 AM\n# @Author  : zhongch4g\n# @Site    : \n# @File    : distance_of_string.py\n# @Software: IntelliJ IDEA\n\n\"\"\"\nThe distance between two strings is defined as the sum of the two strings'\nlengths after removing their common prefix. Given a list of binary strings,\nreturn the maximum distance over all pairs of strings.\n\"\"\"\n\nclass TrieNode:\n    def __init__(self, depth):\n        # self.char = char\n        self.left = None\n        self.right = None\n        self.isEnd = False\n        self.depth = depth\n\n    def get(self, c):\n        if c == '1':\n            return self.right\n        else:\n            return self.left\n\n    def set(self, c):\n        if c == '1':\n            self.right = TrieNode(self.depth + 1)\n        else:\n            self.left = TrieNode(self.depth + 1)\n\n    def getDepth(self):\n        leftDepth = self.depth\n        rightDepth = self.depth\n        if self.left: leftDepth = self.left.getDepth()\n        if self.right: rightDepth = self.right.getDepth()\n        return max(leftDepth, rightDepth)\n\n    def maxDistance(self):\n        # the best pair may branch at this node or at a deeper node, so\n        # recurse into both subtrees instead of stopping at the first fork\n        best = 0\n        if self.left and self.right:\n            best = self.left.getDepth() + self.right.getDepth() - self.depth * 2\n        if self.left: best = max(best, self.left.maxDistance())\n        if self.right: best = max(best, self.right.maxDistance())\n        return best\n\n\nclass BinaryTrie:\n    def __init__(self):\n        self.root = TrieNode(0)\n\n    def insert(self, s):\n        node = self.root\n        for c in s:\n            cur = node.get(c)\n            if not cur:\n                node.set(c)\n            node = node.get(c)\n        node.isEnd = True\n\n    def search(self, s):\n        node = self.root\n        for c in s:\n            node = node.get(c)\n            if not node: return False\n        return node.isEnd\n\n\nclass Test:\n\n    def setup(self, words):\n        self.trie = BinaryTrie()\n        for word in words:\n            self.trie.insert(word)\n\n    def test1(self):\n        words = ['1011000', '10111101', '1100000']\n        # words = ['abcqqq', 'abqqq', 'zww']\n        self.setup(words)\n        distance = self.trie.root.maxDistance()\n        print('max_distance between words ', words, distance)\n\ntest = Test()\ntest.test1()\n\n\nclass Solution:\n    def max_distance(self, words):\n        trie = {'depth': 0}\n        for word in words:\n            cur_trie = trie\n            for c in word:\n                if c not in cur_trie:\n                    cur_trie[c] = {'depth': cur_trie['depth'] + 1}\n                cur_trie = cur_trie[c]\n\n        # walk the dict-based trie the same way the TrieNode version does\n        def max_depth(node):\n            depths = [max_depth(node[c]) for c in '01' if c in node]\n            return max(depths) if depths else node['depth']\n\n        def max_dist(node):\n            children = [node[c] for c in '01' if c in node]\n            best = 0\n            if len(children) == 2:\n                best = max_depth(children[0]) + max_depth(children[1]) - node['depth'] * 2\n            return max([best] + [max_dist(child) for child in children])\n\n        return max_dist(trie)\n\n\nwords = ['1011000', '10111101', '1100000']\nsolution = Solution()\nprint(solution.max_distance(words))","sub_path":"goog/distance_of_string.py","file_name":"distance_of_string.py","file_ext":"py","file_size_in_byte":2585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"377721049","text":"import turtle\n\nwindow = turtle.Screen()\nkaplumbik = turtle.Turtle()\n\nd = 80\nh = 0\nkaplumbik.pensize(5)\n# kaplumbik.speed(1)\nwindow.bgcolor(\"lightgreen\")\n\nfor x in range(6):\n    kaplumbik.setheading(h+60)\n    for i in range(4):\n        kaplumbik.forward(d)\n        kaplumbik.right(90)\n\n    for i in range(4):\n        kaplumbik.left(90)\n        kaplumbik.forward(d)\n\n    for i in range(4):\n        kaplumbik.forward(d)\n        kaplumbik.left(90)\n\n    for i in range(4):\n        kaplumbik.right(90)\n        kaplumbik.forward(d)","sub_path":"Python3/How to Think Like a Computer Scientist/4 - Functions/4.9.4 Exercises.py","file_name":"4.9.4 Exercises.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"481453862","text":"# encoding: utf-8\n\"\"\"\nGiven a binary tree, flatten it to a linked list in-place.\n\nFor example,\nGiven\n\n     1\n    / \\\n   2   5\n  / \\   \\\n 3   4   6\n\nThe flattened tree should look like:\n1\n \\\n  2\n   \\\n    3\n     \\\n      4\n       \\\n        5\n         \\\n          6\n\n\"\"\"\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n\nclass Solution(object):\n    def __init__(self):\n        self.pre = None\n\n    def flatten(self, root):\n        \"\"\"\n        :type root: 
TreeNode\n        :rtype: void Do not return anything, modify root in-place instead.\n        \"\"\"\n        \"\"\" reversed preorder traversal \"\"\"\n        if not root:\n            return\n        self.flatten(root.right)\n        self.flatten(root.left)\n        root.right = self.pre\n        root.left = None\n        self.pre = root\n\n    def flattenIteratively(self, root):\n        cur = root\n        while cur:\n            if cur.left:\n                pre = cur.left\n                while pre.right:\n                    pre = pre.right\n                pre.right = cur.right  # the tail of the left subtree points to the head of the right subtree\n                cur.right = cur.left\n                cur.left = None\n            cur = cur.right\n\n\nif __name__ == '__main__':\n    n1 = TreeNode(1)\n    n2 = TreeNode(2)\n    n3 = TreeNode(3)\n    n4 = TreeNode(4)\n    n5 = TreeNode(5)\n    n6 = TreeNode(6)\n\n    n1.left = n2\n    # n1.right = n5\n    n2.left = n3\n    # n2.right = n4\n    # n5.right = n6\n\n    head = Solution().flatten(n1)\n","sub_path":"Leetcode-Python/FlattenBinaryTreeToLinkedList.py","file_name":"FlattenBinaryTreeToLinkedList.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"625080443","text":"#Machine Learning Experiment 6\r\n#Exercise D3\r\n#Author: Lukas Götz\r\n#Date: 13.10.2018\r\n\r\n\r\n#-----------------------------------------------------------------------------------------\r\n#Note: the 100% correct version of this experiment is kept in Matlab\r\n#G:\\Semester_6\\ML\\ML_Klausur\\V2_\r\n#D2_Vorlage.m\r\n#-----------------------------------------------------------------------------------------\r\n\r\n\r\n\r\n# Import libraries -----------------------------------------------------------------------\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Import data and extract variables\r\ndf = pd.read_excel('Autos_DE.xlsx')\r\nDATA = df.values\r\ny = np.array(DATA[2:,0], dtype='float') #consumption\r\nx1 = np.array(DATA[2:,1], dtype='float') #number of cylinders\r\nx2 = np.array(DATA[2:,2], dtype='float') #displacement\r\nx3 = np.array(DATA[2:,3], dtype='float') #power\r\nx4 = np.array(DATA[2:,4], dtype='float') #weight\r\nx5 = np.array(DATA[2:,5], dtype='float') #acceleration\r\nx6 = np.array(DATA[2:,6], dtype='float') #model year\r\nm = y.shape[0]\r\nx0 = np.array(np.ones(m), dtype='float') #helper variable consisting of ones\r\nX = np.stack((x0,x1,x2,x3,x4,x5,x6)).T #data matrix X\r\n#-----------------------------------------------------------------------------------------\r\n\r\n\r\n# Compute the regression parameters for all input variables-------------------------------\r\nRxx = np.dot(X.T,X) #correlation matrix\r\nbeta = np.dot(np.dot(np.linalg.inv(Rxx),X.T),y) #beta vector\r\nprint('Parametervektor (mit allen Eingangsvariablen) beta=', beta)\r\n#-----------------------------------------------------------------------------------------\r\n\r\n\r\n\r\n# Scatter plot of the displacement data---------------------------------------------------\r\nplt.figure(1)\r\nplt.scatter(x2,y,s=3,c='red')\r\nplt.grid(True)\r\nplt.title('Scatterplot')\r\nplt.xlabel('Hubraum x2 in Liter')\r\nplt.ylabel('Verbrauch in Liter/100km')\r\nplt.show(block=False) #block=False: the script keeps running\r\n\r\n#Compute the regression parameters for displacement alone\r\nx0 = np.array(np.ones(m), dtype='float') #helper variable consisting of ones\r\nX2 = np.stack((x0,x2)).T #data matrix X\r\nRxx2 = np.dot(X2.T,X2) #correlation matrix\r\nbeta2 = np.dot(np.dot(np.linalg.inv(Rxx2),X2.T),y)\r\nprint(\"Parametervektor (für Hubraum) beta2= \", beta2)\r\n\r\n#Plot the regression line for displacement\r\nx_2 = np.linspace(np.min(x2),np.max(x2),200) #helper variable x_\r\ny_2 = beta2[0]+beta2[1]*x_2 #helper variable y_\r\nplt.plot(x_2,y_2,lw=2)\r\nplt.grid(True)\r\nplt.title('Datenpunkte mit Regressionsgerade')\r\nplt.xlabel('Hubraum x2 in Liter')\r\nplt.ylabel('Verbrauch in Liter/100km')\r\nplt.show(block=False)\r\n\r\n# Scatter plot of the weight data\r\nplt.figure(2)\r\nplt.scatter(x4,y,s=3,c='red')\r\nplt.grid(True)\r\nplt.title('Scatterplot')\r\nplt.xlabel('Gewicht x3 in kg')\r\nplt.ylabel('Verbrauch in Liter/100km')\r\nplt.show(block=False) #block=False: the script keeps running\r\n\r\n#Compute the regression parameters for weight alone\r\nx0 = np.array(np.ones(m), dtype='float') #helper variable consisting of ones\r\nX4 = np.stack((x0,x4)).T #data matrix X\r\nRxx4 = np.dot(X4.T,X4) #correlation matrix\r\nbeta4 = np.dot(np.dot(np.linalg.inv(Rxx4),X4.T),y)\r\nprint(\"Parametervektor (für Gewicht) beta4= \", beta4)\r\n\r\n#Plot the regression line for weight\r\nx_4 = np.linspace(np.min(x4),np.max(x4),200) #helper variable x_\r\ny_4 = beta4[0]+beta4[1]*x_4 #helper variable y_\r\nplt.plot(x_4,y_4,lw=2)\r\nplt.grid(True)\r\nplt.title('Datenpunkte mit Regressionsgerade')\r\nplt.xlabel('Gewicht x3 in kg')\r\nplt.ylabel('Verbrauch in Liter/100km')\r\nplt.show(block=False)\r\n\r\nplt.show() #Prevents the figure windows from closing immediately\r\n#-----------------------------------------------------------------------------------------\r\n","sub_path":"D3.py","file_name":"D3.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"556653091","text":"import lp\nimport datetime\nimport conn_str\nimport psycopg2\nimport random\n\n\n\n\nCC_q_filename = \"CC_queries_all_10_good.txt\"\nV1_CC_filename = \"V1_CC_dict_all_10_good.txt\"\noutput_filename = \"details.txt\"\n\nlp.main(CC_q_filename, V1_CC_filename, output_filename)\n\n\n\n\nio_stats = open(output_filename, \"a\")\nio_stats.write(\"\\n\\n\\tPartition people and households by PUMA10-TEN ---- start time %s\\n\" %(datetime.datetime.now()))\n'''------------------------------------------------------------------------------------------------------------------------\n    h_id UNKNOWN for our problem\n------------------------------------------------------------------------------------------------------------------------'''\n# CREATE A DICTIONARY THAT PARTITIONS PEOPLE INTO PUMAs ON THE BASIS OF V1\nl_ppl_in_diff_puma_ten = {}\ntry:\n    connect_str = conn_str.get_conn_str()\n    conn = psycopg2.connect(connect_str)\n    cursor = conn.cursor()\n\n    cursor.execute(\"SELECT * FROM V1\")\n    rows = cursor.fetchall()\n    for i in range(len(rows)):\n        p_id = rows[i][0]\n        ten = rows[i][5]\n        puma = rows[i][4]\n        if (puma, ten) not in l_ppl_in_diff_puma_ten:\n            l_ppl_in_diff_puma_ten[(puma, ten)] = [p_id]\n        else:\n            l_ppl_in_diff_puma_ten[(puma, ten)].append(p_id)\n\n    conn.commit()\n    cursor.close()\n    conn.close()\nexcept Exception as e:\n    print(\"Uh oh, can't connect. 
Invalid dbname, user or password?\")\n print(e)\n\n\n# CREATE A DICTIONARY THAT PARTITIONS HHs INTO PUMAs ON THE BASIS OF h\nl_hhs_in_diff_puma_ten = {}\ntry:\n connect_str = conn_str.get_conn_str()\n conn = psycopg2.connect(connect_str)\n cursor = conn.cursor()\n\n cursor.execute(\"SELECT * FROM V2\")\n rows = cursor.fetchall()\n for i in range(len(rows)):\n h_id = rows[i][0]\n puma = rows[i][1] # idx 1 in V2(h_id, PUMA10, TEN) but 3 in h(h_id, TEN, ST, PUMA10)\n ten = int(rows[i][2])\n if (puma, ten) not in l_hhs_in_diff_puma_ten:\n l_hhs_in_diff_puma_ten[(puma, ten)] = [h_id]\n else:\n l_hhs_in_diff_puma_ten[(puma, ten)].append(h_id)\n conn.commit()\n cursor.close()\n conn.close()\nexcept Exception as e:\n print(\"Uh oh, can't connect. Invalid dbname, user or password?\")\n print(e)\nio_stats.write(\"\\tPartition people and households by PUMA10-TEN ---- end time %s\\n\" %(datetime.datetime.now()))\n'''---------------------------------------------------------------------------------------------------------------------'''\n\n\n'''------------------------------------------------------------------------------------------------------------------------\n RANDOM h_id ASSIGNMENT FROM CANDIDATE FK VALUES\n------------------------------------------------------------------------------------------------------------------------'''\nlp.save_time(datetime.datetime.now())\n\nh_new_l = -1\nh_new_u = -1\n\nfor (puma, ten), l_ppl in l_ppl_in_diff_puma_ten.items():\n candidate_fk = []\n if puma == -1:\n continue\n if (puma, ten) in l_hhs_in_diff_puma_ten:\n candidate_fk = l_hhs_in_diff_puma_ten[(puma, ten)]\n else:\n h_new_l -= len(l_ppl)\n candidate_fk = [i for i in range(h_new_l, h_new_u)]\n h_new_u = h_new_l\n \n io_stats.write(\"\\n\\n\\n\\n---------------------------PUMA10 = %3d - TEN = %1d---------------------------\\n\" %(puma, ten))\n #io_stats.write(\"h_id candidates = %s\\n\" %(candidate_fk))\n io_stats.write(\"Number of people = %s\\n\" %(len(l_ppl)))\n \n io_stats.write(\"\\tUpdate p_with_hid ---- start time %s\\n\" %(datetime.datetime.now()))\n '''------------------------------------------------------------------------------------------------------------------------\n Update h_id value in p_with_hid for people in this PUMA10\n ------------------------------------------------------------------------------------------------------------------------'''\n try:\n connect_str = conn_str.get_conn_str()\n conn = psycopg2.connect(connect_str)\n cursor = conn.cursor()\n \n # Assign values to h_id in relation p_with_hid\n for i in l_ppl:\n fk = random.choice(candidate_fk)\n cursor.execute(\"UPDATE p_with_hid SET h_id=\" + str(fk) + \" WHERE p_id=\" + str(i))\n # If need a new home, add corresponding row to V_2\n cursor.execute(\"SELECT COUNT(*) FROM V2 WHERE h_id=\" + str(fk))\n if cursor.fetchone()[0] == 0:\n cursor.execute(\"INSERT INTO V2(h_id, PUMA10, TEN) VALUES(\" + str(fk) + \",\" + str(puma) + \",\" + str(ten) + \")\")\n conn.commit()\n cursor.close()\n conn.close()\n except Exception as e:\n print(\"Uh oh, can't connect. 
Invalid dbname, user or password?\")\n print(e)\n io_stats.write(\"\\tUpdate p_with_hid ---- end time %s\" %(datetime.datetime.now()))\n io_stats.write(\"\\n----------------------------------------------------------------------------\\n\")\nlp.save_time(datetime.datetime.now())\n\n\n\n\n\n\n\n\n'''------------------------------------------------------------------------------------------------------------------------\n Compute Stats (for coloring)\n------------------------------------------------------------------------------------------------------------------------'''\nerr = 0\ntry:\n connect_str = conn_str.get_conn_str()\n conn = psycopg2.connect(connect_str)\n cursor = conn.cursor()\n \n # CHECK CC VIOLATIONS\n io_stats.write(\"\\n\\n\\n\\nCCs with target counts minus counts in solution\\n\")\n q = open(CC_q_filename, \"r\")\n for line in q:\n k_v = line.rstrip().split(\":\")\n pred = k_v[0]\n target_count = int(k_v[1])\n\n cursor.execute(\"SELECT COUNT(*) FROM p_with_hid NATURAL JOIN V2 WHERE \" + pred)\n ans = cursor.fetchone()[0]\n\n io_stats.write(\"%4d - %4d = %4d \\t%s\\n\" %(target_count, ans, target_count - ans, pred))\n err += abs(target_count - ans)\n q.close()\n\n # CHECK DC VIOLATIONS\n io_stats.write(\"\\nDCs with number of pairs of (tuple) violations\\n\")\n q = open(\"./DC_queries.txt\", \"r\")\n DC_err = 0\n for line in q:\n k_v = line.rstrip().split(\":\")\n cursor.execute(k_v[0])\n ans = cursor.fetchone()[0]\n DC_err += ans\n io_stats.write(str(ans) + \"\\t\" + k_v[1] + \"\\n\")\n q.close()\n io_stats.write(\"\\n\")\n\n conn.commit()\n cursor.close()\n conn.close()\nexcept Exception as e:\n print(\"Uh oh, can't connect. Invalid dbname, user or password?\")\n print(e)\n\n\n\n\n'''---------------------------------------------------------------------------------------------------------------------'''\nlp.save_tot_L1_err(\"%s (DC)\\n\" %(DC_err))\nlp.save_tot_L1_err(\"%s\\n----------------------------------------\\n\" %(err))\nio_stats.write(\"\\n\\n\\n\\n\\n\")\nio_stats.close()\n","sub_path":"baseline_with_marginals/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"205024120","text":"from odoo import models, fields, api\n\nclass MrpReportProduction(models.Model):\n _name = 'mrp.report.production'\n\n @api.depends('production_lines')\n def _compute_suma(self):\n total = 0\n for mrp in self:\n for line in mrp.production_lines:\n total += line.total\n mrp.subtotal = total\n\n @api.depends('production_lines')\n def _compute_piezas(self):\n total = 0\n for mrp in self:\n for line in mrp.production_lines:\n total += line.qty\n mrp.piezass = total\n name = fields.Many2one('hr.employee', string=\"Empleado\", readonly=True)\n production_lines = fields.One2many('mrp.report.production.line', 'production_id', string='Table lines')\n date_start = fields.Date('Fecha Inicio',readonly=True)\n date_finish = fields.Date('Fecha Fin', readonly=True)\n piezass = fields.Integer('Piezas', compute=\"_compute_piezas\", store=True)\n subtotal = fields.Float('Subtotal', compute=\"_compute_suma\", store=True)\n state = fields.Selection([\n ('draft', 'Borrador'),\n ('done', 'Confirmado'),\n ('pay', 'Pagado'),\n ], string='Estatus', readonly=True, copy=False, index=True, track_visibility='onchange', default='draft')\n\n @api.multi\n def action_confirm(self):\n for order in self:\n order.state = 'done'\n for p in self.production_lines:\n p.state = 'done'\n\n 
@api.multi\n    def action_pay(self):\n        for order in self:\n            order.state = 'pay'\n        for p in self.production_lines:\n            p.state = 'pay'\n\n    @api.multi\n    def action_cancel(self):\n        for order in self:\n            order.state = 'draft'\n        for p in self.production_lines:\n            p.state = 'draft'\n\n\n\n\nclass MrpReportProductionLine(models.Model):\n    _name = 'mrp.report.production.line'\n    state = fields.Selection([\n        ('draft', 'Borrador'),\n        ('done', 'Confirmado'),\n        ('pay', 'Pagado'),\n    ], string='Estatus', readonly=True, copy=False, index=True, track_visibility='onchange', default='draft')\n    production_id = fields.Many2one('mrp.report.production', string='reporte de Produccion Reference', required=True, ondelete='cascade',\n                                    index=True, copy=False)\n    mrp_production_id = fields.Many2one('mrp.production', string=\"Orden de Produccion\", readonly=False)\n    operation_id = fields.Many2one('mrp.workorder', string=\"Operacion\", readonly=False)\n    qty = fields.Integer('Piezas', readonly=False)\n    precio_unit = fields.Float('Precio', readonly=False)\n    total = fields.Float('Total', readonly=False)\n","sub_path":"bibo/models/mrp_report_production.py","file_name":"mrp_report_production.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"513151467","text":"from keras.models import Sequential\nfrom keras.utils import np_utils\nfrom keras.layers.core import Dense, Activation, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D, ZeroPadding2D, BatchNormalization\nfrom keras import regularizers, callbacks, optimizers\nfrom keras.optimizers import SGD\nfrom keras import backend as K\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.regularizers import l2\nimport matplotlib.pyplot as plt\nimport csv\nimport os\n# import matplotlib.pyplot as plt\n# dupa\nimport pandas as pd\nimport numpy as np\n\nlr = 0.003\nmomentum = 0.95\ndecay = 1e-6\nnesterov=False\nbatch_size = 1024\nepochs = 400\nvalidation_split=0.3\ndense_regularization = True\ndense_1_reg_l2 = 0.0003\ndebse_2_reg_l2 = 0.0003\nother = \"dense_sigmoid\"\n\n# Read data (use positional .iloc; the hybrid .ix indexer was removed from pandas)\nx_train = pd.read_csv('input/train_new.csv')\nprint(x_train.shape)\nlabels = x_train.iloc[:,0].values.astype('int32')\nx_train = (x_train.iloc[:,1:].values).astype('float32')\nx_test = (pd.read_csv('input/test.csv').values).astype('float32')\nimg_rows, img_cols = 28, 28\n\n# X_train = np.reshape(X_train_,(-1,28,28))\n\n# X_test = np.reshape(X_test_,(-1,28,28))\n\n\nif K.image_data_format() == 'channels_first':\n    X_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n    X_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n    input_shape = (1, img_rows, img_cols)\nelse:\n    X_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n    X_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n    input_shape = (img_rows, img_cols, 1)\n\n\n\n\n# convert list of labels to binary class matrix\ny_train = np_utils.to_categorical(labels)\n\n# pre-processing: divide by max and subtract mean\nscale = np.max(X_train)\nX_train /= scale\nX_test /= scale\n\nmean = np.std(X_train)\nX_train -= mean\nX_test -= mean\n\ninput_dim = X_train.shape[:]\nnb_classes = y_train.shape[1]\n#\n\nmodel = Sequential()\nmodel.add(Conv2D(30, (5, 5), input_shape=(28, 28, 1), activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(60, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 
2)))\nmodel.add(BatchNormalization())\nmodel.add(Dropout(0.2))\n\nif dense_regularization:\n #dense 1\n model.add(Flatten())\n model.add(Dense(128, activation='relu', kernel_regularizer = l2(dense_1_reg_l2)))\n model.add(BatchNormalization())\n model.add(Dropout(0.25))\n \n #dense 2\n model.add(Dense(64, activation='relu', kernel_regularizer = l2(debse_2_reg_l2)))\n model.add(BatchNormalization())\n model.add(Dropout(0.25))\n model.add(Dense(nb_classes, activation='softmax'))\nelse:\n model.add(Flatten())\n model.add(Dense(128, activation='sigmoid'))\n model.add(BatchNormalization())\n model.add(Dropout(0.25))\n \n model.add(Dense(64, activation='sigmoid'))\n model.add(BatchNormalization())\n model.add(Dropout(0.25))\n model.add(Dense(nb_classes, activation='softmax'))\n\n\n# initiate RMSprop optimizer\nsgd = SGD(lr=lr, momentum=momentum, decay=decay, nesterov=nesterov)\n\nif dense_regularization:\n filename = \"sgd_lr=\" + str(lr) + \"_momentum=\" + str(momentum) + \"_deay=\" + str(decay) + \"_nesterov=\" + str(nesterov) + \"_batch_size=\" + str(batch_size) + \"_epochs\" + str(epochs) + \"_debse_1_reg_l2=\" + str(debse_2_reg_l2 ) + \"_debse_2_reg_l2_\" + str(debse_2_reg_l2) + \"_\" + other + \" \"\nelse:\n filename = \"sgd_lr=\" + str(lr) + \"_momentum=\" + str(momentum) + \"_deay=\" + str(decay) + \"_nesterov=\" + str(nesterov) + \"_batch_size=\" + str(batch_size) + \"_epochs\" + str(epochs) + \"_noreg_\"\n\nos.mkdir(filename)\n\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])\n \nprint(\"Training...\")\n\ncheckpointfilename = filename + \"/weights-improvement-{epoch:02d}-{val_acc:.5f}_\"+filename+\".hdf5\"\n\n# plotter = Plotter()\ncheckpoint = ModelCheckpoint(checkpointfilename, monitor='val_acc', verbose=1, save_best_only=True, mode='max')\ncallbacks_list = [checkpoint]\n\nhist = model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=validation_split, verbose = 2 , callbacks=callbacks_list)\n\nkeys = sorted(hist.history.keys())\nwith open(filename + \"/history_\" + filename + \".csv\", \"w\") as outfile:\n writer = csv.writer(outfile, delimiter = \"\\t\")\n writer.writerow(keys)\n writer.writerows(zip(*[hist.history[key] for key in keys]))\n\n\n# summarize history for accuracy\nf = plt.figure(1)\nplt.plot(hist.history['acc'])\nplt.plot(hist.history['val_acc'])\nplt.title('model accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\n# f.show()\nf.savefig(filename + \"/accuracy_\"+ filename + \".jpg\")\n\n\n# summarize history for loss\ng = plt.figure(2)\nplt.plot(hist.history['loss'])\nplt.plot(hist.history['val_loss'])\nplt.title('model loss')\nplt.ylabel('loss')\nplt.xlabel('epoch')\nplt.legend(['train', 'test'], loc='upper left')\n# g.show()\ng.savefig(filename + \"/loss_\" + filename + \".jpg\")\n \nprint(\"Generating test predictions...\")\npreds = model.predict_classes(X_test, verbose=2)\n \ndef write_preds(preds, fname):\n pd.DataFrame({\"ImageId\": list(range(1,len(preds)+1)), \"Label\": preds}).to_csv(fname, index=False, header=True)\n \nwrite_preds(preds, filename + \"/keras-mlp_\" + filename + \".csv\")\nprint(\"Done!\")\n\n","sub_path":"recognizer.py","file_name":"recognizer.py","file_ext":"py","file_size_in_byte":5237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"36836407","text":"import numpy as np\n\n\ndef honest_rankings(utilities):\n \"\"\"\n Convert utilities into rankings using honest 
strategy\n\n    Parameters\n    ----------\n    utilities : array_like\n        A 2D collection of utilities.\n\n        Rows represent voters, and columns represent candidate IDs.\n        Higher utility numbers mean greater approval of that candidate by that\n        voter.\n\n    Returns\n    -------\n    election : array_like\n        A collection of ranked ballots.\n        Rows represent voters and columns represent rankings, from best to\n        worst, with no tied rankings.\n        Each cell contains the ID number of a candidate, starting at 0.\n\n        For example, if a voter ranks Curie > Avogadro > Bohr, the ballot line\n        would read ``[2, 0, 1]`` (with IDs in alphabetical order).\n\n    Examples\n    --------\n    Generate an election with 4 voters and 3 candidates:\n\n    >>> random_utilities(4, 3)\n    array([[0.805, 0.759, 0.969],\n           [0.392, 0.416, 0.898],\n           [0.008, 0.702, 0.107],\n           [0.663, 0.575, 0.174]])\n\n    Here, Voter 2 prefers Candidate 1, then 2, then 0, as we can see when\n    converted to rankings:\n\n    >>> utilities = np.array([[0.805, 0.759, 0.969],\n                              [0.392, 0.416, 0.898],\n                              [0.008, 0.702, 0.107],\n                              [0.663, 0.575, 0.174]])\n    >>> honest_rankings(utilities)\n    array([[2, 0, 1],\n           [2, 1, 0],\n           [1, 2, 0],\n           [0, 1, 2]], dtype=uint8)\n    \"\"\"\n    # 256 candidates is plenty for real elections, so we'll limit it there and\n    # use uint8 to save memory.\n    n_cands = utilities.shape[1]\n    if n_cands > 256:\n        raise ValueError('Maximum number of candidates is 256')\n\n    # Higher utilities for a voter are ranked first (earlier in row)\n    return np.argsort(utilities)[:, ::-1].astype(np.uint8)\n\n\ndef approval_optimal(utilities):\n    \"\"\"\n    Convert utilities to optimal approval voting ballots\n\n    Given a set of utilities for each voter-candidate pair, each voter is\n    modeled as maximizing their expected utility, by approving any candidate\n    that exceeds their mean utility over all candidates.[1]_\n\n    Parameters\n    ----------\n    utilities : array_like\n        A 2D collection of utilities.\n\n        Rows represent voters, and columns represent candidate IDs.\n        Higher utility numbers mean greater approval of that candidate by that\n        voter.\n\n    Returns\n    -------\n    election : ndarray\n        A 2D collection of approval ballots.\n\n        Rows represent voters, and columns represent candidate IDs.\n        A cell contains 1 if that voter approves of that candidate,\n        otherwise 0.\n\n    References\n    ----------\n    .. [1] S. Merrill III, \"A Comparison of Efficiency of Multicandidate\n       Electoral Systems\", American Journal of Political Science, vol. 28,\n       no. 1, p. 26, 1984. 
:doi:`10.2307/2110786`\n\n Examples\n --------\n Voter 0 loves Candidates A (index 0) and B (index 1), but hates C (2).\n Voter 1 dislikes A, likes B, and loves C.\n Voter 2 hates A, and is lukewarm about B and C.\n\n >>> utilities = [[1.0, 1.0, 0.0],\n [0.1, 0.8, 1.0],\n [0.0, 0.5, 0.5],\n ]\n\n Each voter optimally chooses their approval threshold based on their mean\n utility:\n Voter 0 approves A and B.\n Voter 1 approves B and C.\n Voter 2 approves B and C.\n\n >>> approval_optimal(utilities)\n array([[1, 1, 0],\n [0, 1, 1],\n [0, 1, 1]], dtype=uint8)\n\n \"\"\"\n means = np.mean(utilities, 1)\n approvals = (utilities > means[:, np.newaxis]).astype(np.uint8)\n return approvals\n","sub_path":"elsim/strategies/strategies.py","file_name":"strategies.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"31812912","text":"\"\"\"\nYou are given a 2D integer array logs where each logs[i] = [birthi, deathi] indicates the birth and death years of the ith person.\nThe population of some year x is the number of people alive during that year. The ith person is counted in year x's population if x is in the inclusive range [birthi, deathi - 1]. Note that the person is not counted in the year that they die.\nReturn the earliest year with the maximum population.\n\nExample 1:\nInput: logs = [[1993,1999],[2000,2010]]\nOutput: 1993\nExplanation: The maximum population is 1, and 1993 is the earliest year with this population.\nExample 2:\n\nInput: logs = [[1950,1961],[1960,1971],[1970,1981]]\nOutput: 1960\nExplanation:\nThe maximum population is 2, and it had happened in years 1960 and 1970.\nThe earlier year between them is 1960.\n\nConstraints:\n1 <= logs.length <= 100\n1950 <= birthi < deathi <= 2050\n\"\"\"\n\nclass Solution:\n def maximumPopulation(self, logs: list[list[int]]) -> int:\n dates = []\n for b, d in logs:\n dates.append((b, 1))\n dates.append((d, -1))\n\n dates.sort()\n print(dates)\n\n population = max_year = max_population = 0\n for year, count in dates:\n population += count\n\n if population > max_population:\n max_year = year\n max_population = population\n\n return max_year\n\n\nSolution().maximumPopulation([[1993,1999],[2000,2010]])\nSolution().maximumPopulation([[1950,1961],[1960,1971],[1970,1981]])\n","sub_path":"Leetcode questions and answers/maximum_population_year.py","file_name":"maximum_population_year.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"517861116","text":"from app.utility.base_world import BaseWorld\n\n\nname = 'Acquire Cron Jobs Baseline'\nchallenge = 'Run the appropriate setup ability in the \\'Blue Manual\\' operation to acquire a list of currently ' \\\n 'existing cron jobs on the *nix machine.'\nextra_info = \"\"\"\"\"\"\n\n\nasync def verify(services):\n for op in await services.get('data_svc').locate('operations',\n match=dict(access=BaseWorld.Access.BLUE, name='Blue Manual')):\n if cron_jobs_baseline_found(op):\n return True\n return False\n\n\ndef cron_jobs_baseline_found(op):\n return '' in [link.ability.ability_id for link in op.chain if link.finish]\n","sub_path":"app/flags/manual/blue_4a_nix.py","file_name":"blue_4a_nix.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"467930038","text":"\"\"\"\nA module to get information pending parcels.\n\nThis code is 
released under the terms of the MIT license. See the LICENSE\nfile for more details.\n\"\"\"\nimport json\nimport requests\n\n\nclass AfterShip:\n \"\"\"This class is used to get parcel information from Aftership.\"\"\"\n\n BASE_URL = 'https://api.aftership.com/v4'\n\n def __init__(self):\n \"\"\"Initialize.\"\"\"\n\n def get_trackings(self, api_key):\n \"\"\"Get tracking information.\"\"\"\n tracking_info = {}\n header = {'aftership-api-key': api_key,\n 'Content-Type': 'application/json'}\n fetchurl = self.BASE_URL + '/trackings'\n try:\n tracking_request = requests.get(fetchurl,\n timeout=8,\n headers=header).json()['data']\n tracking_info = {'success': True, 'data': tracking_request}\n except ConnectionError:\n tracking_info = {'success': False}\n return tracking_info\n\n def add_tracking(self, api_key, slug, title, tracking_number):\n \"\"\"Add tracking information.\"\"\"\n tracking_info = {}\n header = {'aftership-api-key': api_key,\n 'Content-Type': 'application/json'}\n url = self.BASE_URL + '/trackings'\n tracking = {\n 'tracking': {\n 'slug': slug,\n 'tracking_number': tracking_number,\n 'title': title,\n }\n }\n try:\n requests.post(url,\n timeout=8,\n data=json.dumps(tracking),\n headers=header)\n tracking_info = {'success': True}\n except ConnectionError:\n tracking_info = {'success': False}\n return tracking_info\n\n def delete_tracking(self, api_key, slug, tracking_number):\n \"\"\"Delete tracking information.\"\"\"\n tracking_info = {}\n header = {'aftership-api-key': api_key,\n 'Content-Type': 'application/json'}\n url = self.BASE_URL + '/trackings/' + slug + '/' + tracking_number\n try:\n requests.delete(url, timeout=8, headers=header)\n tracking_info = {'success': True}\n except ConnectionError:\n tracking_info = {'success': False}\n return tracking_info\n","sub_path":"pyaftership/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"34489384","text":"\"\"\"\nProtocol level tests.\n\"\"\"\nimport asyncio\n\nimport pytest\n\nfrom aiosmtplib import SMTPResponseException, SMTPServerDisconnected, SMTPTimeoutError\nfrom aiosmtplib.protocol import SMTPProtocol\n\n\npytestmark = pytest.mark.asyncio()\n\n\nclass EchoServerProtocol(asyncio.Protocol):\n def connection_made(self, transport):\n self.transport = transport\n\n def data_received(self, data):\n self.transport.write(data)\n\n\n@pytest.fixture(scope=\"function\")\ndef echo_server(request, hostname, port, event_loop):\n server = event_loop.run_until_complete(\n event_loop.create_server(EchoServerProtocol, host=hostname, port=port)\n )\n\n def close_server():\n server.close()\n event_loop.run_until_complete(server.wait_closed())\n\n request.addfinalizer(close_server)\n\n\n@pytest.fixture(scope=\"function\")\ndef stream_reader(request):\n return asyncio.StreamReader(limit=128)\n\n\nasync def test_protocol_connect(echo_server, stream_reader, event_loop, hostname, port):\n connect_future = event_loop.create_connection(\n lambda: SMTPProtocol(stream_reader), host=hostname, port=port\n )\n _, protocol = await asyncio.wait_for(connect_future, timeout=1.0)\n\n assert isinstance(protocol._stream_reader, asyncio.StreamReader)\n assert isinstance(protocol._stream_writer, asyncio.StreamWriter)\n assert protocol._stream_reader._transport is not None\n assert not protocol._stream_reader._transport.is_closing()\n\n protocol._stream_writer.close()\n\n\nasync def test_protocol_read_limit_overrun(event_loop, 
stream_reader, hostname, port):\n async def client_connected(reader, writer):\n await reader.read(1000)\n long_response = (\n b\"220 At vero eos et accusamus et iusto odio dignissimos ducimus qui \"\n b\"blanditiis praesentium voluptatum deleniti atque corruptis qui \"\n b\"blanditiis praesentium voluptatum\\n\"\n )\n writer.write(long_response)\n await writer.drain()\n\n server = await asyncio.start_server(client_connected, host=hostname, port=port)\n connect_future = event_loop.create_connection(\n lambda: SMTPProtocol(stream_reader), host=hostname, port=port\n )\n\n _, protocol = await asyncio.wait_for(connect_future, timeout=1.0)\n\n with pytest.raises(SMTPResponseException) as exc_info:\n await protocol.execute_command(b\"TEST\\n\", timeout=1.0)\n\n assert exc_info.value.code == 500\n assert \"Line too long\" in exc_info.value.message\n\n server.close()\n await server.wait_closed()\n\n\nasync def test_protocol_connected_check_on_read_response(stream_reader):\n smtp_protocol = SMTPProtocol(stream_reader)\n smtp_protocol._stream_reader = None\n\n with pytest.raises(SMTPServerDisconnected):\n await smtp_protocol.read_response(timeout=1.0)\n\n\nasync def test_protocol_connected_check_on_write_and_drain(stream_reader):\n smtp_protocol = SMTPProtocol(stream_reader)\n smtp_protocol._stream_reader = None\n\n with pytest.raises(SMTPServerDisconnected):\n await smtp_protocol.write_and_drain(b\"foo\", timeout=1.0)\n\n\nasync def test_protocol_reader_connected_check_on_upgrade_transport(\n stream_reader, client_tls_context\n):\n smtp_protocol = SMTPProtocol(stream_reader)\n smtp_protocol._stream_reader = None\n\n with pytest.raises(SMTPServerDisconnected):\n await smtp_protocol.upgrade_transport(client_tls_context)\n\n\nasync def test_protocol_writer_connected_check_on_upgrade_transport(\n stream_reader, client_tls_context\n):\n smtp_protocol = SMTPProtocol(stream_reader)\n\n with pytest.raises(SMTPServerDisconnected):\n await smtp_protocol.upgrade_transport(client_tls_context)\n\n\nasync def test_protocol_reader_connected_check_on_starttls(\n stream_reader, client_tls_context\n):\n smtp_protocol = SMTPProtocol(stream_reader)\n smtp_protocol._stream_reader = None\n\n with pytest.raises(SMTPServerDisconnected):\n await smtp_protocol.starttls(client_tls_context, timeout=1.0)\n\n\nasync def test_protocol_writer_connected_check_on_starttls(\n stream_reader, client_tls_context\n):\n smtp_protocol = SMTPProtocol(stream_reader)\n\n with pytest.raises(SMTPServerDisconnected):\n await smtp_protocol.starttls(client_tls_context)\n\n\nasync def test_protocol_connected_check_on_drain_writer(stream_reader):\n smtp_protocol = SMTPProtocol(stream_reader)\n\n with pytest.raises(SMTPServerDisconnected):\n await smtp_protocol._drain_writer(timeout=1.0)\n\n\nasync def test_protocol_reader_connected_check_on_connection_made(stream_reader):\n smtp_protocol = SMTPProtocol(stream_reader)\n smtp_protocol._stream_reader = None\n\n with pytest.raises(SMTPServerDisconnected):\n await smtp_protocol.connection_made(None)\n\n\nasync def test_protocol_reader_connected_check_on_readline(stream_reader):\n smtp_protocol = SMTPProtocol(stream_reader)\n smtp_protocol._stream_reader = None\n\n with pytest.raises(SMTPServerDisconnected):\n await smtp_protocol._readline(timeout=1.0)\n\n\nasync def test_protocol_writer_connected_check_on_readline(stream_reader):\n smtp_protocol = SMTPProtocol(stream_reader)\n smtp_protocol._stream_writer = None\n\n with pytest.raises(SMTPServerDisconnected):\n await 
smtp_protocol._readline(timeout=1.0)\n\n\nasync def test_protocol_timeout_on_starttls(\n event_loop, stream_reader, hostname, port, client_tls_context\n):\n async def client_connected(reader, writer):\n await reader.read(1000)\n writer.write(b\"220 go ahead\\n\")\n await writer.drain()\n await asyncio.sleep(1.0)\n\n server = await asyncio.start_server(client_connected, host=hostname, port=port)\n connect_future = event_loop.create_connection(\n lambda: SMTPProtocol(stream_reader), host=hostname, port=port\n )\n\n _, protocol = await asyncio.wait_for(connect_future, timeout=1.0)\n\n with pytest.raises(SMTPTimeoutError):\n await protocol.starttls(client_tls_context, timeout=0.01)\n\n server.close()\n await server.wait_closed()\n\n\nasync def test_protocol_timeout_on_drain_writer(\n event_loop, stream_reader, echo_server, hostname, port\n):\n connect_future = event_loop.create_connection(\n lambda: SMTPProtocol(stream_reader), host=hostname, port=port\n )\n\n _, protocol = await asyncio.wait_for(connect_future, timeout=1.0)\n\n protocol.pause_writing()\n protocol._stream_writer.write(b\"1234\")\n\n with pytest.raises(SMTPTimeoutError):\n await protocol._drain_writer(timeout=0.01)\n\n protocol._stream_writer.close()\n\n\nasync def test_connectionerror_on_drain_writer(\n event_loop, stream_reader, echo_server, hostname, port\n):\n connect_future = event_loop.create_connection(\n lambda: SMTPProtocol(stream_reader), host=hostname, port=port\n )\n\n _, protocol = await asyncio.wait_for(connect_future, timeout=1.0)\n\n protocol.pause_writing()\n protocol._stream_reader._transport.close()\n\n with pytest.raises(ConnectionError):\n await protocol._drain_writer(timeout=1.0)\n\n\nasync def test_incompletereaderror_on_readline_with_partial_line(\n event_loop, stream_reader, hostname, port\n):\n partial_response = b\"499 incomplete response\\\\\"\n\n async def client_connected(reader, writer):\n writer.write(partial_response)\n writer.write_eof()\n await writer.drain()\n\n server = await asyncio.start_server(client_connected, host=hostname, port=port)\n connect_future = event_loop.create_connection(\n lambda: SMTPProtocol(stream_reader), host=hostname, port=port\n )\n\n _, protocol = await asyncio.wait_for(connect_future, timeout=1.0)\n\n response_bytes = await protocol._readline(timeout=1.0)\n\n assert response_bytes == partial_response\n assert protocol._stream_writer._transport.is_closing()\n\n server.close()\n await server.wait_closed()\n","sub_path":"tests/test_protocol.py","file_name":"test_protocol.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"352555192","text":"from django.test import TestCase\n\n# Create your tests here.\n\nimport os\nimport requests\nimport json\n\n\nTEST_SLASH_COMMAND = {\n \"token\": os.environ.get('SLACK_VERIFICATION_TOKEN'),\n \"team_id\": \"T0001\",\n \"team_domain\": \"example\",\n \"enterprise_id\": \"E0001\",\n \"enterprise_name\": \"Wizeline\",\n \"channel_id\": \"C2147483705\",\n \"channel_name\": \"test\",\n \"user_id\": \"U2147483697\",\n \"user_name\": \"gabmarti \",\n \"command\": \"/slashtest\",\n \"text\": \"94070\",\n \"response_url\": \"https://hooks.slack.com/commands/1234/5678\",\n \"trigger_id\": \"13345224609.738474920.8088930838d88f008e0\"\n}\nTEST_EVENT = {\n \"token\": os.environ.get('SLACK_VERIFICATION_TOKEN'),\n \"team_id\": \"T061EG9RZ\",\n \"api_app_id\": \"A0FFV41KK\",\n \"event\": {\n \"type\": \"reaction_added\",\n \"user\": \"U061F1EUR\",\n 
\"item\": {\n \"type\": \"message\",\n \"channel\": \"C061EG9SL\",\n \"ts\": \"1464196127.000002\"\n },\n \"reaction\": \"slightly_smiling_face\"\n },\n \"event_ts\": \"1465244570.336841\",\n \"type\": \"event_callback\",\n \"authed_users\": [\n \"U061F7AUR\"\n ]\n}\n\nTEST_EVENT_2 = {\n \"token\": \"BH9ueJhCTB7YGaULXxocJoOW\",\n \"team_id\": \"T5MBG2JKG\",\n \"api_app_id\": \"A5LL7G91B\",\n \"event\": {\n \"type\": \"message\",\n \"user\": \"U5KSYQ20Y\",\n \"text\": \"hello there\",\n \"ts\": \"1496764428.496706\",\n \"channel\": \"DD35PMQ8L\",\n \"event_ts\": \"1496764428.496706\"\n },\n \"type\": \"event_callback\",\n \"authed_users\": [\n \"U5MBXN7M4\"\n ],\n \"event_id\": \"Ev5PDV5YUS\",\n \"event_time\": 1496764428\n}\n\n#slash_test = requests.post(TEST_SLASH_COMMAND, 'http://localhost:8000/api/v1/commands/')\n#print(slash_test)\nevent_test = requests.post('http://localhost:8500/events/', TEST_EVENT_2)\nprint(event_test)\n\n\n","sub_path":"standupwizeapp/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"469480842","text":"text = 'As abc res obn nbo til tytty tytyytyt sa.'\n\ndef find_inverted_word(text):\n\ttext = text.lower()\n\tall_words = text.split()\n\n\tif '.' in all_words[-1]:\n\t\tindex = all_words[-1].index('.')\n\t\tall_words[-1] = all_words[-1][:index]\n\t\tprint('after replace:', all_words[-1])\n\n\tprint('all words:', all_words)\n\tfor word in all_words:\n\t\t\n\t\tone_couple = []\n\t\tinverted_word = ''\n\t\tcounter = len(word)\n\t\twhile counter > 0:\n\t\t\tinverted_word += word[counter-1]\n\t\t\tcounter -= 1\n\n\t\tprint(word, '-', inverted_word)\n\t\tif inverted_word in all_words:\n\n\t\t\tone_couple.append(word)\n\t\t\tone_couple.append(inverted_word)\n\n\t\t\treturn one_couple\n\treturn 0\n\nprint(find_inverted_word(text))\n\n","sub_path":"all/find_inverted_word.py","file_name":"find_inverted_word.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"312827849","text":"'''\n\nRun extra electron ID algorithms.\n\nOriginal author: M. 
Bachtis\n\n'''\n\nimport FWCore.ParameterSet.Config as cms\n\nfrom RecoEgamma.ElectronIdentification.\\\n    cutsInCategoriesElectronIdentificationV06_DataTuning_cfi import \\\n    eidVeryLoose, eidLoose, eidMedium, eidTight, \\\n    eidSuperTight, eidHyperTight1, eidHyperTight2, \\\n    eidHyperTight3, eidHyperTight4\n\n\nfrom EGamma.EGammaAnalysisTools.electronIdMVAProducer_cfi import \\\n    mvaTrigV0, mvaNonTrigV0\n\nrecoElectronID = cms.Sequence(\n\teidVeryLoose + eidLoose + eidMedium + eidTight +\n\teidSuperTight + eidHyperTight1 + eidHyperTight2 +\n    eidHyperTight3 + eidHyperTight4\n    + mvaTrigV0 + mvaNonTrigV0\n)\n\n# For PAT\nelectronIDSources = cms.PSet(\n\tcicVeryLoose = cms.InputTag(\"eidVeryLoose\"),\n\tcicLoose = cms.InputTag(\"eidLoose\"),\n\tcicMedium = cms.InputTag(\"eidMedium\"),\n\tcicTight = cms.InputTag(\"eidTight\"),\n\tcicSuperTight = cms.InputTag(\"eidSuperTight\"),\n\tcicHyperTight1 = cms.InputTag(\"eidHyperTight1\"),\n\tcicHyperTight2 = cms.InputTag(\"eidHyperTight2\"),\n\tcicHyperTight3 = cms.InputTag(\"eidHyperTight3\"),\n\tcicHyperTight4 = cms.InputTag(\"eidHyperTight4\"),\n    mvaTrigV0 = cms.InputTag(\"mvaTrigV0\"),\n    mvaNonTrigV0 = cms.InputTag(\"mvaNonTrigV0\"),\n)\n","sub_path":"PatTools/python/electrons/electronID_cff.py","file_name":"electronID_cff.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"438888145","text":"#!/usr/bin/python3\n\"\"\"\nhandles REST API actions for Place Amenity\n\"\"\"\nfrom api.v1.views import app_views\nfrom os import getenv\nfrom flask import jsonify\nfrom flask import Flask\nfrom flask import request\nfrom flask import abort\nfrom models import storage\nfrom models.place import Place\nfrom models.amenity import Amenity\n\n\n@app_views.route(\n    '/places/<place_id>/amenities',\n    methods=['GET'],\n    strict_slashes=False)\ndef place_amenity(place_id):\n    \"\"\"handles amenities route\"\"\"\n    place = storage.get(\"Place\", place_id)\n    if place is None:\n        abort(404)\n    if getenv('HBNB_TYPE_STORAGE') != 'db':\n        return jsonify(place.amenity_ids)\n    return jsonify([p_a.to_dict() for p_a in place.amenities])\n\n\n@app_views.route(\n    '/places/<place_id>/amenities/<amenity_id>',\n    methods=['POST'],\n    strict_slashes=False)\ndef place_amenity_post(place_id, amenity_id):\n    \"\"\"links an amenity to a place\"\"\"\n    place = storage.get(\"Place\", place_id)\n    amenity = storage.get(\"Amenity\", amenity_id)\n    if place is None or amenity is None:\n        abort(404)\n    in_list_fs = True\n    if getenv('HBNB_TYPE_STORAGE') != 'db':\n        if amenity_id not in place.amenity_ids:\n            in_list_fs = False\n    if amenity in place.amenities and in_list_fs:\n        return jsonify(amenity.to_dict()), 200\n    if getenv('HBNB_TYPE_STORAGE') != 'db':\n        place.amenity_ids.append(amenity_id)\n    else:\n        place.amenities.append(amenity)\n    storage.save()\n    return jsonify(amenity.to_dict()), 201\n\n\n@app_views.route(\n    '/places/<place_id>/amenities/<amenity_id>',\n    methods=['DELETE'],\n    strict_slashes=False)\ndef place_amenity_with_id(place_id, amenity_id):\n    \"\"\"handles amenities route with a parameter amenity_id\"\"\"\n    amenity = storage.get(\"Amenity\", amenity_id)\n    place = storage.get(\"Place\", place_id)\n    if place is None or amenity is None:\n        abort(404)\n    if amenity not in place.amenities:\n        abort(404)\n    if getenv('HBNB_TYPE_STORAGE') != 'db':\n        if amenity_id in place.amenity_ids:\n            place.amenity_ids.remove(amenity_id)\n    elif amenity in place.amenities:\n        place.amenities.remove(amenity)\n    storage.save()\n    return jsonify({}), 
200\n","sub_path":"api/v1/views/places_amenities.py","file_name":"places_amenities.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"453710247","text":"import logging\nimport random\nimport typing\n\nimport gym\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\n\nfrom DeepRL.Agent import DoubleDQNAgent\nfrom DeepRL.Env import EnvState, EnvAbstract\nfrom DeepRL.Replay import NaiveReplay\nfrom DeepRL.Train import Train\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nclass DemoEnv(EnvAbstract):\n def __init__(self):\n super().__init__()\n self.g = gym.make('CartPole-v0')\n self.o: np.ndarray = None\n self.total_reward = 0.\n self.render = False\n\n def startNewGame(self):\n self.o = self.g.reset()\n logger.info('total_reward: {}'.format(self.total_reward))\n if not self.render and self.total_reward > 195:\n self.render = True\n self.total_reward = 0.\n self.in_game = True\n\n def getState(self) -> EnvState:\n return EnvState(self.in_game, self.o)\n\n def doAction(self, _action: int) -> float:\n self.o, reward, is_quit, _ = self.g.step(_action)\n self.in_game = not is_quit\n self.total_reward += reward\n if self.render:\n self.g.render()\n return reward\n\n def getInputs(\n self, _state_list: typing.Sequence[EnvState]\n ) -> np.ndarray:\n return np.array([\n d.state for d in _state_list\n ])\n\n def getRandomActions(\n self, _state_list: typing.Sequence[EnvState]\n ) -> typing.Sequence[int]:\n return [random.randint(0, 1) for _ in _state_list]\n\n def getBestActions(\n self, _data: np.ndarray,\n _state_list: typing.Sequence[EnvState]\n ) -> typing.Sequence[int]:\n return np.argmax(_data, 1)\n\n\nclass Model(nn.Module):\n def __init__(self):\n super().__init__()\n\n self.fc1 = nn.Linear(4, 4)\n self.fc2 = nn.Linear(4, 2)\n\n def forward(self, x: Variable):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\nif __name__ == '__main__':\n model = Model()\n agent = DoubleDQNAgent(\n _model=model, _env=DemoEnv(),\n _gamma=0.9, _batch_size=32,\n _epsilon_init=1.0, _epsilon_decay=0.9999,\n _epsilon_underline=0.1,\n _replay=NaiveReplay(),\n _optimizer=optim.SGD(model.parameters(), 0.001, 0.9)\n )\n agent.config.epoch_show_log = 100\n train = Train(\n agent,\n _epoch_max=10000,\n _step_init=100,\n _step_train=1,\n _step_update_target=1000,\n _step_save=10000000,\n )\n train.run()\n","sub_path":"samples/cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"315222733","text":"#!/usr/bin/python3\n\"\"\"\ntest_base_model.py - unit tests BaseModel class\n\"\"\"\n\nimport datetime\n# from models.base_model import BaseModel\nimport inspect\nimport models\nimport pep8 as pycodestyle\nimport unittest\nfrom unittest import mock\nimport uuid\n\n\nBaseModel = models.base_model.BaseModel\nmodule_doc = models.base_model.__doc__\n\n\nclass test_BaseModelDocs(unittest.TestCase):\n \"\"\"\n Test BaseModelDocs class - uses unittest\n \"\"\"\n\n @classmethod\n def setUpClass(self):\n \"\"\"\n setupClass() - setup for docstring tests\n \"\"\"\n self.base_funcs = inspect.getmembers(BaseModel, inspect.isfunction)\n\n def test_pycodestyle_conformance(self):\n \"\"\"\n test_pycodestyle_conformance() - test code with pycodestyle linter\n \"\"\"\n for path in 
['models/base_model.py',\n 'tests/test_models/test_base_model.py']:\n with self.subTest(path=path):\n errors = pycodestyle.Checker(path).check_all()\n self.assertEqual(errors, 0)\n\n def test_module_docstring(self):\n \"\"\"\n Test for existence of module docstring\n \"\"\"\n self.assertIsNot(module_doc, None,\n \"base_model.py needs a docstring\")\n self.assertTrue(len(module_doc) > 1,\n \"base_model.py needs a docstring\")\n\n def test_class_docstring(self):\n \"\"\"\n Test for BaseModel class docstring\n \"\"\"\n self.assertIsNot(BaseModel.__doc__, None,\n \"BaseModel class needs a docstring\")\n self.assertTrue(len(BaseModel.__doc__) >= 1,\n \"BaseModel class needs a docstring\")\n\n def test_func_docstrings(self):\n \"\"\"\n Test for presence of docstrings in BaseModel methods\n \"\"\"\n for func in self.base_funcs:\n with self.subTest(function=func):\n self.assertIsNot(\n func[1].__doc__,\n None,\n \"{:s} method needs a docstring\".format(func[0])\n )\n self.assertTrue(\n len(func[1].__doc__) > 1,\n \"{:s} method needs a docstring\".format(func[0])\n )\n\n\nclass TestBaseModel(unittest.TestCase):\n \"\"\"\n \"\"\"\n\n def test_id(self):\n \"\"\"\n test_id() - does id exists\n \"\"\"\n bm1 = BaseModel()\n bm2 = BaseModel()\n for bm in [bm1, bm2]:\n uuid = bm.id\n with self.subTest(uuid=uuid):\n self.assertIs(type(uuid), str)\n self.assertRegex(uuid,\n '^[0-9a-f]{8}-[0-9a-f]{4}'\n '-[0-9a-f]{4}-[0-9a-f]{4}'\n '-[0-9a-f]{12}$')\n self.assertNotEqual(bm1.id, bm2.id)\n\n def test_created_at(self):\n \"\"\"\n test_created_at() - does created_at exists\n \"\"\"\n bm1 = BaseModel()\n d = datetime.datetime.now()\n self.assertEqual(type(bm1.created_at), type(d))\n\n def test_updated_at(self):\n \"\"\"\n test_updated_at() - does updated_at exists\n \"\"\"\n bm1 = BaseModel()\n d = datetime.datetime.now()\n self.assertEqual(type(bm1.updated_at), type(d))\n\n def test_id_unique(self):\n \"\"\"\n test_id_unique() - is id unique\n \"\"\"\n bm1 = BaseModel()\n n = uuid.uuid4()\n self.assertNotEqual(bm1.id, n)\n","sub_path":"0x02-restful_api_users/tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"382354441","text":"\"\"\"@file sdr_snr_scorer.py\ncontains the scorer using SdrSnrScorer\"\"\"\n# Edited by Pieter Appeltans (added snr score)\nimport scorer\nimport numpy as np\nfrom nabu.postprocessing import data_reader\nimport bss_eval\n\n\nclass SdrSnrScorer(scorer.Scorer):\n \"\"\"the SDR scorer class. Uses the script from\n C. Raffel, B. McFee, E. J. Humphrey, J. Salamon, O. Nieto, D. Liang, and D. P. W. 
Ellis,\n 'mir_eval: A Transparent Implementation of Common MIR Metrics', Proceedings of the 15th\n International Conference on Music Information Retrieval, 2014\n\n a scorer using SDR\n\n \"\"\"\n\n score_metrics = ('SDR', 'SIR', 'SNR', 'SAR', 'perm')\n score_metrics_to_summarize = ('SDR', 'SIR', 'SNR', 'SAR')\n score_scenarios = ('SS', 'base')\n score_expects = 'data'\n\n def __init__(self, conf, evalconf, dataconf, rec_dir, numbatches, task, scorer_name, checkpoint_file):\n \"\"\"Reconstructor constructor\n Args:\n conf: the scorer configuration as a dictionary\n evalconf: the evaluator configuration as a ConfigParser\n dataconf: the database configuration\n rec_dir: the directory where the reconstructions are\n numbatches: the number of batches to process\n \"\"\"\n\n super(SdrSnrScorer, self).__init__(conf, evalconf, dataconf, rec_dir, numbatches, task, scorer_name, checkpoint_file)\n\n # get the original noise signal reader\n noise_names = conf['noise'].split(' ')\n noise_dataconfs = []\n for noise_name in noise_names:\n noise_dataconfs.append(dict(dataconf.items(noise_name)))\n self.noise_reader = data_reader.DataReader(noise_dataconfs, self.segment_lengths)\n\n def _get_score(self, org_src_signals, base_signals, rec_src_signals, noise_signal):\n \"\"\"score the reconstructed utterances with respect to the original source signals\n\n Args:\n org_src_signals: the original source signals, as a list of numpy arrarys\n base_signals: the duplicated base signal (original mixture), as a list of numpy arrarys\n rec_src_signals: the reconstructed source signals, as a list of numpy arrarys\n\n Returns:\n the score\"\"\"\n\n # convert to numpy arrays\n org_src_signals = np.array(org_src_signals)[:, :, 0]\n base_signals = np.array(base_signals)[:, :, 0]\n rec_src_signals = np.array(rec_src_signals)\n noise_signal = np.squeeze(noise_signal)\n #\n collect_outputs = dict()\n collect_outputs[self.score_scenarios[1]] = bss_eval.bss_eval_sources_extended(org_src_signals, base_signals, noise_signal)\n collect_outputs[self.score_scenarios[0]] = bss_eval.bss_eval_sources_extended(org_src_signals, rec_src_signals, noise_signal)\n\n nr_spk = len(org_src_signals)\n\n # convert the outputs to a single dictionary\n score_dict = dict()\n for i, metric in enumerate(self.score_metrics):\n score_dict[metric] = dict()\n\n for j, scen in enumerate(self.score_scenarios):\n score_dict[metric][scen] = []\n\n for spk in range(nr_spk):\n score_dict[metric][scen].append(collect_outputs[scen][i][spk])\n\n return score_dict\n\n","sub_path":"nabu/postprocessing/scorers/sdr_snr_scorer.py","file_name":"sdr_snr_scorer.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"153681294","text":"\"\"\"Stream conformity functions.\n\nDeclares our main :code:`conform` function and its usual conformity\nsub-functions. These are useful when trying to conform inconsistent\ninput user data.\n\n\"\"\"\nimport re\nfrom typing import Union, List, Optional, Callable\n\nfrom pyspark.sql import functions as F, DataFrame\n\nfrom ...utils import to_list, valid_or_default\n\nO = Union[Callable, str]\nO = Optional[Union[O, List[O]]]\n\n\n# region conforming functions\n\ndef sort_cols(df: DataFrame) -> DataFrame:\n \"\"\"Sort the columns in a spark DataFrame to increase consistency.\n\n :param df: the non-normalized DataFrame being read.\n :return: The frame with its columns sorted in alphabetic order.\n\n Examples\n --------\n .. 
jupyter-execute:: /examples/io/stream/sort_cols.py\n \"\"\"\n return df.select(*sorted(df.columns))\n\n\ndef lower_cols(df: DataFrame) -> DataFrame:\n \"\"\"Lower-case all columns in the first-level of a frame.\n\n :param df: the processing frame.\n :return: A frame in which all columns are lower-case.\n\n Examples\n --------\n .. jupyter-execute:: /examples/io/stream/lower_cols.py\n \"\"\"\n cols = [c.lower() for c in df.columns]\n assert_same_number_of_cols(df.columns, cols, 'lower_cols')\n\n return df.select([F.col(c).alias(r) for c, r in zip(df.columns, cols)])\n\n\ndef simple_cols(df: DataFrame) -> DataFrame:\n \"\"\"Simplify the columns in the first-level of a frame.\n\n All non-alphanumeric characters are replaced by \"_\" in this operation.\n Any leading/trailing underscores are removed.\n\n :param df: the processing frame.\n :return: A frame with simple column names.\n\n Examples\n --------\n .. jupyter-execute:: /examples/io/stream/lower_cols.py\n \"\"\"\n r = re.compile('[^0-9a-zA-Z]+')\n cols = [r.sub('_', c).strip('_') for c in df.columns]\n assert_same_number_of_cols(df.columns, cols, 'simple_cols')\n\n return df.select([F.col(c).alias(r) for c, r in zip(df.columns, cols)])\n\n\n# endregion\n\n\n# region validators\n\ndef assert_same_number_of_cols(a: List[str],\n b: List[str],\n op_name: str):\n \"\"\"Assert the number of columns is the same.\n\n Useful to make sure the frame was not tempered with by\n the application of a possibly-destructive operation.\n\n :param a: the first (old) collection of columns.\n :param b: the second (new) collection of columns.\n :param op_name: The name of the operation being performed.\n\n Examples\n --------\n .. jupyter-execute:: /examples/io/stream/assert_same_number_of_cols.py\n :raises: ValueError\n \"\"\"\n a, b = set(a), set(b)\n\n if len(a) != len(b):\n raise ValueError(f'Illegal operation `{op_name}` performed, as the '\n f'number of columns in the frame would change from '\n f'{len(a)} to {len(b)}. Difference:\\n'\n f' A - B: {a - b}\\n'\n f' B - A: {b - a}.')\n\n\n# endregion\n\n\nCONFORMING_OPS = {\n 'sort_cols': sort_cols,\n 'simple_cols': simple_cols,\n 'lower_cols': lower_cols,\n}\n\"\"\"Dict: Map to all available conforming functions.\"\"\"\n\nDEFAULT_OPS = ('lower_cols', 'simple_cols', 'sort_cols')\n\"\"\"Tuple[str]: Tuple of default operations executed in :code:`conform`.\"\"\"\n\n\ndef adapter(op: Union[str, Callable]) -> Callable:\n \"\"\"Retrieve a known conforming operation if its name is passed.\n Otherwise, this will work as the identity function.\n\n :param op: the operation's name or function of interest.\n :return: The operation referenced.\n \"\"\"\n global CONFORMING_OPS\n\n if callable(op):\n return op\n\n if isinstance(op, str):\n if op not in CONFORMING_OPS:\n raise ValueError(\n f'Conforming operation `{op}` not found in `CONFORMING_OPS`. '\n f'Available options are: {CONFORMING_OPS.items()}')\n return CONFORMING_OPS[op]\n\n raise ValueError(f'Cannot infer an appropriate adapter for operation `{op}`. 
'\n f'Valid arguments are callables or {list(CONFORMING_OPS.keys())}.')\n\n\ndef conform(dfs: Union[DataFrame, List[DataFrame]],\n ops: O = DEFAULT_OPS) -> List[DataFrame]:\n \"\"\"Conform {DataFrames} according to some operation.\n\n :param dfs: frame or list of frames to be conformed.\n :param ops: str, function or list of str/functions\n Operation or list of operations used to conform the frames.\n Defaults to all conforming operations :code:`CONFORMING_OPS`.\n\n :return: The sames frames passed as arguments, but conformed\n according to the selected functions.\n\n Examples\n --------\n .. jupyter-execute:: /examples/io/stream/conform.py\n \"\"\"\n if not dfs:\n return dfs\n\n dfs = to_list(dfs)\n ops = to_list(valid_or_default(ops, DEFAULT_OPS))\n\n for op in ops:\n op = adapter(op)\n dfs = [op(d) for d in dfs]\n\n return dfs\n","sub_path":"ink/core/forge/joins/core/io/stream/conforming.py","file_name":"conforming.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"529922606","text":"# Tools for patch extraction and generation\nimport numpy as np\n\nfrom .utils import extract\n\n\n# FIXME consider what happens if central_idx is outside of x, error is likely\n# Probably need to rewrite it to support it\ndef extract_patch(x: np.ndarray, *, center_idx: np.array, patch_size: np.array,\n spatial_dims: list) -> np.array:\n \"\"\"Returns extracted patch of specific spatial size and with specified \n center from x.\n Parameters\n ----------\n x\n Array with data. Some of it's dimensions are spatial. We extract \n spatial patch specified by spatial location and spatial size. If\n available patch is smaller than required, we pad with zeroes.\n center_idx\n Location of the center of the patch. Components\n correspond to spatial dimensions. If some of patch size components was\n even, patch center is supposed to be on the right center pixel.\n patch_size\n Spatial patch size. Output will have original shape for\n non-spatial dimensions and patch_size shape for spatial dimensions.\n spatial_dims\n Which of x's dimensions consider as spatial. Accepts\n negative parameters.\n \n Returns\n -------\n :\n Patch extracted from x, padded, if necessary. 
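`conform` above is effectively a left fold of column operations over a list of frames. The same control flow on plain Python lists, so it runs without Spark:

def conform_toy(items, ops):
    # apply each op to every item, in order, as conform() does with frames
    for op in ops:
        items = [op(x) for x in items]
    return items

print(conform_toy(['  Mixed Case ', ' other '], [str.strip, str.lower]))
# ['mixed case', 'other']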
\n \n \"\"\"\n start = center_idx - patch_size // 2\n end = start + patch_size\n\n padding = np.zeros((x.ndim, 2), dtype=int)\n spatial_shape = np.array(extract(x.shape, spatial_dims))\n\n assert all([0 <= center_idx[i] < spatial_shape[i]\n for i in range(len(spatial_dims))])\n\n padding[spatial_dims, 0] = -start\n padding[spatial_dims, 1] = end - spatial_shape\n padding[spatial_dims] = np.maximum(0, padding[spatial_dims])\n\n start = np.maximum(start, 0)\n end = np.minimum(end, spatial_shape)\n\n slices = [slice(None)] * x.ndim\n for i, s in enumerate(spatial_dims):\n slices[s] = slice(start[i], end[i])\n\n patch = np.pad(x[slices], padding, mode='constant')\n assert np.all([ps == ts for ps, ts in\n zip(extract(patch.shape, spatial_dims), patch_size)])\n return patch\n\ndef extract_patches(xs: [np.ndarray], *, center_idx: np.array,\n patch_sizes: [np.array],\n spatial_dims: list) -> [np.array]:\n \"\"\"Applies extract_patch for each object in xs with corresponding patch\n size.\"\"\"\n patches = []\n for i, x in enumerate(xs):\n patch = extract_patch(\n x, center_idx=center_idx, spatial_dims=spatial_dims,\n patch_size=patch_sizes[i])\n patches.append(patch)\n return patches\n\n\ndef get_uniform_center_index(x_shape: np.array, patch_size: np.array,\n spatial_dims: list) -> np.array:\n \"\"\"\n Returns spatial center coordinates for the patch, chosen randomly.\n We assume that patch have to belong to the object boundaries.\n \n Parameters\n ----------\n x_shape:\n Object shape.\n patch_size:\n Size of the required patch\n spatial_dims:\n Elements from x_shape that correspond to spatial dims. Can be negative. \n\n Returns\n -------\n :\n Center indices for spatial dims. If patch size was even, center index\n is shifted to the right. \n\n \"\"\"\n max_spatial_center_idx = x_shape[spatial_dims] - patch_size + 1\n\n start_idx = np.random.rand(len(spatial_dims)) * max_spatial_center_idx\n start_idx = np.int32(start_idx)\n center_idx = start_idx + patch_size // 2\n return center_idx\n\n\ndef get_conditional_center_indices(\n spatial_mask: np.array, patch_size: np.array, spatial_dims: list):\n \"\"\"Returns array with spatial center indices for patches that completely \n belong to spatial_mask and spatial voxel mask is activated.\"\"\"\n c = np.argwhere(spatial_mask)\n\n l_bound = c - patch_size // 2\n r_bound = c + patch_size // 2 + patch_size % 2\n\n # Remove centers that are too left and too right\n c = c[np.all((l_bound >= 0) &\n (r_bound <= np.array(spatial_mask.shape)), axis=1)]\n return c\n","sub_path":"dpipe/medim/patch.py","file_name":"patch.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"465454172","text":"\"\"\"PyLinux URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. 
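The start/end/padding arithmetic in `extract_patch` above, reduced to a single spatial axis so the numbers are easy to follow (axis length and patch size are invented):

import numpy as np

center, size, length = 1, 5, 10          # 1-D toy example
start = center - size // 2               # -1: patch sticks out on the left
end = start + size                       # 4
pad = (max(0, -start), max(0, end - length))   # (1, 0): zero-pad the left
start, end = max(start, 0), min(end, length)
patch = np.pad(np.arange(length)[start:end], pad, mode='constant')
print(patch)  # [0 0 1 2 3] -- length 5, as requested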
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path, include\r\nfrom linux.views import index, linux, search # url导入views\r\n# from RemoteLinux.views import linux_create, linux_detail, linux_list_detail, linux_update, linux_delete, connect_test\r\nfrom RemoteLinux import views\r\n\r\n\r\nimport monitor\r\n\r\nurlpatterns = [\r\n path('', include('userprofile.urls', namespace='login')),\r\n path('index/', index, name='index'),\r\n path('linux/', linux, name='linux'),\r\n path('admin/', admin.site.urls),\r\n path('search/', search, name='search'),\r\n path('create/', views.linux_create, name='linux_create'),\r\n path('detail/', views.linux_detail, name='linux_detail'),\r\n path('connect//', views.linux_connect, name='linux_connect'),\r\n path('list_detail//', views.linux_list_detail, name='linux_list_detail'),\r\n path('list_app//', views.linux_list_app, name='linux_list_app'),\r\n path('linux_update//', views.linux_update, name='linux_update'),\r\n path('linux_delete//', views.linux_delete, name='linux_delete'),\r\n path('connect_test/', views.connect_test, name='connect_test'),\r\n path('linux_copy/', views.linux_copy, name='copy_form'),\r\n path('userprofile/', include('userprofile.urls', namespace='userprofile')),\r\n path('monitor/', include('monitor.urls', namespace='monitor')),\r\n path('password/', include('password.urls', namespace='password')),\r\n]\r\n","sub_path":"PyLinux/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"64062724","text":"#!/usr/bin/env python\n# coding=utf-8\n\n# Python 全栈案例初体验 2-3\n# http://www.imooc.com/video/15369\n\n\nfunc = lambda x: x**2 # 冒号前面是传入参数,后面是一个处理传入参数的单行表达式。\n\nprint(func(3))\n\n\na_list = range(10)\n\n# c = map(func, a_list)\n\nc = map(lambda x: x**2, a_list) # 匿名函数执行完后就不存在了,一般直接写\n\nprint(c)","sub_path":"imooc/python_fullstck/2_3_lambda.py","file_name":"2_3_lambda.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"488270854","text":"import re\nimport secrets\nimport configparser\nimport random\nfrom common import (\n CommonMsg,\n init_loc,\n get_aliases,\n set_aliases,\n set_help\n )\nfrom models import Bridge\nfrom bot.show_image import show_image\nfrom bot.stats import stats, write_stats, say_random\n\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\n\nTMP_BRIDGES = []\nBOT_NAME = config['bot']['name']\nif config['bot']['cmd_without_dash'] == 'yes':\n CMD_SIGN = ''\nelse:\n CMD_SIGN = '-'\n\n\ndef make_msg(response, inc_msg):\n msg = CommonMsg()\n msg.is_skype = True\n msg.is_telegram = True\n msg.chat_id = inc_msg.chat_id\n msg.user = inc_msg.user\n msg.time = inc_msg.time\n if hasattr(response, 'read'): # if response is file-like object\n msg.file_obj = {'name': 'image.jpg', 'obj': response}\n else:\n msg.content = response\n msg.content_full = f'[{BOT_NAME}] {response}'\n return msg\n\n@set_aliases(get_aliases('help'))\ndef cmd_help(cmd):\n result = _(\"\"\"Available commands:\n {CMD_SIGN}{BOT_NAME} make bridge - creates a new bridge and returns a secret code.\n {CMD_SIGN}{BOT_NAME} use bridge [secret code] - tries to connect to another chat with \\\nspecified secret code.\n {CMD_SIGN}{BOT_NAME} set lang [lang] - change language.\n {CMD_SIGN}{BOT_NAME} ping - {ping_help}\n\nAvailable modules:\n show_image\nType 
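The doubled slashes in routes like 'connect//' and 'linux_update//' in the URLconf above suggest the original '<type:name>' path converters were stripped during extraction (angle brackets read as HTML). A generic Django route with a converter looks like the following; the converter and view are hypothetical reconstructions, not the original code:

from django.urls import path

def linux_connect(request, pk):  # placeholder view, for illustration only
    ...

urlpatterns = [
    # hypothetical: the original capture name/type is unknown
    path('connect/<int:pk>/', linux_connect, name='linux_connect'),
]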
{CMD_SIGN}{BOT_NAME} help [module name] to get help on a specific module.\n \"\"\").format(CMD_SIGN=CMD_SIGN, BOT_NAME=BOT_NAME, ping_help=ping.help)\n return result\n\ndef module_help(cmd):\n module_name = cmd.split('help')[1].strip().lower()\n bot_module = None\n doc_string = None\n try:\n bot_module = globals()[module_name]\n except:\n return _(\"Module {0} not found\").format(module_name)\n if bot_module:\n doc_string = bot_module.help.strip()\n if not doc_string:\n result = f\"Module {module_name} doesn't have a documentation\"\n else:\n result = f\"\"\"\n{module_name} help:\n{doc_string}\n\"\"\"\n return result\n\ndef make_bridge(msg):\n global TMP_BRIDGES\n secret = secrets.token_hex(nbytes=4)\n if msg.is_telegram:\n telegram_id = msg.chat_id\n else:\n telegram_id = None\n if msg.is_skype:\n skype_id = msg.chat_id\n else:\n skype_id = None\n TMP_BRIDGES.append({\n 'secret': secret,\n 'telegram_id': telegram_id,\n 'skype_id': skype_id})\n if msg.is_telegram:\n another_chat = 'skype'\n else:\n another_chat = 'telegram'\n result_msg = f\"\"\"New bridge opened. \\\nType this in {another_chat} chat:\\n{CMD_SIGN}{BOT_NAME} use bridge {secret}\"\"\"\n return result_msg\n\n\ndef use_bridge(cmd, msg):\n global TMP_BRIDGES\n secret = cmd.split('use bridge')[1].strip()\n if len(secret) == 0:\n return f\"\"\"The secret code is not specified. \\n\nExample: {CMD_SIGN}{BOT_NAME} use bridge 1234abcd\"\"\"\n for BRIDGE in TMP_BRIDGES:\n if BRIDGE['secret'] == secret:\n if msg.is_skype:\n BRIDGE['skype_id'] = msg.chat_id\n elif msg.is_telegram:\n BRIDGE['telegram_id'] = msg.chat_id\n Bridge.create(\n telegram_id = BRIDGE['telegram_id'],\n skype_id = BRIDGE['skype_id'])\n return 'The connection is established successfully'\n\ns = _(\"ping? pong!\")\n@set_aliases(get_aliases('ping'))\n@set_help(s)\ndef ping():\n return _('pong')\n\n\ndef set_lang(cmd):\n lang = cmd.split('set lang')[1].strip().lower()\n try:\n init_loc(lang)\n return _(\"Language is set to: {0}\").format(lang)\n except:\n return _(\"Can't set language to: {0}\").format(lang)\n\n\ndef bot(msg):\n r = None\n\n write_stats(msg)\n\n if len(re.split(fr'^{CMD_SIGN}{BOT_NAME}[.!?, ]', msg.content,\n flags=re.IGNORECASE)) == 2:\n cmd = re.split(fr'^{CMD_SIGN}{BOT_NAME}[.!?, ]', msg.content,\n flags=re.IGNORECASE)[1].strip().lower()\n if cmd in cmd_help.aliases: r = cmd_help(cmd)\n elif cmd.startswith('help'): r = module_help(cmd)\n elif cmd == 'make bridge': r = make_bridge(msg)\n elif cmd in ping.aliases: r = ping()\n elif cmd.startswith('use bridge'): r = use_bridge(cmd, msg)\n elif cmd.startswith('set lang'): r = set_lang(cmd)\n elif cmd.startswith(stats.aliases): r = stats(cmd)\n else:\n r = show_image(cmd)\n if not r:\n r = say_random(cmd)\n\n if r:\n r = make_msg(r, msg)\n return r\n","sub_path":"bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"608695371","text":"import requests\nimport json\nimport base64\nimport argparse\nimport os\nfrom jinja2 import Environment, PackageLoader, FileSystemLoader\n\n# parsing command argumments\nparser = argparse.ArgumentParser()\nparser.add_argument('user', type=str, help='user name')\nparser.add_argument('password', type=str, help='password of the user')\nparser.add_argument('version', type=str, help='Release version')\nparser.add_argument('page_id', type=int, help='Id of Confluence page you want to update')\nparser.add_argument('space_key', type=str, help='Id of 
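The bridge secret created in `make_bridge` above is `secrets.token_hex(nbytes=4)`: four random bytes rendered as eight hex characters.

import secrets

code = secrets.token_hex(nbytes=4)
print(code, len(code))  # e.g. '3fa91c0b' 8 -- value differs on every run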
Confluence space the page belongs')\nargs = parser.parse_args()\n\n\n# Setup jinja2 templating stuff\nthis_dir = os.path.dirname(os.path.abspath('templates'))\nenv = Environment(loader=FileSystemLoader('templates'), trim_blocks=True)\npage_deployment = env.get_template('release_deploymentplan.html').render(version=args.version)\npage_appplan = env.get_template('app_rollout.html').render(version=args.version)\npage_SkyQDE = env.get_template('Sky_Q_DE_Rollout.html').render(version=args.version)\npage_releasenote = env.get_template('release_note_template.html').render(version=args.version)\n\n\n# Title and content of the page we will create\npage_title = args.version + ' Release Deployment Plan'\napps = ['Android DE Rollout', 'Android UK Rollout', 'Desktop UK Rollout', 'iOS DE Rollout', 'iOS UK Rollout', 'LG UK Rollout', 'Roku UK Rollout', 'Web DE Rollout', 'Web UK Rollout', 'STB/Sky Q DE Rollout', 'Campaign Manager Rollout'];\nversionval = args.version + ' '\n\n# Create the basic auth string\nauth = base64.b64encode(b'{}:{}'.format(args.user, args.password))\n\n# API for Creating a new page as a child of another page\nurl = 'https://confluence.int.skystore.com/rest/api/content/'\n\ndef appDefinition(app_name):\n if app_name == 'STB/Sky Q DE Rollout':\n return page_SkyQDE\n else:\n return page_appplan\n\n# Request Headers\nheaders = {\n 'Authorization': 'Basic {}'.format(auth),\n 'Content-Type': 'application/json',\n}\n\n# request body\ndata = {\n 'type': 'page',\n 'title': page_title,\n 'ancestors': [{'id':args.page_id}],\n 'space': {'key':args.space_key},\n 'body': {\n 'storage':{\n 'value': page_deployment,\n 'representation':'storage',\n }\n }\n}\n\n# Ready to call the api!\ntry:\n\n r = requests.post(url=url, data=json.dumps(data), headers=headers)\n\n # Consider any status other than 2xx an error\n if not r.status_code // 100 == 2:\n print(\"Error: Unexpected response {}\".format(r))\n print(r.text)\n else:\n # Get father page Id\n father_id = r.json()['id']\n print(\"New child page will be created with Id {}\".format(father_id))\n\n # Request body for childs\n for app_name in apps:\n data_child = {\n 'type': 'page',\n 'title': versionval + app_name,\n 'ancestors': [{'id':father_id}],\n 'space': {'key':args.space_key},\n 'body': {\n 'storage':{\n 'value': appDefinition(app_name),\n 'representation':'storage'\n }\n }\n }\n\n # Creating Child\n create_child = requests.post(url=url, data=json.dumps(data_child), headers=headers)\n\n # Get Child Id\n child_id = create_child.json()['id']\n print(\"New child page will be created with Id {}\".format(child_id))\n\n if app_name not in ['STB/Sky Q DE Rollout']:\n # Get Test report templates\n template = '{}.html'.format(app_name.replace(' ','_'))\n page_testreport = env.get_template(template).render(version=args.version)\n\n # Request body for grandchilds\n data_grandchild = {\n 'type': 'page',\n 'title': versionval + app_name + ' Test Report',\n 'ancestors': [{'id':child_id}],\n 'space': {'key':args.space_key},\n 'body': {\n 'storage':{\n 'value': page_testreport,\n 'representation':'storage'\n }\n }\n }\n\n # Create grand childs\n create_grandchild = requests.post(url=url, data=json.dumps(data_grandchild), headers=headers)\n\n # Request body for grandchilds\n data_grandchild2 = {\n 'type': 'page',\n 'title': versionval + app_name + ' Release Notes',\n 'ancestors': [{'id':child_id}],\n 'space': {'key':args.space_key},\n 'body': {\n 'storage':{\n 'value': page_releasenote,\n 'representation':'storage'\n }\n }\n }\n\n create_grandchild2 = 
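One pitfall in the Confluence script above: under Python 3, `base64.b64encode(b'{}:{}'.format(...))` raises `AttributeError`, because `bytes` has no `format` method (it works on Python 2, where `b''` is `str`). A Python 3 equivalent, with placeholder credentials:

import base64

user, password = 'alice', 's3cret'  # placeholders, not real credentials
auth = base64.b64encode('{}:{}'.format(user, password).encode()).decode()
headers = {
    'Authorization': 'Basic {}'.format(auth),
    'Content-Type': 'application/json',
}
print(headers['Authorization'])  # Basic YWxpY2U6czNjcmV0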
requests.post(url=url, data=json.dumps(data_grandchild2), headers=headers)\n print(\"New child page will be created with Id {}\".format(create_grandchild2))\n print('Structure created successfully!')\n\nexcept requests.exceptions.RequestException as e:\n\n # A serious problem happened, like an SSLError or InvalidURL\n print(\"Error: {}\".format(e))\n","sub_path":"structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"574667161","text":"from __future__ import unicode_literals\n\nfrom multiprocessing import Process\nimport socket\nimport subprocess\n\nfrom fabric.api import env, hosts, task\nfrom fabric.colors import green\nfrom fabric.utils import puts\n\nfrom fh_fablib import run_local, require_services\n\n\ndef own_ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect(('feinheit.ch', 80))\n return s.getsockname()[0]\n\n\n@task(default=True)\n@hosts('')\n@require_services\ndef dev(host='127.0.0.1', port=8000):\n \"\"\"Runs the development server, SCSS watcher and backend services if they\n are not running already\"\"\"\n if host == 'net':\n host = own_ip()\n\n puts(green(\n 'Starting dev server on http://%s:%s/' % (host, port),\n bold=True))\n\n jobs = [\n lambda: run_local(\n 'venv/bin/python -Wonce manage.py runserver 0.0.0.0:%s' % (\n port,\n ),\n ),\n lambda: run_local('HOST=%s yarn run dev' % host),\n ]\n jobs = [Process(target=j) for j in jobs]\n [j.start() for j in jobs]\n [j.join() for j in jobs]\n\n\n@task\n@hosts('')\ndef mm():\n \"\"\"Wrapper around the ``makemessages`` management command which excludes\n dependencies (virtualenv, bower components, node modules)\"\"\"\n run_local(\n 'venv/bin/python manage.py makemessages -a'\n ' -i app/cms'\n ' -i bower_components'\n ' -i node_modules'\n ' -i venv')\n\n \"\"\"Also statici18n ``makemessages`` command will be executed\"\"\"\n run_local(\n 'venv/bin/python manage.py makemessages -d djangojs -a'\n ' -e jsx,js'\n ' -i app/static/jsi18n'\n ' -i app/cms'\n ' -i app/templates/elephantblog'\n ' -i bower_components'\n ' -i node_modules'\n ' -i venv')\n\n\n@task\n@hosts('')\ndef cm():\n \"\"\"Wrapper around ``compilemessages`` which does not descend into\n venv\"\"\"\n run_local(\n '. venv/bin/activate && for dir in '\n '$(find . 
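`own_ip` in the Fabric file above uses a standard trick: connecting a UDP socket performs no handshake, but it makes the OS choose the outbound interface, whose address `getsockname()` then reports. A standalone version (the original connects to feinheit.ch; a documentation address is used here, and a default route is assumed):

import socket

s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('192.0.2.1', 80))  # UDP connect sends no packet; needs a route
print(s.getsockname()[0])     # local IP of the interface the OS would use
s.close()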
-name venv -prune -or -name locale -print)'\n '; do (cd $dir; cd ..; django-admin.py compilemessages); done')\n\n\n@task\n@hosts('')\n@require_services\ndef services():\n \"\"\"Starts all required background services\"\"\"\n pass\n\n\n@task\n@hosts('')\ndef kill():\n \"\"\"Send SIGTERM to postgres and redis-server\"\"\"\n subprocess.call(\n \"ps -ef | awk '/(postgres|redis)/ {print $2}' | xargs kill\",\n shell=True)\n\n\n@task(aliases=['prettier'])\n@hosts('')\ndef prettify():\n \"\"\"Prettifies JS and SCSS code using prettier\"\"\"\n for cmd in env['box_prettify']:\n run_local(cmd)\n\n\n@task\n@hosts('')\ndef optimize():\n \"\"\"Optimizes SVG, PNG and JPEG files with svgo and imagemagick (convert)\"\"\"\n for cmd in env['box_optimize']:\n run_local(cmd)\n","sub_path":"fh_fablib/dev.py","file_name":"dev.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"429443354","text":"from rest_framework import serializers\nfrom .models import *\n\n\nclass aFormSerializer(serializers.HyperlinkedModelSerializer):\n\n class Meta:\n model = aForm\n fields = ('url', 'form_name',\n 'field_name0', 'field_type0',\n 'field_name1', 'field_type1',\n 'field_name2', 'field_type2',\n 'field_name3', 'field_type3',\n 'field_name4', 'field_type4',\n 'field_name5', 'field_type5',\n 'field_name6', 'field_type6'\n )\n","sub_path":"brite/mkform/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"526124692","text":"import numpy as np\nfrom matplotlib import pyplot as plt \n#print(np.__version__)\n#print(np.random.randn())\n\nw = 1.347\nb = 0.627\n\nx = np.random.randn(300, 1) * 10\n#print(x)\nnoize = np.random.randn(300, 1) * 7\n\ny = w * x + b + noize\n#print(y)\n\nplt.title('sample points')\nplt.xlabel('x')\nplt.ylabel('y')\nplt.plot(x, y, 'ob')\nplt.show()\n\npArray = np.hstack((x, y))\n#print(pArray)\n\nnp.savetxt('points.csv', pArray, delimiter=',')","sub_path":"深度学习与PyTorch入门实战教程/006/create_points.py","file_name":"create_points.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"330357063","text":"import socket\nimport threading\n\nconn_list = []\n\nPORT = 3001\n\nser = socket.socket()\nser.bind((\"\", PORT))\nprint(\"Binding to Port...\")\n\nser.listen(10)\nprint(\"Listening\")\n\ndef client_server_conn(conn, addr, conn_list_len):\n print(f\"Connected to {addr[0]} on {addr[1]}\")\n while True:\n if conn_list[conn_list_len] == None:\n break\n data = conn.recv(200)\n print(f\"{addr[0]}:> {data.decode('utf-8')}\")\n conn.send(bytes(input(f\":>>\"), \"utf-8\"))\n\n\ncount = 1\nwhile True:\n conn, addr = ser.accept()\n th = threading.Thread(target = client_server_conn, args = (conn, addr, len(conn_list)))\n conn_list.append((conn,addr,th))\n th.start()\n count += 1\n if count > 2:\n break\n\nfor tup in conn_list:\n x = tup\n x = None\n tup[0].close()\n tup[2].join()\n print(f\"tup[1] connection closed\")\n\n\n\n","sub_path":"Athira_VS/aug31/server_q2.py","file_name":"server_q2.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"504703100","text":"from sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom database_setup import Base, Restaurant, MenuItem\n\n# create db engine 
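The `create_points.py` record above samples `y = w*x + b + noise`; a quick sanity check that ordinary least squares recovers roughly those coefficients (seeded here so the output is stable):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((300, 1)) * 10
y = 1.347 * x + 0.627 + rng.standard_normal((300, 1)) * 7
w_hat, b_hat = np.polyfit(x.ravel(), y.ravel(), 1)
print(round(w_hat, 3), round(b_hat, 3))  # close to 1.347 and 0.627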
and bind to new session and objects\nengine = create_engine('sqlite:///restaurantmenu.db')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind = engine)\nsession = DBSession()\n\n# query all restaurants and menu items\nprint(session.query(Restaurant).all())\nprint(session.query(MenuItem).all())\n\n# query first restaurant\nfirstRest = session.query(Restaurant).first()\n\n# query filter by name\ncheesePizzas = session.query(MenuItem).filter_by(name = \"Cheese Pizza\")","sub_path":"vagrant/restaurant/db_examples/query_db.py","file_name":"query_db.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"502275069","text":"import os\nfrom setuptools import setup, find_packages\n\n\ndef read(filename):\n return open(os.path.join(os.path.dirname(__file__), filename)).read()\n\n\nsetup(\n name = 'django-tinycart',\n version = '0.1.dev',\n url = 'https://github.com/trilan/django-tinycart',\n license = 'BSD',\n description = 'Just a shopping cart for your Django projects.',\n long_description = read('README.rst'),\n author = 'Mike Yumatov',\n author_email = 'mike@yumatov.org',\n packages = find_packages(),\n test_suite = 'tests.main',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"166516584","text":"from autoflow import AutoFlowEstimator\n\nautoflow_pipeline=AutoFlowEstimator()\nensemble_estimator,Xy_test=autoflow_pipeline.fit_ensemble(\n task_id=\"task_608d5761d4c28c4cea208a0f5e83ba22\",\n hdl_id=\"hdl_2215affa927badf430851ce424ae4394\",\n trials_fetcher_params={\"k\":20},\n return_Xy_test=True\n)\nscore=ensemble_estimator.score(Xy_test[0],Xy_test[1])\nprint(score)\nprint(ensemble_estimator)","sub_path":"test/run_ensemble.py","file_name":"run_ensemble.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"377242593","text":"import requests\n\ncity_name = input()\ncoutry_code = input()\nresponse = requests.post(\n url=\"https://api.openweathermap.org/data/2.5/weather?\",\n params={\"appid\": \"5d1c5370369029f2d3d9274729db73b2\",\n \"q\": f\"{city_name},{coutry_code}\",\n \"units\": \"metric\"\n\n },\n data={\n \"city name\": city_name,\n \"country code\": coutry_code,\n },\n)\n# a = response.json() # --- что приходит с апи\nweather = response.json()[\"main\"][\"temp\"]\n# print((weather.get('main')).get('temp'))\nprint(f\"Temperature in {city_name} {weather}C\")\n","sub_path":"netwrk/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344971783","text":"import pandas as pd\n\nfrom App_Main.Backend.Words.CsvToWordAdapter import CsvToWordAdapter\n\n\nclass WordDataLoader():\n\n def __init__(self,word_path,trans_path):\n try:\n self.df = pd.read_csv(word_path,sep='|')\n if (trans_path is not None):\n self.dftr = pd.read_csv(trans_path,sep='|')\n self.lang = list(self.dftr)\n #print (self.lang)\n else:\n self.dftr = None\n self.lang = None\n except IOError as error:\n print (error)\n raise IOError\n\n self.size=self.df.shape[0]\n self.pointer=0\n\n def hasNext(self):\n if(self.pointer>=self.size):return False\n return True\n\n def next_word(self):\n if(self.pointer>=self.size):\n raise IndexError\n\n 
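The weather script above issues a POST, but OpenWeatherMap's current-weather endpoint is documented as GET. A GET sketch with the same parameters; the API key is a placeholder:

import requests

resp = requests.get(
    'https://api.openweathermap.org/data/2.5/weather',
    params={'appid': 'YOUR_API_KEY', 'q': 'London,uk', 'units': 'metric'},
)
print(resp.status_code)  # 401 until a real key is supplied
# with a valid key, resp.json()['main']['temp'] is the temperature in C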
this_word=self.df.iloc[self.pointer]\n\n if(self.dftr is not None):\n translation = self.dftr.iloc[self.pointer]\n else:\n translation = None\n\n self.pointer = self.pointer + 1\n data = CsvToWordAdapter(this_word, translation,self.lang)\n return data","sub_path":"App_Main/Backend/Words/WordDataLoader.py","file_name":"WordDataLoader.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"172430590","text":"import numpy as np\n\n\n# dict CONFIG will be imported by pylitho.py\n\nCONFIG = {\n 'MPI' : True,\n 'MPI_VARIATION_TYPE' : 21,\n 'MPI_VARIATION_PARAMS' : np.array((1300.0,1500.0,5e3,50e3,50,300)),\n 'MPI_VIRT_PROCS' : 10*10*10,\n\n\n 'OUTPUT_FILE' : True,\n 'OUTPUT_FILE_EVERY_TSTEP' : 1000,\n 'OUTDIR' : \"output\", # no trailing slash!\n 'MODELNAME' : \"002-ini\",\n 'OUTPUT_OVERWRITE' : True,\n\n 'RESTART' : False,\n 'RESTART_INDIR' : \"output\",\n 'RESTART_MODELNAME' : \"test1\",\n 'RESTART_TSTEP' : 9450,\n 'RESTART_POST_MOD' : 0, # post-restart modifications\n\n 'NX' : 300,\n 'L_KM' : (0, 300), # in km\n 'MAXTIME_MA' : 700, # in Ma\n 'MAXRUNTIME_MA' : 700, # in Ma\n 'TSTEP_MULTI' : 0.5,\n\n 'DIFFSCHANGE_ACCURACY' : 1e-10, # max diffus. change, iteration criteria\n\n 'MOHO_DEPTH_KM' : 35,\n\n # ****\n # define type of initial T field\n # if restart==True, use type -1 to prevent overwriting\n # ****\n #'TINI_TYPE' : -1,\n 'TINI_TYPE' : 0,\n #'TINI_TYPE' : 1,\n #'TINI_TYPE' : 10,\n\n # ****\n # define type of k=k(T) relation\n # ****\n\n # comment/uncomment as needed\n\n 'KT_RELATION_TYPE' : 0,\n 'KT_RELATION_PARAMS' : np.array(()),\n\n #'KT_RELATION_TYPE' : 1,\n #'KT_RELATION_PARAMS' : np.array((1e-3),ndmin=1),\n\n #'KT_RELATION_TYPE' : 2,\n #'KT_RELATION_PARAMS' : np.array((1e-3, 5e-10)),\n\n\n # ****\n # define type of cp=cp(T) relation\n # ****\n\n 'CT_RELATION_TYPE' : 0,\n 'CT_RELATION_PARAMS' : np.array(()),\n\n #'CT_RELATION_TYPE' : 1,\n #'CT_RELATION_PARAMS' : np.array((8.95e-10, -2.13e-6, 0.00172, 0.716, 750.0)),\n\n\n # ****\n # define type of k0 field\n # ****\n 'K0_TYPE' : 10,\n\n # ****\n # define type of cp0 field\n # ****\n #'C0_TYPE' : 0,\n 'C0_TYPE' : 10,\n\n # ****\n # define type of rho field\n # ****\n #'RHO0_TYPE' : 0,\n 'RHO0_TYPE' : 10,\n\n\n # ****\n # define type of H field\n # ****\n #'H0_TYPE' : 0,\n 'H0_TYPE' : 10,\n #'H0_TYPE' : 2,\n\n 'BND_BOT_TYPE' : 0,\n 'BND_BOT_HFLOW' : 0.0,\n 'BND_BOT_TEMP' : 1350.0,\n 'BND_TOP_TEMP' : 0,\n 'EROSION_SPEED_M_MA' : 0,\n 'EROSION_SPEED_TYPE' : 1,\n 'MAX_TIME_TO_ERODE_MA' : 0,\n}\n","sub_path":"configs/config-002-ini.py","file_name":"config-002-ini.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"320221743","text":"# 引用函示庫\nimport requests\nfrom bs4 import BeautifulSoup\nimport csv\nimport datetime\nimport pandas as pd\nfrom io import StringIO\nimport json\nimport time\n\nstarttime = datetime.datetime.now()\nsii_close_url = \"https://www.twse.com.tw/exchangeReport/STOCK_DAY_AVG_ALL?response=open_data\"\nrsii = requests.get(sii_close_url)\ndf = pd.read_csv(StringIO(rsii.text.replace(\"=\", \"\")))\nurl = f\"https://www.tpex.org.tw/web/stock/aftertrading/daily_close_quotes/stk_quote_result.php?l=zh-tw&_={int(time.time())}\"\nreqs = requests.get(url)\n#利用json.loads()解碼JSON\nreqsjson = json.loads(reqs.text)\nd = {}\nfor req in reqsjson[\"aaData\"]:\n d[req[0]] = req[2]\ndef getClose(stock):\n try:\n return df[df[\"股票代號\"] == 
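The `hasNext`/`next_word` pair above is a simple cursor over a DataFrame. The same pattern on inline data, so it runs without the repository's CSV files or the adapter class:

import pandas as pd

df = pd.DataFrame({'word': ['cat', 'dog'], 'lang': ['en', 'en']})
pointer, size = 0, df.shape[0]
while pointer < size:       # hasNext()
    row = df.iloc[pointer]  # next_word(), minus the CsvToWordAdapter step
    pointer += 1
    print(row['word'])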
stock].iat[0,2]\n except:\n return d[stock]\n else:\n return \"error\"\n \n# 處理資料\nList = []\ndef add_things(h, last):\n for i in range(1, len(h)):\n stock_list = h[i].split(\",\") # 以逗號作為分割\n company = stock_list[3]\n for i in range(1, len(last)):\n if company == last[i].split(\",\")[3]: # 同一間公司\n last_stock_list = last[i].split(\",\")\n for i in range(10):\n stock_list[i] = stock_list[i].strip(\"\\\"\") # 把 \" 刪除\n last_stock_list[i] = last_stock_list[i].strip(\"\\\"\")\n temp_list = []\n temp_list.append(stock_list[2]) # 加入股票代號\n temp_list.append(stock_list[3]) # 加入股票名字\n if stock_list[5] != \"\":\n temp_list.append(format(int(stock_list[5]), ',')) # 加入當月營收\n else:\n temp_list.append(\"None\")\n temp_list.append(format(int(stock_list[6]), ',')) # 加入上月營收\n temp_list.append(format(int(last_stock_list[5]), ','))\n temp_list.append(format(int(last_stock_list[6]), ','))\n try:\n stock_list[8] = float(stock_list[8]) # 加入MOM\n temp_list.append(round(stock_list[8], 2))\n except:\n temp_list.append(\"None\") # 避免有些公司上月營收是 0\n temp_list.append(format(int(stock_list[7]), ',')) # 去年同期營收\n try:\n stock_list[9] = float(stock_list[9]) # 加入YOY\n temp_list.append(round(stock_list[9], 2))\n except:\n temp_list.append(\"None\")\n try:\n cur = getClose(stock_list[2])\n temp_list.append(cur)\n except:\n temp_list.append(\"Error\")\n List.append(temp_list)\n\n\n#處理使用者輸入介面\nwhile True:\n try:\n year = int(input(\"請輸入年度: \")) # 如果不是輸入整數會讓使用者重新輸入\n break\n except:\n print(\"年份格式錯誤,請重新輸入\")\nwhile True:\n try:\n month = int(input(\"請輸入月份: \")) # 如果不是輸入整數會讓使用者重新輸入\n # 如果不是輸入正確月份會讓使用者重新輸入\n if month < 1 or month > 12:\n print(\"無此月份,請重新輸入\")\n continue\n break\n except:\n print(\"月份格式錯誤,請重新輸入\")\n\nfirst_row = []\nstart = datetime.datetime.now() # 計算程式執行時間\nstart_time = start.strftime(\"%Y-%m-%d %H:%M:%S\")\ncsv_time = start.strftime(\"%Y-%m-%d\")\nfirst_row.append(\"程式執行時間\")\nfirst_row.append(start_time)\n\n# 紀錄當前年度和月(會呈現在excel第一列)\nnumber = []\nnumber.append(\"\")\nyearString = str(year) + \"年度\"\nnumber.append(yearString)\nmonthString = str(month) + \"月\"\nnumber.append(monthString)\n\n# 抓上上個月\nanother_year = year\nif month == 1:\n another_year = another_year - 1\n another_month = 11\nelif month == 2:\n another_year = another_year - 1\n another_month = 12\nelse:\n another_month = month - 2\n\n# 抓上市公司資料,透過網頁表格直接進行下載\nurl_sii = requests.get(f\"https://mops.twse.com.tw/nas/t21/sii/t21sc03_{year}_{month}.csv\")\nurl_sii.encoding='utf-8' # 解碼\nurl_sii_last = requests.get(f\"https://mops.twse.com.tw/nas/t21/sii/t21sc03_{another_year}_{another_month}.csv\")\nurl_sii_last.encoding='utf-8' # 解碼\nadd_things(url_sii.text.splitlines(), url_sii_last.text.splitlines()) # 把下載的表格丟進函式處理\n\n# 抓上櫃公司資料,透過網頁表格直接進行下載\nurl_otc = requests.get(f\"https://mops.twse.com.tw/nas/t21/otc/t21sc03_{year}_{month}.csv\")\nurl_otc.encoding='utf-8' # 解碼\nurl_otc_last = requests.get(f\"https://mops.twse.com.tw/nas/t21/otc/t21sc03_{another_year}_{another_month}.csv\")\nurl_otc_last.encoding='utf-8' # 解碼\nadd_things(url_otc.text.splitlines(), url_otc_last.text.splitlines())\n\nwith open(f'{year}年度{month}月{csv_time}.csv', 'w', newline='', encoding='utf-8-sig') as csvfile:\n # 建立 CSV 檔寫入器\n writer = csv.writer(csvfile)\n # 寫入一列資料\n writer.writerow(first_row)\n writer.writerow(number) # 寫入檔案的年度和月份\n writer.writerow([\"公司代號\", \"公司名稱\", \"當月營收\", \"上月營收\", \"上上月營收\", \"上上上月營收\", \"MOM\", \"去年同期營收\", \"YOY\", \"收盤價\"])\n # 寫入另外幾列資料\n for i in range(len(List)):\n writer.writerow(List[i])\n\n#long running\nendtime = datetime.datetime.now()\nprint (endtime 
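`getClose` above tries the listed-market table first and falls back to the OTC dict; note its trailing `else: return "error"` is unreachable, since both the `try` and `except` branches return. The same lookup order on toy data (column names and prices invented):

import pandas as pd

df = pd.DataFrame({'code': ['2330'], 'close': [600.0]})  # listed market
otc = {'5483': 123.5}                                    # OTC fallback

def get_close(code):
    hit = df[df['code'] == code]
    if not hit.empty:
        return hit.iloc[0]['close']
    return otc[code]  # KeyError if the code is in neither source

print(get_close('2330'), get_close('5483'))  # 600.0 123.5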
- starttime)\n","sub_path":"new_stock_add_close.py","file_name":"new_stock_add_close.py","file_ext":"py","file_size_in_byte":5498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"205003444","text":"class Solution(object):\n\tdef isPalindrome(self, s):\n\t\t\n\t\ttargetstrings= \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\t\tnewstr= []\n\t\tl = len(s)\n\t\tfor c in s:\n\t\t\tif c in targetstrings:\n\t\t\t\tnewstr.append(c)\n\n\t\tnews = \"\".join(x.lower() for x in newstr)\n\n\t\tprint(news)\n\n\t\tl = len(news)\n\n\t\ti = 0\n\n\t\twhile i< (l/2):\n\t\t\tif news[i]!=news[l-i-1]:\n\t\t\t\treturn False\n\t\t\ti+=1\n\n\t\treturn True\n\n\ntestClass= Solution()\n\nprint(testClass.isPalindrome(\"0P\"))\t\n\t\t\t","sub_path":"125-valid-palindrome/125.py","file_name":"125.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"28092299","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nCopyright 2018 NAVER Corp.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and\nassociated documentation files (the \"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial\nportions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\nINCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\nPARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\nHOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\nCONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE\nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\ncho = \"ㄱㄲㄴㄷㄸㄹㅁㅂㅃㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎ\" # len = 19\njung = \"ㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣ\" # len = 21\njong = \"ㄱ/ㄲ/ㄱㅅ/ㄴ/ㄴㅈ/ㄴㅎ/ㄷ/ㄹ/ㄹㄱ/ㄹㅁ/ㄹㅂ/ㄹㅅ/ㄹㅌ/ㄹㅍ/ㄹㅎ/ㅁ/ㅂ/ㅂㅅ/ㅅ/ㅆ/ㅇ/ㅈ/ㅊ/ㅋ/ㅌ/ㅍ/ㅎ\".split('/') # len = 27\ntest = cho + jung + ''.join(jong)\n\nhangul_length = len(cho) + len(jung) + len(jong) # 67\n\n\ndef is_valid_decomposition_atom(x):\n return x in test\n\n\ndef decompose(x):\n in_char = x\n if x < ord('가') or x > ord('힣'):\n return chr(x)\n x = x - ord('가')\n y = x // 28\n z = x % 28\n x = y // 21\n y = y % 21\n # if there is jong, then is z > 0. So z starts from 1 index.\n zz = jong[z - 1] if z > 0 else ''\n if x >= len(cho):\n print('Unknown Exception: ', in_char, chr(in_char), x, y, z, zz)\n return cho[x] + jung[y] + zz\n\n\ndef decompose_as_one_hot(in_char, warning=True):\n one_hot = []\n # print(ord('ㅣ'), chr(0xac00))\n # [0,66]: hangul / [67,194]: ASCII / [195,245]: hangul danja,danmo / [246,249]: special characters\n # Total 250 dimensions.\n if ord('가') <= in_char <= ord('힣'): # 가:44032 , 힣: 55203\n x = in_char - 44032 # in_char - ord('가')\n y = x // 28\n z = x % 28\n x = y // 21\n y = y % 21\n # if there is jong, then is z > 0. 
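For comparison with the palindrome record above, a more idiomatic spelling of the same check; note `str.isalnum` is broader than the hand-written ASCII table (it also accepts Unicode letters and digits):

def is_palindrome(s):
    # keep alphanumerics, lower-case them, compare with the reversal
    t = [c.lower() for c in s if c.isalnum()]
    return t == t[::-1]

print(is_palindrome('0P'))                               # False
print(is_palindrome('A man, a plan, a canal: Panama'))   # True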
So z starts from 1 index.\n zz = jong[z - 1] if z > 0 else ''\n if x >= len(cho):\n if warning:\n print('Unknown Exception: ', in_char, chr(in_char), x, y, z, zz)\n\n one_hot.append(x)\n one_hot.append(len(cho) + y)\n if z > 0:\n one_hot.append(len(cho) + len(jung) + (z - 1))\n return one_hot\n else:\n if in_char < 128:\n return [hangul_length + in_char] # 67~\n elif ord('ㄱ') <= in_char <= ord('ㅣ'):\n return [hangul_length + 128 + (in_char - 12593)] # 194~ # [ㄱ:12593]~[ㅣ:12643] (len = 51)\n elif in_char == ord('♡'):\n return [hangul_length + 128 + 51] # 245~ # ♡\n elif in_char == ord('♥'):\n return [hangul_length + 128 + 51 + 1] # ♥\n elif in_char == ord('★'):\n return [hangul_length + 128 + 51 + 2] # ★\n elif in_char == ord('☆'):\n return [hangul_length + 128 + 51 + 3] # ☆\n else:\n if warning:\n print('Unhandled character:', chr(in_char), in_char)\n return []\n\n\ndef decompose_str(string):\n return ''.join([decompose(ord(x)) for x in string])\n\n\ndef decompose_str_as_one_hot(string, warning=True):\n tmp_list = []\n for x in string:\n tmp_list.extend(decompose_as_one_hot(ord(x), warning=warning))\n return tmp_list\n","sub_path":"missions/examples/kin/example/kor_char_parser.py","file_name":"kor_char_parser.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"220548847","text":"# public\r\nimport os\r\nimport re\r\nimport cv2\r\nimport json\r\nimport time\r\nimport random\r\nimport numpy as np\r\nfrom typing import Union, Tuple, List\r\n\r\n# private\r\nfrom .utils import parse_vsi_meta, parse_vsi_anno\r\nfrom data.vsi_reader import VsiReader\r\n\r\n\r\nclass Jsoner(object):\r\n \"\"\"\r\n Traverse all vsi and generate coco-format json\r\n\r\n TODO: Note that the RLE mask haven't implemented.\r\n \"\"\"\r\n\r\n def __init__(self, work_dir: str, split: str,\r\n classes_valid: Union[Tuple[str], List[str]] = [\"certain\", \"NO\", \"uncertain\"],\r\n write: bool = False):\r\n\r\n assert split in [\"train\", \"val\", \"test\", \"evaluate\", \"debug\"], \\\r\n \"ParamError: split must be train, val, and test\"\r\n\r\n self.work_dir = work_dir\r\n self.dir_annos = os.path.join(work_dir, \"annos\", split)\r\n self.writable = write\r\n # self.dir_annos = \"I:\\\\GIST\\\\annos\"\r\n self.classes_list = [\"certain\", \"NO\", \"uncertain\"]\r\n self.classes_valid = classes_valid\r\n self.counter_image = 0\r\n self.counter_vsi = 0\r\n self.counter_annotation = 0\r\n vsis, images, annotaions = self._vsis(self.dir_annos, mode=\"all\")\r\n self._base = {\r\n \"info\": self._info(),\r\n \"liencse\": self._license(),\r\n \"categories\": self._categories(),\r\n \"vsis\": vsis,\r\n \"images\": images,\r\n \"annotations\": annotaions\r\n }\r\n\r\n def _vsis(self, dir: str, mode: str = \"exist\"):\r\n \"\"\"\r\n construct vsi part of coco formats\r\n\r\n Parameters\r\n ----------\r\n - **mode**: str\r\n\r\n the slicing mode of entire wsi,\r\n \"exist\" means only the patch containing mito will be saved,\r\n \"all\" means all the patch will be saved, and\r\n the patch whose shape is smaller than fixed shape will be padding with 0\r\n \"\"\"\r\n assert mode.lower() in [\"exist\", \"all\"], \\\r\n \"ParamError: mode({}) must be in [exist, all]\".format(mode)\r\n\r\n if not os.path.exists(dir):\r\n raise NotADirectoryError(f\"{dir} not found.\")\r\n vsis = []\r\n images = []\r\n annotations = []\r\n if mode.lower() == \"exist\":\r\n for idx_meta, file_meta in \\\r\n enumerate([file for file in os.listdir(dir) if 
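A worked example of the decomposition arithmetic above for the syllable '한' (U+D55C), showing how the offset from '가' splits into cho/jung/jong indices:

cho = "ㄱㄲㄴㄷㄸㄹㅁㅂㅃㅅㅆㅇㅈㅉㅊㅋㅌㅍㅎ"
jung = "ㅏㅐㅑㅒㅓㅔㅕㅖㅗㅘㅙㅚㅛㅜㅝㅞㅟㅠㅡㅢㅣ"
jong = "ㄱ/ㄲ/ㄱㅅ/ㄴ/ㄴㅈ/ㄴㅎ/ㄷ/ㄹ/ㄹㄱ/ㄹㅁ/ㄹㅂ/ㄹㅅ/ㄹㅌ/ㄹㅍ/ㄹㅎ/ㅁ/ㅂ/ㅂㅅ/ㅅ/ㅆ/ㅇ/ㅈ/ㅊ/ㅋ/ㅌ/ㅍ/ㅎ".split('/')

x = ord('한') - ord('가')   # 10588
y, z = x // 28, x % 28      # 378, 4
x, y = y // 21, y % 21      # 18, 0
print(cho[x], jung[y], jong[z - 1])  # ㅎ ㅏ ㄴ -> 한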
\"meta\" in file]):\r\n path_meta = os.path.join(dir, file_meta)\r\n vsi = self._vsi(path_meta=path_meta, id_vsi=self.counter_vsi + 1) # this id is vsi_id\r\n vsis.append(vsi)\r\n path_anno = re.sub(\"meta\", \"annotation\", path_meta)\r\n if os.path.exists(path_anno):\r\n annos = parse_vsi_anno(path_anno)\r\n basename = os.path.basename(path_anno)\r\n name_vsi = basename.split(\"_\")[0] + \".vsi\"\r\n path_vsi = os.path.join(self.work_dir, \"data\", name_vsi)\r\n reader = VsiReader(path=path_vsi, tilesize_init=(992, 992))\r\n numX, numY = reader.getNumTile()\r\n for idxX in range(numX):\r\n for idxY in range(numY):\r\n field = reader.getTileField(idxX, idxY) # x1y1x2y2\r\n flag_object_image = False\r\n for anno in annos:\r\n bbox = anno[\"bbox\"]\r\n bbox = [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]\r\n if self._testIn(bbox, field) and anno[\"tag\"] in self.classes_valid:\r\n flag_object_image = True\r\n bbox_new = self.clip(bbox, field) # x1y1x2y2\r\n bbox_deshift = [bbox_new[0] - field[0], bbox_new[1] - field[1],\r\n bbox_new[2] - field[0], bbox_new[3] - field[1], ]\r\n class_bbox = self.classes_list.index(anno[\"tag\"]) + 1\r\n annotation = self._annotation(id=self.counter_annotation + 1,\r\n id_image=self.counter_image + 1,\r\n id_category=class_bbox,\r\n bbox=bbox_deshift)\r\n annotations.append(annotation)\r\n self.counter_annotation += 1\r\n if flag_object_image:\r\n name_file = \"{idimg}_{idvsi}_{x}_{y}.png\".format(idimg=self.counter_image + 1,\r\n idvsi=basename.split(\"_\")[0],\r\n x=field[0],\r\n y=field[1])\r\n coco_url = os.path.join(\"/mnt/nvme/GIST/testadd2020\", name_file)\r\n image = self._image(location=field[:2],\r\n id=self.counter_image + 1,\r\n file_name=name_file,\r\n id_vsi=self.counter_vsi + 1,\r\n url=coco_url)\r\n block, _ = reader.getTile(indexTileX=idxX, indexTileY=idxY) # read image block\r\n if self.writable:\r\n cv2.imwrite(coco_url, block[..., ::-1]) # save images\r\n images.append(image)\r\n self.counter_image += 1\r\n self.counter_vsi += 1\r\n\r\n elif mode.lower() == \"all\":\r\n for idx_meta, file_meta in \\\r\n enumerate([file for file in os.listdir(dir) if \"meta\" in file]):\r\n path_meta = os.path.join(dir, file_meta)\r\n vsi = self._vsi(path_meta=path_meta, id_vsi=self.counter_vsi + 1) # this id is vsi_id\r\n vsis.append(vsi)\r\n\r\n basename = os.path.basename(path_meta)\r\n print(basename.split(\"_\")[0])\r\n flag_anno_exist = False\r\n path_anno = re.sub(\"meta\", \"annotation\", path_meta)\r\n if os.path.exists(path_anno):\r\n annos = parse_vsi_anno(path_anno)\r\n flag_anno_exist = True\r\n name_vsi = basename.split(\"_\")[0] + \".vsi\"\r\n path_vsi = os.path.join(self.work_dir, \"data\", name_vsi)\r\n reader = VsiReader(path=path_vsi, tilesize_init=(992, 992))\r\n numX, numY = reader.getNumTile()\r\n for idxX in range(numX):\r\n for idxY in range(numY):\r\n flag_object_image = False\r\n field = reader.getTileField(idxX, idxY) # x1y1x2y2\r\n if flag_anno_exist:\r\n for anno in annos:\r\n bbox = anno[\"bbox\"]\r\n bbox = [bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]]\r\n if self._testIn(bbox, field) and anno[\"tag\"] in self.classes_valid:\r\n flag_object_image = True\r\n bbox_new = self.clip(bbox, field) # x1y1x2y2\r\n bbox_deshift = [bbox_new[0] - field[0], bbox_new[1] - field[1],\r\n bbox_new[2] - field[0], bbox_new[3] - field[1], ]\r\n class_bbox = self.classes_list.index(anno[\"tag\"]) + 1\r\n annotation = self._annotation(id=self.counter_annotation + 1,\r\n id_image=self.counter_image + 1,\r\n 
id_category=class_bbox,\r\n bbox=bbox_deshift)\r\n annotations.append(annotation)\r\n self.counter_annotation += 1\r\n if flag_object_image:\r\n block, _ = reader.getTile(indexTileX=idxX, indexTileY=idxY) # read image block\r\n else:\r\n dice = random.randint(1, 10000)\r\n if dice <= 128:\r\n block, _ = reader.getTile(indexTileX=idxX, indexTileY=idxY) # read image block\r\n if block.mean() >= 220: # remove the white bachground\r\n continue # moderate the computation problem\r\n else:\r\n continue\r\n if block.shape[0] < 992 or block.shape[1] < 992: # padding the image\r\n base = np.zeros([992, 992, 3], dtype=np.uint8)\r\n base[:block.shape[0], :block.shape[1]] = block\r\n else:\r\n base = block\r\n print(\"{x}/{h}, {y}/{v}\".format(h=numX, v=numY, x=idxX, y=idxY))\r\n name_file = \"{idimg}_{idvsi}_{x}_{y}.png\".format(idimg=self.counter_image + 1,\r\n idvsi=basename.split(\"_\")[0],\r\n x=field[0],\r\n y=field[1])\r\n coco_url = os.path.join(\"/mnt/nvme/GIST/trainadd2020\", name_file)\r\n image = self._image(location=field[:2],\r\n id=self.counter_image + 1,\r\n file_name=name_file,\r\n id_vsi=self.counter_vsi + 1,\r\n url=coco_url)\r\n if self.writable:\r\n cv2.imwrite(coco_url, base[..., ::-1]) # save RGB images\r\n images.append(image)\r\n self.counter_image += 1\r\n self.counter_vsi += 1\r\n return vsis, images, annotations\r\n\r\n def _annotation(self, id: int, id_image: int, id_category: int, bbox: list or tuple, ):\r\n \"\"\"\r\n construct annotation element in coco json.\r\n\r\n Parameter\r\n ---------\r\n id : int\r\n the id of annotation\r\n id_image : int\r\n the id of corresponding image\r\n id_category : int\r\n the id of category this annotaion belongs to\r\n bbox : list or tuple\r\n the bounding box [x_min, y_min, x_max, y_max]\r\n \"\"\"\r\n annotation = {\r\n \"id\": id,\r\n \"image_id\": id_image,\r\n \"category_id\": id_category,\r\n \"segmentation\": [], # TODO : makeup the segmentaion calculation of bbox\r\n \"area\": (bbox[2] - bbox[0]) * (bbox[3] - bbox[1]),\r\n \"bbox\": [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]],\r\n \"iscrowd\": 0,\r\n }\r\n return annotation\r\n\r\n def _image(self, location: tuple or list, id: int,\r\n file_name: str, id_vsi: int, url: str):\r\n image = {\r\n \"id\": id,\r\n \"width\": 992,\r\n \"height\": 992,\r\n \"file_name\": file_name,\r\n \"vsi_id\": id_vsi,\r\n \"x\": location[0],\r\n \"y\": location[1],\r\n \"license\": 0,\r\n \"flickr_url\": \"\",\r\n \"coco_url\": url,\r\n \"date_captured\": self._get_time_stamp(),\r\n }\r\n return image\r\n\r\n def clip(self, bbox: tuple or list, field: tuple or list) -> tuple:\r\n \"\"\"\r\n clip the bbox to adjust the image field\r\n\r\n Parameter\r\n ---------\r\n bbox : tuple or list\r\n [x_min, y_min, x_max, y_max]\r\n field:tuple or list\r\n [x_min, y_min, x_max, y_max]\r\n\r\n Return\r\n -------\r\n (tuple or list) [x_min, y_min, x_max, y_max] new bounding box\r\n \"\"\"\r\n x1_new = max(bbox[0], field[0])\r\n y1_new = max(bbox[1], field[1])\r\n x2_new = min(bbox[2], field[2])\r\n y2_new = min(bbox[3], field[3])\r\n\r\n return x1_new, y1_new, x2_new, y2_new\r\n\r\n def _testIn(self, bbox: tuple or list, field: tuple or list):\r\n \"\"\"\r\n :param bbox: (tuple or list) [x_min, y_min, x_max, y_max]\r\n :param field: (tuple or list) [x_min, y_min, x_max, y_max]\r\n :return: (bool)\r\n \"\"\"\r\n borderL, borderR, borderU, borderD = 20, 20, 20, 20\r\n x1_bbox, y1_bbox, x2_bbox, y2_bbox = bbox\r\n x1_field, y1_field, x2_field, y2_field = field\r\n if x1_bbox + borderL >= x1_field and 
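The tiling logic above composes three small coordinate steps: clip the absolute box to the tile field, shift it into tile-local coordinates, then store it in COCO's [x, y, w, h] form with a derived area (as `_annotation` does). All three steps on invented numbers:

def clip(bbox, field):
    # intersect an x1y1x2y2 box with the tile field, as self.clip() does
    return (max(bbox[0], field[0]), max(bbox[1], field[1]),
            min(bbox[2], field[2]), min(bbox[3], field[3]))

field = (992, 0, 1984, 992)            # tile whose origin is x=992, y=0
bbox = (980, 100, 1040, 160)           # straddles the tile's left edge
x1, y1, x2, y2 = clip(bbox, field)                       # (992, 100, 1040, 160)
x1, y1, x2, y2 = x1 - field[0], y1 - field[1], x2 - field[0], y2 - field[1]
coco_bbox = [x1, y1, x2 - x1, y2 - y1]                   # [0, 100, 48, 60]
area = (x2 - x1) * (y2 - y1)                             # 2880
print(coco_bbox, area)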
\\\r\n y1_bbox + borderU >= y1_field and \\\r\n x2_bbox - borderR <= x2_field and \\\r\n y2_bbox - borderD <= y2_field:\r\n return True\r\n else:\r\n return False\r\n\r\n def _vsi(self, path_meta: str, id_vsi: int):\r\n \"\"\"\r\n read annotation and responding meta file of a vsi file\r\n :param path_meta: the path of annotation file\r\n :return:\r\n \"\"\"\r\n if not os.path.exists(path_meta):\r\n raise FileNotFoundError(f\"{path_meta} not found.\")\r\n\r\n basename = os.path.basename(path_meta)\r\n meta = parse_vsi_meta(path_meta=path_meta)\r\n vsi = {\r\n \"id\": id_vsi,\r\n \"width\": meta[\"size\"][0],\r\n \"height\": meta[\"size\"][1],\r\n \"case_id\": basename.split(\"-\")[0],\r\n \"file_name\": re.sub(\"_meta.json\", \".vsi\", basename),\r\n }\r\n return vsi\r\n\r\n def _images(self):\r\n pass\r\n\r\n def _info(self):\r\n info = {\r\n \"year\": 2020,\r\n \"version\": \"0.3\",\r\n \"description\": \"This is unstable 0.3 version of the GIST dataset.\",\r\n \"contributor\": \"Yichen Yang, Tao Yuan\",\r\n \"url\": \"\",\r\n \"date_created\": self._get_time_stamp(),\r\n }\r\n return info\r\n\r\n def _license(self):\r\n license = {\r\n \"id\": 0,\r\n \"name\": \"\",\r\n \"url\": \"\",\r\n }\r\n return license\r\n\r\n def _categories(self):\r\n categories = []\r\n category_certain = {\r\n \"id\": 1, # 从 1 开始\r\n \"name\": \"mito\",\r\n \"supercategory\": \"certain\",\r\n }\r\n categories.append(category_certain) if \"certain\" in self.classes_valid else None\r\n category_no = {\r\n \"id\": 2,\r\n \"name\": \"non\",\r\n \"supercategory\": \"certain\",\r\n }\r\n categories.append(category_no) if \"NO\" in self.classes_valid else None\r\n category_uncertain = {\r\n \"id\": 3,\r\n \"name\": \"uncertain\",\r\n \"supercategory\": \"uncertain\",\r\n }\r\n categories.append(category_uncertain) if \"uncertain\" in self.classes_valid else None\r\n\r\n return categories\r\n\r\n def _get_time_stamp(self):\r\n ct = time.time()\r\n local_time = time.localtime(ct)\r\n data_head = time.strftime(\"%Y-%m-%d %H:%M:%S\", local_time)\r\n data_secs = (ct - int(ct)) * 10e5\r\n time_stamp = \"%s.%06d\" % (data_head, data_secs)\r\n return time_stamp\r\n\r\n def write(self, path_out: str, force: bool = True):\r\n if os.path.exists(path_out):\r\n if not force:\r\n raise FileExistsError(f\"{path_out} has existed.\")\r\n json.dump(self._base, open(path_out, \"w\"), indent=2)\r\n\r\n\r\nif __name__ == '__main__':\r\n import bioformats\r\n import javabridge\r\n\r\n javabridge.start_vm(class_path=bioformats.JARS) # I code javabridge here, and maybe it will be proven wrong\r\n\r\n er = Jsoner(\"/media/lansv/passport/GIST/\", split=\"train\",\r\n classes_valid=[\"certain\", \"NO\", \"uncertain\"], write=True)\r\n er.write(\"/mnt/nvme/GIST/annotations/train_add.json\", force=True)\r\n javabridge.kill_vm() # I code javabridge here, and maybe it will be proven wrong TODO\r\n\r\n print(\"End.\")\r\n","sub_path":"datasets/gist/jsoner.py","file_name":"jsoner.py","file_ext":"py","file_size_in_byte":16389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"269313880","text":"import matplotlib\nmatplotlib.use(\"Agg\")\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.losses import mean_squared_error\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom tensorflow.keras.utils 
import to_categorical\nfrom Classification.Training.architecture.LeNet import LeNet,AlexNet,RandomF\nfrom utils.paths import list_bdfs, list_files\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport random\nimport os\nimport mne\nfrom utils.flt import filters\nfrom utils.xls import crearxls\nfrom utils.c2df import converttoDataFrame, addAnnotation\nimport glob\nimport pandas as pd\nimport pickle\nimport openpyxl\nfrom sklearn.metrics import confusion_matrix, f1_score, roc_curve, precision_score, recall_score, accuracy_score, roc_auc_score\nfrom sklearn import metrics\nfrom mlxtend.plotting import plot_confusion_matrix\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.applications.vgg16 import preprocess_input\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.ensemble import RandomForestClassifier\nimport joblib\nimport seaborn as sns\n\ndef readBdf(path,lowfrec,highpass,canales,name):\n raw = mne.io.read_raw_bdf(path, preload=True)\n raw_selection = raw.crop(tmin=1, tmax=12)\n raw_selection.pick_channels(canales)\n eeg_picks = mne.pick_types(raw_selection.info, eeg=True)\n raw_filtered=filters(raw_selection,eeg_picks,lowfrec,highpass)\n df=converttoDataFrame(raw_filtered, name)\n return df\n\nclass trainingRF(object):\n\tdef __init__(self, path_to_dataset, save_to_dir_model, lbl, bs, epochs, lr, seed, numclasses, splitDataset,numberOfExperiment,lowfrec,highpass,conaug,canales,destino,ordendata,arquitec,optimi,fromModel,modeloh5):\n\t\tself.path_to_dataset = path_to_dataset\n\t\tself.save_to_dir_model = save_to_dir_model\n\t\tself.lbl = lbl\n\t\tself.bs = bs\n\t\tself.epochs = epochs\n\t\tself.lr = lr\n\t\tself.seed = seed\n\t\tself.numclasses = numclasses\n\t\tself.test_size = splitDataset\n\t\tself.numberOfExperiment = numberOfExperiment\n\t\tself.lowfrec = lowfrec\n\t\tself.highpass = highpass\n\t\tself.conaug=conaug\n\t\tself.canales=canales\n\t\tself.destino=destino\n\t\tself.ordendata=ordendata\n\t\tself.arquitec=arquitec\n\t\tself.optimi=optimi\n\t\tself.fromModel=fromModel\n\t\tself.modeloh5=modeloh5\n\n\tdef labeled(self,label):\n\t\treturn self.lbl.get(label)\n\n\tdef train(self):\n\t\tpath_code = os.getcwd()\n\t\tBS = self.bs\n\t\tEPOCHS = self.epochs\n\t\tINIT_LR = self.lr\n\t\tseed = self.seed\n\t\tsplit_test_size = self.test_size\n\t\ttotal_classes = self.numclasses\n\t\tdata = []\n\t\tlabels = []\n\t\tlistaD = []\n\t\tlowfrec = self.lowfrec\n\t\thighpass = self.highpass\n\t\tconaug=self.conaug\n\t\tordendata=self.ordendata\n\t\tdestino=self.destino\n\t\tarquitec=self.arquitec\n\t\toptimi=self.optimi\n\t\tfromModel=self.fromModel\n\t\tmodeloh5=self.modeloh5\n\t\tcanales=self.canales\n\n\t\tdata = pd.read_csv(\"D:\\TraingEEG\\DataSetConstruido\\DataSerCsvMa.csv\",)\n\t\tprint(data.head())\n\t\tdata = np.array(data, dtype=\"float\")\n\t\t(trainX, testX, trainY, testY) = train_test_split(np.delete(data, 10, axis=1), data[:,10], test_size=split_test_size, random_state=seed)\n\n\t\tos.system(\"mkdir Results\\\\results\\\\\"+fromModel+\"\\\\model\\\\\"+destino+\"\\\\img\")\n\t\tmodel = RandomForestClassifier(n_estimators=100,)\n\t\tif(optimi==\"Adam\"):\n\t\t\topt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)\n\t\tif(optimi==\"RMSprop\"):\n\t\t\topt = RMSprop(lr=INIT_LR, decay=INIT_LR / EPOCHS)\n\n\t\tif(conaug):\n\t\t\tH = model.fit(x=aug.flow(trainX, trainY, batch_size=BS), validation_data=(testX, testY), steps_per_epoch=len(trainX) // BS,\tepochs=EPOCHS, verbose=1)\n\t\telse:\n\t\t\tH = 
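The split above treats column 10 as the label and everything else as features. The same slicing on a tiny synthetic array, so the shapes are visible:

import numpy as np
from sklearn.model_selection import train_test_split

data = np.arange(44, dtype=float).reshape(4, 11)  # invented 4x11 table
X = np.delete(data, 10, axis=1)   # drop the label column -> shape (4, 10)
y = data[:, 10]                   # the label column      -> shape (4,)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.25,
                                          random_state=42)
print(X_tr.shape, X_te.shape)     # (3, 10) (1, 10)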
model.fit(trainX,trainY)\n\n\t\tos.system(\"mkdir Results\\\\results\\\\\"+destino+\"\\\\img\")\n\t\tos.system(\"mkdir Results\\\\results\\\\\"+destino+\"\\\\model\")\n\t\tos.system(\"mkdir Results\\\\results\\\\\"+destino+\"\\\\excels\")\n\t\tif(arquitec==\"LeNet\"):\n\t\t\tos.system(\"mkdir Results\\\\results\\\\\"+destino+\"\\\\Semana_1_Experimentacion_LeNet\")\n\t\tif(arquitec==\"AlexNet\"):\n\t\t\tos.system(\"mkdir Results\\\\results\\\\\"+destino+\"\\\\Semana_1_Experimentacion_AlexNet\")\n\t\tif(arquitec==\"RandomF\"):\n\t\t\tos.system(\"mkdir Results\\\\results\\\\\"+destino+\"\\\\Semana_1_Experimentacion_RandomF\")\n\n\t\tmodelNamet = 'Results/results/'+destino+'/model/Ex'+str(self.numberOfExperiment)\n\t\tmodelName = modelNamet+self.save_to_dir_model\n\n\t\tjoblib.dump(model, modelNamet+'.pkl')\n\n\t\tnumberClasses = self.numclasses\n\t\tnumberOfExperiment = self.numberOfExperiment\n\t\tparameterOfMoficate = 'epochs'\n\t\tdatasetName = self.path_to_dataset\n\t\tmodelName = 'LeNet'\n\t\tnameFile = str(numberOfExperiment) + '_' + str(modelName) + '_'+ str(parameterOfMoficate)\n\t\texperimentPath = 'Results/results/'+destino+'/Semana_1_Experimentacion_LeNet'\n\n\t\thyperparameter = {\n\t\t\t'id':[numberOfExperiment],\n\t\t\t'name_file' : [nameFile],\n\t\t\t'dataset_name': [datasetName],\n\t\t\t'lowfrec':[lowfrec],\n\t\t\t'highpass':[highpass],\n\t\t\t'aug':[conaug],\n\t\t\t'BS': [BS],\n\t\t\t'EPOCHS': [EPOCHS],\n\t\t\t'INIT_LR': [INIT_LR],\n\t\t\t'Class number': [numberClasses],\n\t\t\t'modelName': [modelName],\n\t\t\t'Seed' : [seed],\n\t\t\t'shear_range': ['None'],\n\t\t\t'orden ingreso Data':[listaD],\n\t\t\t'canales':[canales],\n\t\t\t'destino':[destino]\n\t\t\t}\n\t\tclass_names = ['insatisfecho','satisfecho']\n\n\t\tpredictions = model.predict(testX)\n\t\tprint(accuracy_score(testY, predictions))\n\t\tprint(confusion_matrix(testY, predictions))\n\t\tmatc=confusion_matrix(testY, predictions)\n\t\tprint(\"matc\")\n\t\tplt.style.use(\"ggplot\")\n\t\tplt.figure()\n\t\tplot_confusion_matrix(conf_mat=matc, figsize=(9,9), class_names = class_names, show_normed=False)\n\t\tplt.tight_layout()\n\t\tplt.title(\"Matrix test\")\n\t\tplt.xlabel(\"Predictions\")\n\t\tplt.ylabel(\"Actual\")\n\t\tplt.legend(loc=\"lower left\")\n\t\timgName='Results/results/'+fromModel+'/model/'+destino+'/img/matrixTest'+str(self.numberOfExperiment)\n\t\timgName=imgName+'training_results.png'\n\t\tplt.savefig(imgName)\n\t\tprint(metrics.classification_report(testY,predictions, digits = 4))\n\n\t\tmatc = matc.astype('float') / matc.sum(axis=1)[:, np.newaxis]\n\n\t\tplt.figure(figsize=(16,7))\n\t\tsns.set(font_scale=1.4)\n\t\tsns.heatmap(matc, annot=True, annot_kws={'size':10}, cmap=plt.cm.Greens, linewidths=0.2)\n\n\t\ttick_marks = np.arange(len(class_names))\n\t\ttick_marks2 = tick_marks + 0.5\n\t\tplt.xticks(tick_marks, class_names, rotation=25)\n\t\tplt.yticks(tick_marks2, class_names, rotation=0)\n\t\tplt.xlabel('Predicted label')\n\t\tplt.ylabel('True label')\n\t\tplt.title('Confusion matrix for Random Forest Model')\n\t\timgName='Results/results/'+fromModel+'/model/'+destino+'/img/Confusion_matrix_for_'+arquitec+'_Model'+str(self.numberOfExperiment)\n\t\timgName=imgName+'.png'\n\t\tplt.savefig(imgName)\n\n\t\tprint(\"Fin\")","sub_path":"triangEEG/Classification/Training/trainDataSetRF.py","file_name":"trainDataSetRF.py","file_ext":"py","file_size_in_byte":6641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"219105426","text":"import base64\nimport 
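Row-normalising the confusion matrix, as done above with `matc.sum(axis=1)[:, np.newaxis]`, turns counts into per-class recall. On invented 2x2 counts:

import numpy as np

matc = np.array([[8, 2], [1, 9]], dtype=float)   # toy counts
norm = matc / matc.sum(axis=1)[:, np.newaxis]    # each row sums to 1
print(norm)   # [[0.8 0.2]
              #  [0.1 0.9]]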
random\nimport string\n\nfrom django.core.cache import cache\nfrom django.core.mail import send_mail\n\nfrom rest_framework import serializers\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\n\nfrom .models import User\n\n\ndef encode(text):\n enc_bytes = text.encode('ascii')\n base64_bytes = base64.b64encode(enc_bytes)\n base64_enc = base64_bytes.decode('ascii')\n return base64_enc\n\n\ndef decode(text):\n base64_bytes = text.encode('ascii')\n text_bytes = base64.b64decode(base64_bytes)\n decoded_text = text_bytes.decode('ascii')\n return decoded_text\n\n\nclass UserRegistrationSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = User\n fields = ('email',)\n\n def create(self, validated_data):\n email = validated_data['email']\n if (email and User.objects.filter(email=email).exists()):\n raise serializers.ValidationError(\n {'email': 'Email addresses must be unique.'}\n )\n confirmation_code = encode(''.join(random.choice(\n string.ascii_uppercase + string.digits\n ) for _ in range(8)))\n username = email.replace('@', '_').replace('.', '_')\n email = email\n c_c = confirmation_code\n cache.set_many({'u': username, 'e': email, 'c_c': c_c}, timeout=300)\n send_mail(\n 'Your confirmation code',\n confirmation_code,\n 'from@example.com',\n [f'{email}'],\n fail_silently=False,\n )\n return self.data['email']\n\n\nclass MyAuthTokenSerializer(serializers.ModelSerializer):\n email = serializers.EmailField()\n\n class Meta:\n model = User\n fields = ('email', 'confirmation_code')\n\n def validate(self, data):\n send_confirmation_code = data['confirmation_code']\n data = cache.get_many(['u', 'e', 'c_c'])\n if not data:\n raise serializers.ValidationError(\n 'The registration confirmation period has expired'\n )\n username = data['u']\n email = data['e']\n confirmation_code = data['c_c']\n if send_confirmation_code == confirmation_code:\n user = User.objects.create(\n username=username,\n email=email,\n confirmation_code=confirmation_code\n )\n user.save()\n refresh = TokenObtainPairSerializer.get_token(user)\n data['token'] = str(refresh.access_token)\n return data\n\n\nclass UserSerializer(serializers.ModelSerializer):\n class Meta:\n model = User\n fields = (\n 'email',\n 'first_name',\n 'last_name',\n 'username',\n 'bio',\n 'role'\n )\n","sub_path":"api/users/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"368802052","text":"import turtle\nt = turtle.Turtle()\nt.hideturtle()\nt.speed(0)\nscreen = turtle.Screen()\nscreen.tracer(0, 0)\n\n#Score\nleft_score = 0\nright_score = 0\n# Ball\nradius_ball = 10\nballx = 0\nbally = 0\nnormal_speed = 1\ndy = normal_speed\ndx = normal_speed\n# Ball imaginary box\nbox_x = ballx - radius_ball\nbox_y = bally + 2*radius_ball\n# Border\nborder_width = 600\nborder_length = 800\n# Paddle\npaddle_size = 100 \npaddle_width = 10\npaddle_l_x = -border_length/2 \npaddle_l_y = paddle_size/2\npaddle_r_x = border_length/2 - paddle_width\npaddle_r_y = paddle_size/2\n\n\ndef draw_score():\n t.penup()\n t.goto(0, 310)\n t.pendown()\n t.write(\"{}:{}\".format(left_score, right_score), font=(\"Arial\", 16, \"normal\"))\n\n\ndef draw_ball(x,y):\n t.penup()\n t.goto(x,y+radius_ball)\n t.pendown()\n t.circle(radius_ball,360)\n\n\n\ndef border():\n t.penup()\n t.goto(-border_length/2,border_width/2)\n t.seth(0)\n t.pendown()\n t.forward(border_length)\n t.right(90)\n t.forward(border_width)\n t.right(90)\n 
t.forward(border_length)\n t.right(90)\n t.forward(border_width)\n\n\n\n\n\ndef paddle_left():\n global paddle_l_y\n t.penup()\n t.goto(paddle_l_x, paddle_l_y)\n t.seth(0)\n t.pendown()\n t.forward(paddle_width)\n t.rt(90)\n t.fd(paddle_size)\n t.rt(90)\n t.fd(paddle_width)\n t.rt(90)\n t.fd(paddle_size)\n\n\n \n\ndef paddle_right():\n global paddle_r_y\n t.penup()\n t.goto(paddle_r_x, paddle_r_y)\n t.seth(0)\n t.pendown()\n t.forward(paddle_width)\n t.rt(90)\n t.fd(paddle_size)\n t.rt(90)\n t.fd(paddle_width)\n t.rt(90)\n t.fd(paddle_size)\n\n\n\ndef draw_game():\n t.clear()\n draw_ball(ballx, bally)\n border()\n paddle_left()\n paddle_right()\n draw_score()\n screen.update()\n ball_move()\n wall_collision()\n ai()\n smart_ai()\n screen.ontimer(draw_game, 1)\n\n\ndef ball_move():\n global ballx, bally,box_x,box_y\n ballx = ballx + dx\n bally = bally + dy\n box_x = ballx - radius_ball\n box_y = bally + 2*radius_ball\n\ndef wall_collision():\n global dx, dy, ballx, bally, right_score, left_score\n box_r_x = box_x + 2*radius_ball\n paddle_r_lower_y = paddle_r_y - paddle_size\n\n box_lower_y = box_y - 2*radius_ball\n paddle_l_lower_y = paddle_l_y - paddle_size\n \n # left paddle \n if box_x <= paddle_l_x + paddle_width*2 and (box_lower_y < paddle_l_y and box_y > paddle_l_lower_y):\n dx = dx * -1\n dx = dx * 1.05\n dy = dy * 1.05\n ballx = paddle_l_x + paddle_width + 2*radius_ball + 1\n print(\"L\")\n\n # right paddle\n elif box_r_x - radius_ball >= paddle_r_x and (box_lower_y < paddle_r_y and box_y > paddle_r_lower_y):\n dx = dx * -1\n dx = dx * 1.05\n dy = dy * 1.05\n ballx = paddle_r_x - radius_ball - 1\n print(\"R\")\n\n # left edge - loss for left\n elif box_x - radius_ball <= -border_length/2:\n #dx = dx * -1\n ballx = 0\n bally = 0\n dx = normal_speed\n dy = normal_speed\n right_score += 1\n\n # right - loss for right\n elif box_x + radius_ball >= border_length/2:\n #dx = dx * -1\n ballx = 0\n bally = 0\n dx = normal_speed * -1\n dy = normal_speed * -1\n left_score += 1\n \n\n #top edge \n elif box_y >= border_width/2:\n dy = dy * -1\n bally = border_width/2 - 2*radius_ball - 1\n \n #bottom edge\n elif box_y - 2*radius_ball <= -border_width/2:\n dy = dy * -1\n bally = -border_width/2 + 2*radius_ball + 1\n\n\ndef right_paddle_up():\n global paddle_r_y\n if paddle_r_y < border_width/2:\n paddle_r_y += 50\n \ndef right_paddle_down():\n global paddle_r_y\n if paddle_r_y - paddle_size > -border_width/2:\n paddle_r_y -= 50\n\n\ndef left_paddle_up():\n global paddle_l_y\n if paddle_l_y < border_width/2:\n paddle_l_y += 50\n\ndef left_paddle_down():\n global paddle_l_y\n if paddle_l_y - paddle_size > -border_width/2:\n paddle_l_y -= 50\n\n\ndef ai():\n global paddle_l_y, paddle_r_y\n if bally + paddle_size/2 > border_width/2:\n paddle_l_y = border_width/2\n #paddle_r_y = border_width/2\n elif bally + paddle_size/2 - paddle_size <= -border_width/2:\n paddle_l_y = -border_width/2 + paddle_size\n #paddle_r_y = -border_width/2 + paddle_size\n else:\n paddle_l_y = bally + paddle_size/2\n #paddle_r_y = bally + paddle_size/2\n\n################################################################\n\ndef get_contact_point(ballx, bally, dx, dy):\n y_intercept = 0\n if dy < 0:\n y_intercept = bally + ballx\n if -border_length/2 + y_intercept > -border_width/2 and -border_length/2 + y_intercept < border_width/2:\n return [border_length/2, -border_length/2 + y_intercept]\n else:\n return [-border_width/2 + y_intercept, -border_width/2, dx, -dy]\n else:\n y_intercept = bally - ballx\n if border_length/2 + 
y_intercept > -border_width/2 and border_length/2 + y_intercept < border_width/2:\n return [border_length/2, border_length/2 + y_intercept]\n else:\n return [border_width/2 - y_intercept, border_width/2, dx, -dy]\n \n \ndef smart_ai():\n global paddle_l_y, paddle_r_y\n if dx > 0:\n contact_point = get_contact_point(ballx, bally, dx, dy)\n #print(contact_point)\n while not contact_point[0] == border_length/2:\n contact_point = get_contact_point(contact_point[0], contact_point[1], contact_point[2], contact_point[3])\n #print(contact_point)\n print(contact_point)\n paddle_r_y = contact_point[1] + paddle_size/2\n print(paddle_r_y)\n################################################################\n\n\n \n\n \ndraw_game()\nscreen.onkey(right_paddle_up, \"Up\")\nscreen.onkey(right_paddle_down, \"Down\")\nscreen.onkey(left_paddle_up, \"w\")\nscreen.onkey(left_paddle_down, \"s\")\nscreen.listen()\nscreen.mainloop()","sub_path":"turtle_examples/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":5640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"327533258","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('app', '0004_auto_20150317_0823'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='empinfo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('email_confirmed', models.BooleanField(default=False)),\n ('provider', models.CharField(default=b'None', max_length=200)),\n ('propicurl', models.CharField(default=b'None', max_length=200)),\n ('link', models.CharField(default=b'None', max_length=200)),\n ('gender', models.CharField(default=b'None', max_length=200)),\n ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, unique=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterField(\n model_name='leave',\n name='employee',\n field=models.ForeignKey(to='app.empinfo'),\n preserve_default=True,\n ),\n ]\n","sub_path":"leavesystem/app/migrations/0005_auto_20150317_0828.py","file_name":"0005_auto_20150317_0828.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"416697888","text":"from ..forms import Muestra\r\nimport random\r\nfrom django.db.models import Count\r\nimport json\r\nfrom django.shortcuts import render\r\nfrom ..Clients import ClientFactory\r\n\r\n\r\n# Auxiliar function to assist the analytics for Sesion\r\n# It gets metrics associated with each class present\r\ndef get_metrics(muestras_general):\r\n\r\n # Obtains the \"Muestra\" objects grouping by \"pred\" and counting it\r\n muestras_pred = muestras_general.values('pred').annotate(\r\n cantidad=Count('pred'))\r\n\r\n # Group by pred_true adding the count of it\r\n muestras_pred_true = muestras_general.values('pred_true').annotate(\r\n cantidad=Count('id'))\r\n\r\n muestras_val = muestras_general.values('pred', 'pred_true')\r\n\r\n # Flatten the results\r\n muestras_p = muestras_pred.values_list('pred', flat=True)\r\n muestras_pt = muestras_pred_true.values_list('pred_true', flat=True)\r\n\r\n # Gets unique values\r\n possible_values = list(muestras_p) + list(muestras_pt)\r\n possible_values = list(dict.fromkeys(possible_values))\r\n\r\n # We have to 
check whether None is present; None is valid in the model but carries no meaning here\r\n if None in possible_values:\r\n possible_values.remove(None)\r\n\r\n metrics_dict = {}\r\n\r\n for value in possible_values:\r\n\r\n TP = muestras_val.filter(pred=value, pred_true=value).count()\r\n FP = muestras_val.filter(pred=value).exclude(pred_true=value).count()\r\n FN = muestras_val.filter(pred_true=value).exclude(pred=value).count()\r\n TN = muestras_val.exclude(pred_true=value).exclude(pred=value).count()\r\n\r\n precision = 0\r\n\r\n if (TP + FP) != 0:\r\n precision = TP / (TP + FP)\r\n\r\n recall = 0\r\n\r\n if (TP + FN) != 0:\r\n recall = TP / (TP + FN)\r\n\r\n # Specificity is the true-negative rate: TN / (TN + FP)\r\n specificity = 0\r\n\r\n if (TN + FP) != 0:\r\n specificity = TN / (TN + FP)\r\n\r\n f1_score = 0\r\n\r\n if (precision + recall) != 0:\r\n f1_score = 2 * ((precision * recall) / (precision + recall))\r\n\r\n val_dict = {\r\n 'TP' : TP,\r\n 'FP' : FP,\r\n 'FN' : FN,\r\n 'TN' : TN,\r\n 'precision' : precision,\r\n 'recall' : recall,\r\n 'specificity' : specificity,\r\n 'f1_score' : f1_score\r\n }\r\n\r\n metrics_dict[value] = val_dict\r\n\r\n return metrics_dict, possible_values\r\n\r\ndef analytics_sesion(request):\r\n\r\n def random_color():\r\n\r\n r = random.randint(0, 255)\r\n g = random.randint(0, 255)\r\n b = random.randint(0, 255)\r\n\r\n return 'rgba({}, {}, {}, 255)'.format(r, g, b)\r\n\r\n if request.method == \"GET\" and request.GET.get(\"id_sesion\"):\r\n\r\n id_s = request.GET[\"id_sesion\"]\r\n\r\n muestras_general = Muestra.objects.filter(sesion=id_s)\r\n\r\n muestras_no_val = muestras_general.values('pred_true').filter(pred_true=None).annotate(\r\n cantidad=Count(\"id\")\r\n )\r\n\r\n try:\r\n cantidad_no_val = muestras_no_val.values_list(\"cantidad\", flat=True).get(pred_true=None)\r\n except Exception:\r\n cantidad_no_val = 0\r\n\r\n datos_muestras = muestras_general.values('pred').annotate(\r\n cantidad=Count('pred'),\r\n probabilidad=Count('pred') / Count('id')).order_by('-cantidad')\r\n\r\n data = []\r\n labels = []\r\n colors = []\r\n\r\n for dato in datos_muestras:\r\n data.append(dato['cantidad'])\r\n labels.append(dato['pred'])\r\n colors.append(random_color())\r\n\r\n data_obj = {\r\n\r\n 'datasets' : [{\r\n 'data' : data,\r\n 'backgroundColor' : colors\r\n }],\r\n\r\n 'labels' : labels,\r\n\r\n }\r\n\r\n data_obj = json.dumps(data_obj)\r\n\r\n val_dict, possible_values = get_metrics(muestras_general)\r\n\r\n context = {\r\n 'datos_muestras' : datos_muestras,\r\n 'data' : data_obj,\r\n 'val_dict' : val_dict,\r\n 'classes' : possible_values,\r\n 'cantidad_no_val' : cantidad_no_val\r\n }\r\n\r\n client = ClientFactory.get_client(request)\r\n\r\n return client.show_graficos_sesion(request, context)\r\n","sub_path":"hacht/main/Analytics/Sesion.py","file_name":"Sesion.py","file_ext":"py","file_size_in_byte":4150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"532883194","text":"import matplotlib.pyplot as plt\r\nfrom astropy.wcs import WCS\r\nfrom astropy.io import fits\r\nfrom astropy.visualization import (ZScaleInterval, LinearStretch,\r\n ImageNormalize)\r\nfrom astropy.visualization import wcsaxes\r\n\r\n########################################################################################################################\r\n\r\n# Choose fits file and targets' RA/Dec\r\nfile = 'E:\\\\DSS_M31_B.fits'\r\nRAs = [[00.,37.,26.10],[00.,44.,22.01],[00.,41.,13.71],[00.,39.,53.55],[00.,46.,41.54],[00.,43.,42.69],[00.,39.,53.66]]\r\nDecs = 
[[40.,15.,52.1],[41.,55.,21.4],[40.,37.,18.7],[40.,28.,27.7],[42.,08.,50.9],[41.,51.,26.9],[40.,25.,52.2]]\r\n\r\n\r\nhdu = fits.open(file)[0]\r\nw = WCS(hdu.header)\r\ndata = hdu.data\r\n\r\ndRAs = []\r\ndDecs = []\r\nfor RA in RAs:\r\n dRA = float(RA[0])*15 + float(RA[1])/4 + float(RA[2])/240\r\n dRAs.append(dRA)\r\nfor Dec in Decs:\r\n dDec = float(Dec[0]) + float(Dec[1])/60 + float(Dec[2])/3600\r\n dDecs.append(dDec)\r\n\r\npix = w.all_world2pix(dRAs,dDecs,1)\r\nxpix = (pix[0])\r\nypix = (pix[1])\r\n\r\n\r\nnorm = ImageNormalize(data, interval=ZScaleInterval(),\r\n stretch=LinearStretch())\r\n\r\n\r\nfig = plt.figure(figsize=(10,8))\r\nax = wcsaxes.WCSAxes(fig=fig, rect=[0.1,0.1,0.8,0.8],wcs=w)\r\nfig.add_axes(ax)\r\nax.imshow(data, cmap=plt.cm.gist_heat, origin='lower', vmin=8500, norm=norm)\r\nax.scatter(xpix, ypix, s=300,\r\n edgecolor='white', facecolor='none',lw=2)\r\nlon = ax.coords[0]\r\nlon.set_major_formatter('hh:mm')\r\nlon.set_axislabel('$\\\\alpha_{J2000}$', fontsize=16)\r\nlon.display_minor_ticks(True)\r\nlat = ax.coords[1]\r\nlat.set_major_formatter('dd:mm')\r\nlat.set_axislabel('$\\\\delta_{J2000}$', fontsize=16)\r\nlat.display_minor_ticks(True)\r\nax.coords.grid(color='yellow',alpha=0.5,linestyle='dashed')\r\nax.set_facecolor('black')\r\n\r\nplt.show()\r\n#plt.savefig('C:\\\\Users\\\\caleb\\\\Documents\\\\MATLAB\\\\ASTR310\\\\Project-2\\\\img\\\\DSS2_M31_B.png', bbox_inches='tight')","sub_path":"Project 2/coords.py","file_name":"coords.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"364839009","text":"import feedparser\nimport re\n\n# Returns the title and dictionary of word counts for a RSS feed\ndef get_word_counts(url):\n # Parse feed\n data = feedparser.parse(url)\n word_counts = {}\n\n # Iterate through all entries and accumulate words\n for entry in data.entries:\n if ('summary' in entry):\n summary = entry.summary\n else:\n summary = entry.description\n\n words = get_words(entry.title + ' ' + summary)\n for word in words:\n word_counts.setdefault(word, 0)\n word_counts[word] += 1\n return data.feed['title'], word_counts\n\ndef get_words(html):\n # Remove all the HTML tags\n txt=re.compile(r'<[^>]+>').sub('',html)\n\n # Split words by all non-alpha characters\n words=re.compile(r'[^A-Z^a-z]+').split(txt)\n\n # Convert to lowercase\n return [word.lower() for word in words if word!='']\n\n\napcount={}\nwordcounts={}\nwordlist=[]\n\n# Read the feed list once so it can be iterated and its length taken later\nfeedlist = [line.strip() for line in open('./data/feedlist.txt')]\n\nfor feedurl in feedlist:\n try:\n title, word_count = get_word_counts(feedurl)\n wordcounts[title] = word_count\n for word,count in word_count.items():\n apcount.setdefault(word, 0)\n if count > 1:\n apcount[word] += 1\n except Exception:\n print(\"Failed to parse %s\" % feedurl)\n\nfor w, bc in apcount.items():\n frac = float(bc)/len(feedlist)\n if frac > 0.1 and frac < 0.5:\n wordlist.append(w)\n\nout = open('blogdata.txt','w')\nout.write('Blog')\n\nfor word in wordlist:\n out.write('\\t%s' % word)\nout.write('\\n')\n\n# The word loop is nested inside the blog loop so each blog gets its own row\nfor blog,wc in wordcounts.items():\n out.write(blog)\n for word in wordlist:\n if word in wc: out.write('\\t%d' % wc[word])\n else: out.write('\\t0')\n out.write('\\n')\nout.close()","sub_path":"Machine Learning/Programming Collective Intelligence/Discovering Groups/generatefeedvector.py","file_name":"generatefeedvector.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"114104990","text":"\"\"\" Contains pyramid layers 
\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom . import ConvBlock, Upsample\n\n\nclass PyramidPooling:\n \"\"\" Pyramid Pooling module.\n\n Zhao H. et al. \"`Pyramid Scene Parsing Network `_\"\n\n Parameters\n ----------\n layout : str\n Layout for convolution layers.\n filters : int\n Number of filters in each pyramid branch.\n kernel_size : int\n Kernel size\n pool_op : str\n Pooling operation ('mean' or 'max').\n pyramid : tuple of int\n Number of feature regions in each dimension, default is (0, 1, 2, 3, 6).\n `0` is used to include `inputs` into the output tensor.\n flatten : bool\n If True, then the output is reshaped to a vector of constant size.\n If False, spatial shape of the inputs is preserved.\n name : str\n Layer name that will be used as a scope.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.args, self.kwargs = args, kwargs\n\n def __call__(self, inputs):\n return pyramid_pooling(inputs, *self.args, **self.kwargs)\n\n\ndef pyramid_pooling(inputs, layout='cna', filters=None, kernel_size=1, pool_op='mean', pyramid=(0, 1, 2, 3, 6),\n flatten=False, name='psp', **kwargs):\n \"\"\" Pyramid Pooling module. \"\"\"\n shape = inputs.get_shape().as_list()\n data_format = kwargs.get('data_format', 'channels_last')\n\n static_shape = np.array(shape[1: -1] if data_format == 'channels_last' else shape[2:])\n dynamic_shape = tf.shape(inputs)[1: -1] if data_format == 'channels_last' else tf.shape(inputs)[2:]\n axis = -1 if data_format == 'channels_last' else 1\n num_channels = shape[axis]\n if filters is None:\n filters = num_channels // len(pyramid)\n\n with tf.variable_scope(name):\n layers = []\n for level in pyramid:\n if level == 0:\n x = inputs\n else:\n # Pooling\n if None not in static_shape:\n x = _static_pyramid_pooling(inputs, static_shape, level, pool_op, name='pool-%d' % level)\n upsample_shape = static_shape\n else:\n x = _dynamic_pyramid_pooling(inputs, level, pool_op, num_channels, data_format)\n upsample_shape = dynamic_shape\n\n # Conv block to set number of feature maps\n x = ConvBlock(layout, filters=filters, kernel_size=kernel_size,\n name='conv-%d' % level, **kwargs)(x)\n\n # Output either vector with fixed size or tensor with fixed spatial dimensions\n if flatten:\n x = tf.reshape(x, shape=(-1, level*level*filters),\n name='reshape-%d' % level)\n concat_axis = -1\n else:\n x = Upsample(layout='b', shape=upsample_shape, name='upsample-%d' % level, **kwargs)(x)\n concat_axis = axis\n\n layers.append(x)\n x = tf.concat(layers, axis=concat_axis, name='concat')\n return x\n\ndef _static_pyramid_pooling(inputs, spatial_shape, level, pool_op, **kwargs):\n pool_size = tuple(np.ceil(spatial_shape / level).astype(np.int32).tolist())\n pool_strides = tuple(np.floor((spatial_shape - 1) / level + 1).astype(np.int32).tolist())\n\n output = ConvBlock('p', pool_op=pool_op, pool_size=pool_size, pool_strides=pool_strides, **kwargs)(inputs)\n return output\n\ndef _dynamic_pyramid_pooling(inputs, level, pool_op, num_channels, data_format):\n if data_format == 'channels_last':\n h_axis, w_axis = 1, 2\n else:\n h_axis, w_axis = -2, -1\n\n inputs_shape = tf.shape(inputs)\n h_float = tf.cast(tf.gather(inputs_shape, h_axis), tf.float32)\n w_float = tf.cast(tf.gather(inputs_shape, w_axis), tf.float32)\n\n if pool_op == 'mean':\n pooling_op = tf.reduce_mean\n elif pool_op == 'max':\n pooling_op = tf.reduce_max\n else:\n raise ValueError('Wrong mode')\n\n def calc_pos(idx, level, size):\n \"\"\" Compute floor(idx*size // level) and cast it to tf.int. 
\"\"\"\n return tf.cast(tf.floor(tf.multiply(tf.divide(idx, level), size)), tf.int32)\n\n result = []\n for row in range(level):\n for col in range(level):\n start_h = calc_pos(row, level, h_float)\n end_h = calc_pos(row+1, level, h_float)\n start_w = calc_pos(col, level, w_float)\n end_w = calc_pos(col+1, level, w_float)\n\n if data_format == 'channels_last':\n pooling_region = inputs[:, start_h:end_h, start_w:end_w, :]\n else:\n pooling_region = inputs[..., start_h:end_h, start_w:end_w]\n\n pool_result = pooling_op(pooling_region, axis=(h_axis, w_axis))\n result.append(pool_result)\n\n output = tf.reshape(tf.stack(result, axis=1), shape=(-1, level, level, num_channels))\n return output\n\n\n\nclass ASPP:\n \"\"\" Atrous Spatial Pyramid Pooling module.\n\n Chen L. et al. \"`Rethinking Atrous Convolution for Semantic Image Segmentation\n `_\"\n\n Parameters\n ----------\n layout : str\n Layout for convolution layers.\n filters : int\n Number of filters in the output tensor.\n kernel_size : int\n Kernel size for dilated branches (default=3).\n rates : tuple of int\n Dilation rates for branches, default=(6, 12, 18).\n image_level_features : int or tuple of int\n Number of image level features in each dimension.\n\n Default is 2, i.e. 2x2=4 pooling features will be calculated for 2d images,\n and 2x2x2=8 features per 3d item.\n\n Tuple allows to define several image level features, e.g (2, 3, 4).\n name : str\n Layer name that will be used as a scope.\n\n See also\n --------\n PyramidPooling\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.args, self.kwargs = args, kwargs\n\n def __call__(self, inputs):\n return aspp(inputs, *self.args, **self.kwargs)\n\n\ndef aspp(inputs, layout='cna', filters=None, kernel_size=3, rates=(6, 12, 18), image_level_features=2,\n name='aspp', **kwargs):\n \"\"\" Atrous Spatial Pyramid Pooling module. 
\"\"\"\n data_format = kwargs.get('data_format', 'channels_last')\n axis = -1 if data_format == 'channels_last' else 1\n if filters is None:\n filters = inputs.get_shape().as_list()[axis]\n if isinstance(image_level_features, int):\n image_level_features = (image_level_features,)\n\n with tf.variable_scope(name):\n x = ConvBlock(layout, filters=filters, kernel_size=1, name='conv-1x1', **kwargs)(inputs)\n layers = [x]\n\n for level in rates:\n x = ConvBlock(layout, filters=filters, kernel_size=kernel_size, dilation_rate=level,\n name='conv-%d' % level, **kwargs)(inputs)\n layers.append(x)\n\n x = pyramid_pooling(inputs, filters=filters, pyramid=image_level_features,\n name='image_level_features', **kwargs)\n layers.append(x)\n\n x = tf.concat(layers, axis=axis, name='concat')\n x = ConvBlock(layout, filters=filters, kernel_size=1, name='last_conv', **kwargs)(x)\n return x\n","sub_path":"batchflow/batchflow/models/tf/layers/pyramid.py","file_name":"pyramid.py","file_ext":"py","file_size_in_byte":7244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"365695895","text":"\"\"\"\nNOTE: Credit goes out to the doctop team for this providing this amazing\nexample.\nThis file was derived from the example\nhttps://github.com/docopt/docopt/blob/master/examples/interactive_example.py\nThis example uses docopt with the built in cmd module to implement an\ninteractive primfeed commandline application.\n\nUsage:\n primefeed view_feed\n primefeed post <body>\n primefeed comment <post_id> <title> <body>\n primefeed (-i | --interactive)\n primefeed (-h | --help | --version)\n\nOptions:\n -i, --interactive Interactive Mode\n -h, --help Show this screen and exit.\n\"\"\"\n\nimport cmd\nimport sys\n\nfrom docopt import DocoptExit, docopt\n\n# Import the file with our Api class\nfrom app.primefeed import Primefeed\n\n\ndef docopt_cmd(func):\n \"\"\"\n This decorator is used to simplify the try/except block and pass the result\n of the docopt parsing to the called action.\n \"\"\"\n\n def fn(self, arg):\n try:\n opt = docopt(fn.__doc__, arg)\n\n except DocoptExit as e:\n # The DocoptExit is thrown when the args do not match.\n # We print a message to the user and the usage block.\n\n print('Invalid Command!')\n print(e)\n return\n\n except SystemExit:\n # The SystemExit exception prints the usage for --help\n # We do not need to do the print here.\n\n return\n\n return func(self, opt)\n\n fn.__name__ = func.__name__\n fn.__doc__ = func.__doc__\n fn.__dict__.update(func.__dict__)\n return fn\n\n\nclass PrimeFeedInteractive (cmd.Cmd):\n intro = 'Welcome to the primitive social app!' 
\\\n + ' (type help for a list of commands.)'\n prompt = '(primitive_cli) '\n file = None\n\n def __init__(self):\n # Instantiate an instance of our Primefeed class\n self.app = Primefeed()\n\n super(PrimeFeedInteractive, self).__init__()\n\n @docopt_cmd\n def do_view_feed(self, arg):\n \"\"\"Usage: view_feed\"\"\"\n # view feed code goes here\n\n # Prints the arguments passed for fun, remove when you are done\n print(arg)\n\n @docopt_cmd\n def do_comment(self, arg):\n \"\"\"Usage: comment <post_id> <title> <body>\"\"\"\n # comment code goes here\n\n # Prints the arguments passed for fun, remove when you are done\n print(arg)\n\n @docopt_cmd\n def do_post(self, arg):\n \"\"\"Usage: post <title> <body>\"\"\"\n # post code goes here\n\n # Prints the arguments passed for fun, remove when you are done\n print(arg)\n\n def do_quit(self, arg):\n \"\"\"Quits out of Interactive Mode.\"\"\"\n\n print('Good Bye!')\n exit()\n\n\nopt = docopt(__doc__, sys.argv[1:])\n\nif opt['--interactive']:\n PrimeFeedInteractive().cmdloop()\n\nprint(opt)\n","sub_path":"primefeed.py","file_name":"primefeed.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"266017615","text":"# -*- coding: utf-8 -*-\n\"\"\"\n yinsho.services.iddaService\n #####################\n\n yinsho IddaService module\n\"\"\"\nimport datetime\nfrom flask import json, g\nfrom sqlalchemy import and_, func,text\nfrom sqlalchemy.orm import joinedload_all,joinedload\n\nfrom ..base import utils\nfrom ..model import idda \n \n\nclass IddaService():\n \"\"\" iddaService \"\"\"\n \"Save a parameter type: every field is required; check whether the key already exists before inserting\"\n def idda_save(self,**kwargs):\n self.save_list = ['instition_number','business_type','now_state','adjust_time_type','adjust_time','adjust_value']\n newdata = kwargs.get('newdata')\n data ={}\n for field in self.save_list:\n value = newdata.get(field)\n if value:\n data[field] = value\n else:\n return u\"%s is required\" % field\n g.db_session.add(idda(**data))\n return u\"Added successfully\" \n\n def type_update(self, **kwargs):\n self.save_list = ['instition_number','now_state','business_type','adjust_time_type','adjust_time','adjust_value']\n newdata = kwargs.get('newdata')\n data ={}\n tid = newdata.get('id')\n if not tid: return u\"No primary key given for update\"\n for k,v in newdata.items():\n if k in self.save_list : data[k] = v\n g.db_session.query(idda).filter(idda.id==tid).update(data)\n return u\"Updated successfully\"\n \n \n def type_delete(self,**kwargs):\n g.db_session.query(idda).filter(idda.id==kwargs.get('delete_id')).delete()\n return u\"Deleted successfully\"\n","sub_path":"src_20170503/src/web/server/fabs/services/idda.py","file_name":"idda.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"93298686","text":"from flask_login import login_required, current_user\nfrom flask_restful import reqparse, Resource\nfrom flask import redirect, abort, flash, request, url_for\nfrom models.vets import Vets\n\n\nclass VetsAPI(Resource):\n method_decorators = [login_required]\n\n def __init__(self):\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('name', type=str)\n self.parser.add_argument('age', type=str)\n self.parser.add_argument('address', type=str)\n self.parser.add_argument('profession', type=str)\n self.parser.add_argument('gender', type=str)\n\n def post(self, id):\n method = reqparse.RequestParser()\n method.add_argument('_method', type=str)\n method = method.parse_args()\n if 
method['_method'] == \"Delete\":\n if VetsAPI.delete(self, id):\n flash(\"Success, Vet is deleted!\")\n else:\n flash(\"Fail, Vet is not deleted!\")\n next_page = request.args.get(\"next\", url_for(\"home_page\"))\n return redirect(next_page)\n elif method['_method'] == \"Update\":\n if VetsAPI.put(self, id):\n flash(\"Success, Vet is updated!\")\n else:\n flash(\"Fail, Vet is not updated!\")\n next_page = request.args.get(\"next\", url_for(\"home_page\"))\n return redirect(next_page)\n else:\n abort(405)\n\n def get(self, id):\n u = Vets.get(id=id)\n if u:\n vet = u.__dict__\n return vet\n return {}, 404\n\n def put(self, id):\n if current_user.is_admin:\n args = self.parser.parse_args()\n u = Vets.get(id=id)\n if u and args:\n u.update(**args)\n return u.__dict__\n else:\n abort(403)\n return {}, 404\n\n def delete(self, id):\n if current_user.is_admin:\n u = Vets.get(id=id)\n if u:\n r = u.__dict__\n u.delete()\n return r, 200\n else:\n abort(403)\n\n\nclass VetsListAPI(Resource):\n method_decorators = [login_required]\n\n def __init__(self):\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('name', type=str)\n self.parser.add_argument('age', type=str)\n self.parser.add_argument('address', type=str)\n self.parser.add_argument('gender', type=str)\n self.parser.add_argument('profession', type=str)\n\n def get(self):\n if current_user.is_admin:\n qs = Vets.filter()\n if qs:\n r = [u.__dict__ for u in qs]\n return r\n else:\n abort(403)\n abort(404)\n\n def post(self):\n args = self.parser.parse_args()\n if args:\n if Vets.create(**args):\n vet = Vets.filter(**args).__getitem__(0)\n flash('You were successfully created Vet!')\n next_page = request.args.get(\"next\", url_for(\"home_page\"))\n return redirect(next_page)\n else:\n flash('You were failed to create Vet!')\n next_page = request.args.get(\"next\", url_for(\"home_page\"))\n return redirect(next_page)\n return {}, 404\n","sub_path":"views/vets.py","file_name":"vets.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"354807544","text":"'''\nCreated on 2019/07/07\n\n@author: shunsuke\n'''\n\n# sort の使い方について\nCOLOR_A = {\n \"no\": 5,\n \"kind\": 1,\n \"name\": \"aliceblue\",\n}\nCOLOR_B = {\n \"no\": 4,\n \"kind\": 1,\n \"name\": \"Black\",\n}\nCOLOR_C = {\n \"no\": 3,\n \"kind\": 2,\n \"name\": \"coral\",\n}\nCOLOR_D = {\n \"no\": 2,\n \"kind\": 2,\n \"name\": \"Deepskyblue\",\n}\nCOLOR_F = {\n \"no\": 1,\n \"kind\": 2,\n \"name\": \"forestgreen\",\n}\n\nsample_list = [\n COLOR_A,\n COLOR_B,\n COLOR_C,\n COLOR_D,\n COLOR_F\n]\n\n# 1. 番号の順番で並び替える\nno_index = [5, 2, 3, 4, 1]\n\nprint(\n sorted(\n sample_list,\n key=lambda data: no_index.index(data[\"no\"])\n )\n)\n\n# 2. 文字列の順番で並び変える\nname_index = [\n COLOR_B[\"name\"], COLOR_D[\"name\"], COLOR_F[\"name\"],\n COLOR_C[\"name\"], COLOR_A[\"name\"]\n]\n\nprint(\n sorted(\n sample_list,\n key=lambda data: name_index.index(data[\"name\"])\n )\n)\n\n# 3. 
Sort by kind, then by string (case-insensitive dictionary order)\nprint(\n sorted(\n sample_list,\n key=lambda data: [no_index.index(data[\"kind\"]),\n str.lower(data[\"name\"])]\n )\n)\n\n","sub_path":"apps/sort_sample.py","file_name":"sort_sample.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"168216184","text":"import numpy as np\nfrom algorithm.sigmoid import sigmoid\n\nfrom cost_function.abstract import AbstractCostFunction\nfrom sparsity.kullback_leibler_divergence import KullbackLeiblerDivergence\n\n__author__ = 'cenk'\n\n\nclass SumOfSquareCost(AbstractCostFunction):\n def __init__(self, encoder, activation_function=sigmoid, sparsity_function=KullbackLeiblerDivergence):\n self._input_unit_size = encoder._input_unit_size\n self._hidden_unit_size = encoder._hidden_unit_size\n self._rho = encoder._rho\n self._lamda = encoder._lamda\n self._beta = encoder._beta\n limits = encoder._limits\n\n self._W1 = encoder.theta[limits[0]:limits[1]].reshape(self._hidden_unit_size, self._input_unit_size)\n self._W2 = encoder.theta[limits[1]:limits[2]].reshape(self._input_unit_size, self._hidden_unit_size)\n\n self._B1 = encoder.theta[limits[2]:limits[3]].reshape(self._hidden_unit_size, 1)\n self._B2 = encoder.theta[limits[3]:limits[4]].reshape(self._input_unit_size, 1)\n\n self._activation_function = activation_function\n self._sparsity_function = sparsity_function\n self.theta = encoder.theta\n\n def calculate(self, theta, input_data):\n print('Calculating Sum Of Squares')\n hidden_layer = self._activation_function(np.dot(self._W1, input_data) + self._B1)\n output_layer = np.dot(self._W2, hidden_layer) + self._B2\n\n rho_cap = np.sum(hidden_layer, axis=1) / input_data.shape[1]\n diff = output_layer - input_data\n error_value = 0.5 * np.sum(np.multiply(diff, diff)) / input_data.shape[1]\n weight_decay = 0.5 * self._lamda * (\n np.sum(np.multiply(self._W1, self._W1)) + np.sum(np.multiply(self._W2, self._W2)))\n\n sparsity_value = self._sparsity_function.calculate(beta=self._beta, rho=self._rho, rho_cap=rho_cap)\n cost = error_value + weight_decay + sparsity_value\n\n KL_div_grad = self._beta * (-(self._rho / rho_cap) + ((1 - self._rho) / (1 - rho_cap)))\n del_out = diff\n del_hid = np.multiply(np.dot(np.transpose(self._W2), del_out) + np.transpose(np.matrix(KL_div_grad)),\n np.multiply(hidden_layer, 1 - hidden_layer))\n\n \"\"\" Compute the gradient values by averaging partial derivatives\n Partial derivatives are averaged over all training examples \"\"\"\n\n W1_grad = np.dot(del_hid, np.transpose(input_data))\n W2_grad = np.dot(del_out, np.transpose(hidden_layer))\n b1_grad = np.sum(del_hid, axis=1)\n b2_grad = np.sum(del_out, axis=1)\n\n W1_grad = W1_grad / input_data.shape[1] + self._lamda * self._W1\n W2_grad = W2_grad / input_data.shape[1] + self._lamda * self._W2\n b1_grad = b1_grad / input_data.shape[1]\n b2_grad = b2_grad / input_data.shape[1]\n\n \"\"\" Transform numpy matrices into arrays \"\"\"\n\n W1_grad = np.array(W1_grad)\n W2_grad = np.array(W2_grad)\n b1_grad = np.array(b1_grad)\n b2_grad = np.array(b2_grad)\n\n \"\"\" Unroll the gradient values and return as 'theta' gradient \"\"\"\n\n theta_grad = np.concatenate((W1_grad.flatten(), W2_grad.flatten(),\n b1_grad.flatten(), b2_grad.flatten()))\n\n return [cost, 
theta_grad]\n","sub_path":"cost_function/sum_of_squares.py","file_name":"sum_of_squares.py","file_ext":"py","file_size_in_byte":3270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"231249931","text":"# model settings\nmodel = dict(\n type='FastRCNN',\n pretrained='/home/ubuntu/CUAI_2021/Advanced_Won_Cho/code/Swin-Transformer-Object-Detection/work_dirs/swin+cascade/swin_base_patch4_window7_224_22k.pth',\n backbone=dict(\n type='SwinTransformer',\n embed_dim=96,\n depths=[2, 2, 6, 2],\n num_heads=[3, 6, 12, 24],\n window_size=7,\n mlp_ratio=4.,\n qkv_bias=True,\n qk_scale=None,\n drop_rate=0.,\n attn_drop_rate=0.,\n drop_path_rate=0.2,\n ape=False,\n patch_norm=True,\n out_indices=(0, 1, 2, 3),\n use_checkpoint=False),\n neck=dict(\n type='FPN',\n in_channels=[256, 512, 1024, 2048],\n out_channels=256,\n num_outs=5),\n roi_head=dict(\n type='StandardRoIHead',\n bbox_roi_extractor=dict(\n type='SingleRoIExtractor',\n roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),\n out_channels=256,\n featmap_strides=[4, 8, 16, 32]),\n bbox_head=dict(\n type='Shared2FCBBoxHead',\n in_channels=256,\n fc_out_channels=1024,\n roi_feat_size=7,\n num_classes=11,\n bbox_coder=dict(\n type='DeltaXYWHBBoxCoder',\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n reg_class_agnostic=False,\n loss_cls=dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),\n loss_bbox=dict(type='DIoULoss', loss_weight=1.0))),\n # model training and testing settings\n train_cfg=dict(\n rcnn=dict(\n assigner=dict(\n type='MaxIoUAssigner',\n pos_iou_thr=0.5,\n neg_iou_thr=0.5,\n min_pos_iou=0.5,\n match_low_quality=False,\n ignore_iof_thr=-1),\n sampler=dict(\n type='RandomSampler',\n num=512,\n pos_fraction=0.25,\n neg_pos_ub=-1,\n add_gt_as_proposals=True),\n pos_weight=-1,\n debug=False)),\n test_cfg=dict(\n rcnn=dict(\n score_thr=0.05,\n nms=dict(type='soft_nms', iou_threshold=0.5),\n max_per_img=100)))\n","sub_path":"jo-member_works_det/configs/_base_/models/fast_rcnn_r50_fpn.py","file_name":"fast_rcnn_r50_fpn.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"461950833","text":"from django.conf import settings\nfrom django.db import models\n\n\nclass TableColumn(models.Model):\n\n name = models.CharField(max_length=200, verbose_name='Name')\n width = models.SmallIntegerField(verbose_name='Width')\n num = models.SmallIntegerField(verbose_name='Ordinal number')\n\n class Meta:\n verbose_name = 'Table column'\n verbose_name_plural = 'Table columns'\n ordering = ('num',)\n\n def __str__(self):\n return f\"{self.name} ({self.num})\"\n\n\nclass CSVFilePath(models.Model):\n\n filepath = models.FileField()\n\n class Meta:\n verbose_name = 'CSV file'\n verbose_name_plural = 'CSV files'\n\n @classmethod\n def get_path(cls):\n instance = CSVFilePath.objects.first()\n if instance:\n return instance.filepath.name\n else:\n return None\n\n @classmethod\n def set_path(cls, filepath: str):\n instance = CSVFilePath.objects.first()\n if instance:\n instance.filepath = filepath\n instance.save()\n else:\n CSVFilePath.objects.create(filepath=filepath)\n\n def __str__(self):\n return self.filepath.name\n\n","sub_path":"table/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"376710288","text":"import 
cluster.pbsjob as job\n\nclass resmigjob(job.pbsjob):\n \"\"\" Keeps track of jobs for performing residual migration \"\"\"\n\n def __init__(self,pars,velname,refname,idx,jobname,parpath=\".\",logpath=\".\",user='joseph29',verb=False):\n # Inherit from job class\n super(resmigjob,self).__init__(logpath,user,verb)\n # Create the par file for this job\n self.pfname = parpath + '/' + jobname + self.jobid + '.par'\n self.write_resmigpar(self.pfname,pars,velname,refname,idx)\n # Keep names and idx for this job\n self.velname = velname; self.refname = refname\n self.idx = idx\n\n def write_resmigpar(self,name,pars,velname,refname,idx):\n \"\"\"Writes a par file for residual migration fault training data \"\"\"\n # Build the par file\n parout=\"\"\"[defaults]\n# IO\nvelin=%s\nrefin=%s\noutdir=%s\ndpath=%s\nptbpf=%s\nimgpf=%s\nvelidx=%d\n# Other\nnthreads=%d\nverb=%s\n\"\"\"%(velname, refname, pars.outdir, pars.datapath, pars.ptbpf, pars.imgpf, idx, #IO\n pars.nprocs,pars.verb) # Other\n # Write the par file\n with open(name,'w') as f:\n f.write(parout)\n\n return\n\n","sub_path":"cluster/resmigflt.py","file_name":"resmigflt.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"411623870","text":"import torch as t\nimport torch.nn as nn\n\nclass CVAE(nn.Module):\n def __init__(self,args):\n super().__init__()\n self.args=args\n\n # reduce dimension\n self.encoder=nn.Linear(self.args.input_dim,self.args.hidden_dim)\n\n # every class has a special mean value objective\n # input should be one-hot\n self.condition=nn.Linear(self.args.num_classes,self.args.latent_dim)\n\n # generate \\mu\n self.mean_generator=nn.Linear(self.args.hidden_dim,self.args.latent_dim)\n # generate log \\sigma ^ 2\n self.log_var_generator=nn.Linear(self.args.hidden_dim,self.args.latent_dim)\n\n self.decoder=nn.Sequential(\n nn.Linear(self.args.latent_dim,self.args.hidden_dim),\n nn.ReLU(),\n nn.Linear(self.args.hidden_dim,self.args.input_dim),\n nn.Sigmoid()\n )\n\n self.bce=nn.BCELoss(reduction='none')\n\n # batch: batch_size * input_dim\n # label: batch_size * num_classes\n def forward(self,batch,label):\n hidden=t.relu(self.encoder(batch))\n condition_mean=self.condition(label)\n # \\mu\n mean=self.mean_generator(hidden)\n # log \\sigma ^ 2\n log_var=self.log_var_generator(hidden)\n # \\epsilon\n Z=t.randn(batch.shape[0],self.args.latent_dim).cuda()\n # \\mu + \\epsilon * \\sigma\n Z=mean+Z*t.exp(log_var/2)\n output=self.decoder(Z)\n\n reconstruction_loss=t.sum(self.bce(output,batch),dim=-1)\n\n kl_loss=t.sum(0.5*(-log_var+(mean-condition_mean)*(mean-condition_mean)+t.exp(log_var)-1),dim=-1)\n\n cvae_loss=t.mean(reconstruction_loss+kl_loss)\n\n return output,cvae_loss\n\n def sample(self,num):\n # sample\n return t.randn(num,self.args.latent_dim)\n\n\n\n","sub_path":"model/CVAE.py","file_name":"CVAE.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"620651477","text":"config = {\n 'datasets': {\n 'ucf101': {\n 'nb_classes': 101,\n },\n 'synthetic_boxes': {\n 'nb_classes': 5,\n 'parameters': [\n 'linear_move',\n 'jitter_move',\n 'random_move',\n 'random_angle',\n 'random_angle_per_frame',\n 'background_shapes',\n 'random_background_color',\n 'random_bg_per_frame',\n 'random_foreground_color',\n 'random_fg_per_frame',\n ],\n },\n 
},\n}\n","sub_path":"datasets/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"506840773","text":"import os\nfrom os import scandir\ndef has_subdir(path):\n \"\"\"Return True if every entry under path is a non-hidden directory or an .html file.\"\"\"\n y=True\n for entry in scandir(path):\n if not((not entry.name.startswith('.') and entry.is_dir()) or \".html\" in entry.name):\n y=False\n return y\n\n#s=\"ufraw-batch --out-type=jpeg --out-path=. ./*.NEF\"\nimport sys\npath = sys.argv[1]\n\ndef apply_recursive(path):\n if has_subdir(path):\n for sub_dir in scandir(path):\n if \".html\" not in sub_dir.name:\n apply_recursive(path+'/'+sub_dir.name)\n # break\n else:\n p=path.replace(' ','\\\\ ')\n for f in scandir(path):\n # if not \"_bg_\" in f.name:\n # os.system(\"rm \"+p+\"/\"+f.name)\n if \".NEF\" in f.name:\n os.system(\"ufraw-batch --out-type=jpeg --out-path=\"+p+\" \"+p+\"/\"+f.name)\n os.system('rm '+p+\"/\"+f.name)\n os.system(\"python bg_removal.py \"+p+\"/\"+f.name.split('.')[0]+'.jpg')\n print(p+\"/\"+f.name.split('.')[0]+'.jpg')\n os.system('rm '+p+'/*.NEF')\n\n\napply_recursive(path)","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"187885391","text":"# Copyright 2017 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ------------------------------------------------------------------------------\nimport argparse\nfrom math import floor, log\n\nfrom sawtooth_cli.network_command.parent_parsers import base_multinode_parser\nfrom sawtooth_cli.network_command.parent_parsers import split_comma_append_args\nfrom sawtooth_cli.network_command.parent_parsers import make_rest_apis\nfrom sawtooth_cli.network_command.fork_graph import ForkGraph\nfrom sawtooth_cli.network_command.fork_graph import SimpleBlock\n\n\ndef add_compare_chains_parser(subparsers, parent_parser):\n \"\"\"Creates the arg parsers needed for the compare command.\n \"\"\"\n parser = subparsers.add_parser(\n 'compare-chains',\n help='Compare chains from different nodes.',\n description=(\n 'Compute and display information about how the chains at '\n 'different nodes differ.'\n ),\n formatter_class=argparse.RawDescriptionHelpFormatter,\n epilog='''\nBy default, prints a table of summary data and a table of per-node data with\nthe following fields. Pass --tree for a fork graph.\n\nCOMMON ANCESTOR\n The most recent block that all chains have in common.\n\nCOMMON HEIGHT\n Let min_height := the minimum height of any chain across all nodes passed\n in. COMMON HEIGHT = min_height.\n\nHEAD\n The block id of the most recent block on a given chain.\n\nHEIGHT\n The block number of the most recent block on a given chain.\n\nLAG\n Let max_height := the maximum height of any chain across all nodes passed\n in. 
LAG = max_height - HEIGHT for a given chain.\n\nDIVERG\n Let common_ancestor_height := the height of the COMMON ANCESTOR.\n DIVERG = HEIGHT - common_ancestor_height\n\n''',\n parents=[parent_parser, base_multinode_parser()])\n\n parser.add_argument(\n '--table',\n action='store_true',\n help='Print out a fork table for all nodes since the common ancestor.')\n\n parser.add_argument(\n '--tree',\n action='store_true',\n help='Print out a fork tree for all nodes since the common ancestor.')\n\n\ndef do_compare_chains(args):\n \"\"\"Calculates and outputs comparison between all nodes on the network.\"\"\"\n urls = split_comma_append_args(args.urls)\n users = split_comma_append_args(args.users)\n clients = make_rest_apis(urls, users)\n chains = get_chain_generators(clients)\n\n tails = get_tails(chains)\n graph = build_fork_graph(chains, tails)\n\n if args.table:\n print_table(graph, tails)\n\n elif args.tree:\n print_tree(graph, tails)\n\n else:\n print_summary(graph, tails)\n\n\ndef get_chain_generators(clients):\n # Convert the block dictionaries to simpler python data structures to\n # conserve memory and simplify interactions.\n return [\n map(SimpleBlock.from_block_dict, c.list_blocks(limit=3))\n for c in clients\n ]\n\n\ndef print_summary(graph, tails):\n \"\"\"Print out summary and per-node comparison data.\"\"\"\n # Get comparison data\n heads = get_heads(tails)\n heights = get_heights(tails)\n max_height = max(heights)\n common_height, block_ids_at_common_height = get_common_height(tails)\n lags = get_lags(heights, max_height)\n common_ancestor = graph.root\n divergences = get_divergences(heights, graph.root)\n\n # Print summary info\n col_1 = 8\n col_n = 8\n format_str = '{:<' + str(col_1) + '} ' + ('{:<' + str(col_n) + '} ') * 2\n header = format_str.format(\"COMMON\", \"HEIGHT\", \"BLOCKS\")\n print(header)\n print(\"-\" * len(header))\n print(format_str.format(\n \"ANCESTOR\", common_ancestor.num, common_ancestor.ident[:col_n]))\n print(format_str.format(\n \"HEIGHT\", common_height, str(block_ids_at_common_height)))\n print()\n\n # Print per-node data\n col_1 = 6\n col_n = 8\n format_str = \\\n '{:<' + str(col_1) + '} ' + ('{:<' + str(col_n) + '} ') * len(tails)\n header = format_str.format(\"NODE\", *list(range(len(tails))))\n print(header)\n print('-' * len(header))\n\n print(format_str.format(\"HEAD\", *heads))\n print(format_str.format(\"HEIGHT\", *heights))\n print(format_str.format(\"LAG\", *lags))\n print(format_str.format(\"DIVERG\", *divergences))\n print()\n\n\ndef print_table(graph, tails):\n \"\"\"Print out a table of nodes and the blocks they have at each block height\n starting with the common ancestor.\"\"\"\n node_count = len(tails)\n\n # Get the width of the table columns\n num_col_width = max(\n floor(log(max(get_heights(tails)), 10)) + 1,\n len(\"NUM\"))\n node_col_width = max(\n floor(log(node_count, 10)) + 1,\n 8)\n\n # Construct the output format string\n format_str = ''\n format_str += '{:<' + str(num_col_width) + '} '\n for _ in range(node_count):\n format_str += '{:<' + str(node_col_width) + '} '\n\n nodes_header = [\"NODE \" + str(i) for i in range(node_count)]\n header = format_str.format(\"NUM\", *nodes_header)\n print(header)\n print('-' * len(header))\n\n prev_block_num = -1\n node_list = [''] * node_count\n for block_num, _, siblings in graph.walk():\n if block_num != prev_block_num:\n # Need to skip the first one\n if prev_block_num != -1:\n print(format_str.format(prev_block_num, *node_list))\n\n node_list.clear()\n node_list.extend([''] * 
node_count)\n prev_block_num = block_num\n\n for block_id, node_ids in siblings.items():\n for node_id in node_ids:\n node_list[node_id] = block_id[:8]\n\n # Print the last one\n print(format_str.format(prev_block_num, *node_list))\n\n\ndef print_tree(graph, tails):\n \"\"\"Print out a tree of blocks starting from the common ancestor.\"\"\"\n num_col_width = max(\n floor(log(max(get_heights(tails)), 10)) + 1,\n len(\"NUM\"))\n col_n = 8\n\n format_str = (\n '{:<' + str(num_col_width) + '} '\n + ('{:<' + str(col_n) + '} ') * 2 + '{}'\n )\n\n header = format_str.format(\"NUM\", \"PARENT\", \"BLOCK\", \"NODES\")\n print(header)\n print('-' * len(header))\n walker = graph.walk()\n\n next_block_num, parent, siblings = next(walker)\n cliques = {}\n while True:\n block_num = next_block_num\n\n try:\n while block_num == next_block_num:\n cliques[parent] = siblings\n next_block_num, parent, siblings = next(walker)\n except StopIteration:\n break\n\n print_cliques_at_height(block_num, cliques, format_str)\n\n cliques = {}\n\n print_cliques_at_height(block_num, cliques, format_str)\n\n\ndef print_cliques_at_height(block_num, cliques, format_str):\n print(format_str.format(block_num, '', '', ''))\n for parent, siblings in cliques.items():\n print(format_str.format('', parent[:8], '', ''))\n for block_id, nodes in siblings.items():\n print(format_str.format(\n '', '', block_id[:8], format_siblings(nodes)))\n print()\n\n\ndef format_siblings(nodes):\n return \"{\" + \", \".join(str(n) for n in nodes) + \"}\"\n\n\ndef get_heads(tails):\n return [tail[-1].ident[:8] for tail in tails]\n\n\ndef get_heights(tails):\n return [tail[-1].num for tail in tails]\n\n\ndef get_common_height(tails):\n block_ids = set(tail[0].ident[:8] for tail in tails)\n return tails[0][0].num, block_ids\n\n\ndef get_lags(heights, max_height):\n return [max_height - height for height in heights]\n\n\ndef get_divergences(heights, root):\n return [height - root.num for height in heights]\n\n\ndef get_tails(chains):\n \"\"\"\n Args:\n An ordered collection of block generators.\n\n Returns a list of blocks for all chains where:\n 1. The first block in all the lists has the same block number\n 2. 
Each list has all blocks from the common block to the current block\n in increasing order\n \"\"\"\n\n def get_num_of_oldest(blocks):\n return blocks[0].num\n\n # Get the first block from every chain\n tails = [[next(chain)] for chain in chains]\n\n # Find the minimum block number between all chains\n min_block_num = min(map(get_num_of_oldest, tails))\n\n # Walk all chains back to the minimum block number, adding blocks to the\n # chain lists as we go\n for i, chain in enumerate(chains):\n tail = tails[i]\n while get_num_of_oldest(tail) > min_block_num:\n tail.insert(0, next(chain))\n\n return tails\n\n\ndef _compare_across(collections, key):\n \"\"\"Return whether all the collections return equal values when called with\n `key`.\"\"\"\n if len(collections) < 2:\n return True\n c0 = key(collections[0])\n return all(c0 == key(c) for c in collections[1:])\n\n\ndef build_fork_graph(chains, tails):\n \"\"\"\n Args:\n An ordered collection of block generators which have been consumed to\n the point where they are all at the same block height and the tails of\n the chains from that block height (in the same order).\n\n Returns a ForkGraph.\n \"\"\"\n graph = ForkGraph()\n\n # Add tails to the graph first\n for i, tail in enumerate(tails):\n for block in reversed(tail):\n graph.add_block(i, block)\n\n # If we are already at the common ancestor, stop\n if _compare_across(\n [tail[0] for tail in tails], key=lambda block: block.ident\n ):\n return graph\n\n # Chains should now all be at the same height, so we can walk back\n # to common ancestor\n while True:\n heads = [next(chain) for chain in chains]\n for i, block in enumerate(heads):\n graph.add_block(i, block)\n if _compare_across(heads, key=lambda block: block.ident):\n break\n\n return graph\n","sub_path":"cli/sawtooth_cli/network_command/compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":10175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"484642090","text":"from django.shortcuts import render\nfrom .reading import *\nimport Login\nimport time\nimport json\n\ndef reading(request):\n if request.method == 'POST':\n if len(request.POST.get('foreign')) >= 5000:\n ctx = {'title':'ML(MyLang) Reading', 'error':'1'}\n return render(request, 'reading.html', ctx)\n score = read(request.POST.get('foreign'), request.POST.get('kor'), request.POST.get('category'))\n if score[0] == u'-':\n score = u'0%'\n user = Login.get_current_user(request)\n if user == -1:\n return render(request, \"login_please.html\")\n reading = [[score, time.time()]] + json.loads(str(user.readding_level))\n user.readding_level = json.dumps(reading)\n user.save()\n ctx = {'score': score, 'fore': request.POST.get('foreign'), 'kor': request.POST.get('kor'),\n 'test': request.POST.get('test'), 'title':'ML(MyLang) Reading', 'error':'0', 'user_id':Login.get_current_user(request).user_id}\n ctx['number'] = Login.get_current_user(request).new_message\n return render(request, 'reading.html', ctx)\n ctx = {'title':'ML(MyLang)', 'error':'0'}\n if Login.get_current_user(request) != -1:\n ctx['user_id'] = Login.get_current_user(request).user_id\n ctx['number'] = Login.get_current_user(request).new_message\n else:\n return render(request, 'login_please.html')\n return render(request, 'reading.html', ctx)","sub_path":"MyLang/LangPi/Read.py","file_name":"Read.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"509145867","text":"# -*- coding: utf-8 -*-\n\"\"\"\n flaskext.cache\n ~~~~~~~~~~~~~~\n\n Adds cache support to your application.\n\n :copyright: (c) 2010 by Thadeus Burgess.\n :license: BSD, see LICENSE for more details\n\"\"\"\nfrom functools import wraps\n\nfrom werkzeug.contrib.cache import (SimpleCache, NullCache, MemcachedCache,\n GAEMemcachedCache, FileSystemCache)\nfrom flask import request, current_app\n\n\nclass Cache(object):\n \"\"\"\n This class is used to control the cache objects.\n\n If TESTING is True it will use NullCache.\n \"\"\"\n\n def __init__(self, app=None):\n self.cache = None\n\n if app is not None:\n self.init_app(app)\n else:\n self.app = None\n\n def init_app(self, app):\n \"This is used to initialize cache with your app object\"\n\n app.config.setdefault('CACHE_DEFAULT_TIMEOUT', 300)\n app.config.setdefault('CACHE_THRESHOLD', 500)\n app.config.setdefault('CACHE_KEY_PREFIX', None)\n app.config.setdefault('CACHE_MEMCACHED_SERVERS', None)\n app.config.setdefault('CACHE_DIR', None)\n app.config.setdefault('CACHE_TYPE', 'NullCache')\n\n self.app = app\n\n self._set_cache()\n\n def _set_cache(self):\n if self.app.config['TESTING']:\n self.cache = NullCache()\n else:\n if self.app.config['CACHE_TYPE'] == 'Null':\n self.cache = NullCache()\n elif self.app.config['CACHE_TYPE'] == 'Simple':\n self.cache = SimpleCache(\n threshold=self.app.config['CACHE_THRESHOLD'],\n default_timeout=self.app.config['CACHE_DEFAULT_TIMEOUT'])\n elif self.app.config['CACHE_TYPE'] == 'Memcached':\n self.cache = MemcachedCache(\n self.app.config['CACHE_MEMCACHED_SERVERS'],\n default_timeout=self.app.config['CACHE_DEFAULT_TIMEOUT'],\n key_prefix=self.app.config['CACHE_KEY_PREFIX'])\n elif self.app.config['CACHE_TYPE'] == 'GAE':\n self.cache = GAEMemcachedCache(\n default_timeout=self.app.config['CACHE_DEFAULT_TIMEOUT'],\n key_prefix=self.app.config['CACHE_KEY_PREFIX'])\n elif self.app.config['CACHE_TYPE'] == 'FileSystem':\n self.cache = FileSystemCache(\n self.app.config['CACHE_DIR'],\n threshold=self.app.config['CACHE_THRESHOLD'],\n default_timeout=self.app.config['CACHE_DEFAULT_TIMEOUT'])\n\n def get(self, *args, **kwargs):\n \"Proxy function for internal cache object.\"\n return self.cache.get(*args, **kwargs)\n\n def set(self, *args, **kwargs):\n \"Proxy function for internal cache object.\"\n self.cache.set(*args, **kwargs)\n\n def add(self, *args, **kwargs):\n \"Proxy function for internal cache object.\"\n self.cache.add(*args, **kwargs)\n\n def delete(self, *args, **kwargs):\n \"Proxy function for internal cache object.\"\n self.cache.delete(*args, **kwargs)\n\n def cached(self, timeout=None, key_prefix='view/%s', unless=None):\n \"\"\"\n Decorator. Use this to cache a function. By default the cache key\n is `view/request.path`. You are able to use this decorator with any\n function by changing the `key_prefix`. If the token `%s` is located\n within the `key_prefix` then it will replace that with `request.path`\n\n Example::\n\n # An example view function\n @cache.cached(timeout=50)\n def big_foo():\n return big_bar_calc()\n\n # An example misc function to cache.\n @cache.cached(key_prefix='MyCachedList')\n def get_list():\n return [random.randrange(0, 1) for i in range(50000)]\n\n .. code-block:: pycon\n\n >>> my_list = get_list()\n\n :param timeout: Default None. If set to an integer, will cache for that\n amount of time.\n :param key_prefix: Default 'view/%(request.path)s'. Beginning key to .\n use for the cache key.\n :param unless: Default None. 
Cache will *always* execute the caching\n facilities unless this callable is true.\n This will bypass the caching entirely.\n \"\"\"\n\n def decorator(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n #: Bypass the cache entirely.\n if callable(unless) and unless() is True:\n return f(*args, **kwargs)\n\n if '%s' in key_prefix:\n cache_key = key_prefix % request.path\n else:\n cache_key = key_prefix\n\n rv = self.cache.get(cache_key)\n if not rv or current_app.debug:\n rv = f(*args, **kwargs)\n self.cache.set(cache_key, rv, timeout=timeout)\n return rv\n return decorated_function\n return decorator\n\n def memoize(self, timeout=None):\n \"\"\"\n Use this to cache the result of a function, taking its arguments into\n account in the cache key.\n\n Information on\n `Memoization <http://en.wikipedia.org/wiki/Memoization>`_.\n\n Example::\n\n @cache.memoize(timeout=50)\n def big_foo(a, b):\n return a + b + random.randrange(0, 1000)\n\n .. code-block:: pycon\n\n >>> big_foo(5, 2)\n 753\n >>> big_foo(5, 3)\n 234\n >>> big_foo(5, 2)\n 753\n\n :param timeout: Default None. If set to an integer, will cache for that\n amount of time.\n \"\"\"\n\n def memoize(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n cache_key = (f.__name__, id(f), args, str(kwargs))\n\n rv = self.cache.get(cache_key)\n if rv is None:\n rv = f(*args, **kwargs)\n self.cache.set(cache_key, rv)\n return rv\n return decorated_function\n return memoize\n","sub_path":"flaskext/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":6206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"313745356","text":"#\n# @copyright (c) 2018 Abelardo López Lagunas\n#\n# @file 4_Recursion.py\n#\n# @author Abelardo López Lagunas\n#\n# @date Wed Jun 13 15:14 2018 -- File created\n# Thu Aug 16 14:52 2018 -- Formatting fixes\n#\n# @brief Example of recursion\n#\n# References:\n# Based on examples form the book:\n#\n# Eric Matthes. “Python Crash Course.” No starch press.\n#\n# Restrictions:\n# There is no attempt to check input parameters\n#\n# Revision history:\n# Wed Jun 13 15:14 2018 -- File created\n# Thu Jun 14 14:19 2018 -- Added time analysis & map\n#\n# @note Intended for the TC2006 class\n#\n# Lets time the execution of the code\n#\nimport timeit\n\n\n#\n# Lets implement the factorial as a traditional recursion\n#\ndef factR(n):\n \"\"\"This is the traditional recursive implementation of\n the factorial\"\"\"\n assert isinstance(n, int) and n >= 1\n return 1 if n <= 1 else n*factR(n-1)\n\n\n#\n# Now implement tail recursion correctly. 
However, there\n# should be no difference from the above since Python does\n# not perform tail recursion optimization\ndef factRR(n):\n \"\"\"This is the correct tail recursion implementation \"\"\"\n assert isinstance(n, int) and n >= 1\n return factSupport(1, n)\n\n\ndef factSupport(product, n):\n if n < 2:\n return product\n else:\n return factSupport(product*n, n-1)\n\n\n#\n# Now implement an iterative version\n#\ndef factI(n):\n \"\"\"This is the traditional iterative implementation of\n the factorial\"\"\"\n assert isinstance(n, int) and n >= 1\n product = 1\n while n >= 1:\n product *= n\n n -= 1\n return product\n\n\n#\n# This the \"main\" program\n#\nprint(\"Comparing recursive vs iterative factorials\\n\")\nnum = 765\n#\n# Compare the different methods using the timeit method over\n# 1000 iterations.\n#\niterations = 1000\nprint(\"Using the timeit.repeat method with \" + str(iterations)\n + \" iterations\\n\")\nl1 = timeit.repeat(stmt=\"factI(num)\", globals=globals(), number=1,\n repeat=iterations)\nl2 = timeit.repeat(stmt=\"factR(num)\", globals=globals(), number=1,\n repeat=iterations)\nl3 = timeit.repeat(stmt=\"factRR(num)\", globals=globals(), number=1,\n repeat=iterations)\n#\n# Now compute the average of the iterations and display the\n# relative speedups.\n#\nt1 = sum(l1)/iterations\nt2 = sum(l2)/iterations\nt3 = sum(l3)/iterations\n\nprint(\"Iterative: \", t1, \"seconds\")\nprint(\"Recursive: \", t2, \"seconds\")\nprint(\"Tail recursive: \", t3, \"seconds\\n\")\nprint(\"So recursive is \", (1-(t1/t2))*100, \"% slower\")\nprint(\"So tail recursive is \", (1-(t1/t3))*100, \"% slower\\n\")\n#\n# Finally show how Python supports higher-order functions\n# through the map function\n#\nprint(\"Apply map to factI over [1..10]\\n\",\n list(map(factI, range(1, 10))), \"\\n\")\n#\n# As a bonus show how everything in Python is an object\n#\nnewFact = factI\nprint(\"Apply map to newFact over [1..10]\\n\",\n list(map(newFact, range(1, 10))))\n","sub_path":"3_python/4_Recursion.py","file_name":"4_Recursion.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"189340127","text":"'''\nGiven a collection of distinct numbers, return all possible permutations.\nFor example,\n[1,2,3] have the following permutations:\n[1,2,3], [1,3,2], [2,1,3], [2,3,1], [3,1,2], and [3,2,1].\n'''\n\n\nclass Solution(object):\n def permute(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n result = []\n self.get_permute1([], nums, result)\n # self.permutation(result, [], nums)\n return result\n\n def get_permute(self, current, num, result):\n if not num:\n result.append(current)\n return\n for i, v in enumerate(num):\n # current.append(num[i])\n temp_list = num[:]\n temp_list.remove(v)\n # del temp_list[i]\n # self.get_permute(current, num[:i] + num[i + 1:], result)\n self.get_permute(current + [v], temp_list, result)\n # current.pop()\n\n def get_permute1(self, current, num, result):\n if not num:\n result.append(current + [])\n return\n for i, v in enumerate(num):\n current.append(num[i])\n temp_list = num[:]\n temp_list.remove(v)\n self.get_permute(current, temp_list, result)\n current.pop()\n\n # def get_permute(self, current, list, result):\n # if len(list)==1:\n # result.append(current)\n # return\n # for i, v in enumerate(list):\n # current.append(list[i])\n # self.get_permute(current, list[i + 1:], result)\n # # current.pop()\n\n def permutation(self, result, current, 
list):\n\n if len(list) == 1:\n result.append(current + \",\" + list[0])\n else:\n for temp_str in list:\n temp_list = list[:]\n temp_list.remove(temp_str)\n self.permutation(result, current + \",\" + temp_str, temp_list)\n\n\nif __name__ == \"__main__\":\n # assert Solution().permute([1, 2, 3]) == [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]\n print(Solution().permute([1, 2, 3]))\n","sub_path":"algorithm/common/permu.py","file_name":"permu.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"309359925","text":"import requests\nimport re\nimport datetime\nimport time\nimport ssl\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom bs4 import BeautifulSoup\nimport urllib.request, urllib.parse, urllib.error\nimport seaborn as sns\nimport itertools\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\n# List of tickers - Benchmark must come LAST!!!\n#tickers = ['XLE', 'XLY', 'XLK', 'XLB', 'VFH', 'XLI', 'XLV', 'XLU', 'XLP', 'IYR', 'SPY']\ntickers = ['EWO', 'EWK', 'EWQ', 'EWG', 'EWI', 'EWN', 'EWP', 'EWD', 'EWL', 'EWU', 'VGK']\nETF_store = [] # Initialize an array for price series\nPE_store = [] # Initialize an array for P/E ratios\nPB_store = [] # Initialize an array for P/B ratios\nPCF_store = [] # Initialize an array for P/CF ratios\nyield_store = [] # Initialize an array for current TTM yields\nstart = datetime.date(2009,1,1) # YYYY,MM,DD\nstart_unix = str(int(time.mktime(start.timetuple()))) # Start in UNIX/Epoch time\nend_unix = str(int(time.time())) # Today in UNIX/Epoch time\nstart_date = datetime.datetime.fromtimestamp(int(start_unix)).strftime('%Y-%m-%d') # Start in date time\nend_date = datetime.datetime.fromtimestamp(int(end_unix)).strftime('%Y-%m-%d') # End in date time\n\n# Main loop for scraping all data\nfor i in range(0, len(tickers)):\n # Retrieve html of ticker[i] for historical data\n url1 = 'https://finance.yahoo.com/quote/'\n url2 = '/history?period1='\n url3 = '&period2='\n url4 = '&interval=1d&filter=history&frequency=1d'\n url = url1 + tickers[i] + url2 + start_unix + url3 + end_unix + url4\n html = requests.get(url)\n html = html.text # Convert to text (adj_close prices not under a nice tag - thus won't use Beautifulsoup)\n trunc_string = '}],\"isPending\"' # Cut off from a point we no longer need html\n truncpos = html.find(trunc_string)\n html = html[:truncpos]\n # Now pull the holdings page - for this we can use BeautifulSoup tags\n url5 = '/holdings?p='\n url_holdings = url1 + tickers[i] + url5 + tickers[i]\n html2 = urllib.request.urlopen(url_holdings).read()\n soup_holdings = BeautifulSoup(html2, 'html.parser')\n\n # Retrieve the valuation ratios\n # P/E Ratio\n PE_box = soup_holdings.find('span', attrs={'data-reactid':'131'}) # Retrieve value\n PE_ratio = float(PE_box.text) # Convert to float\n PE_store.append(PE_ratio) # Store\n # P/B Ratio\n PB_box = soup_holdings.find('span', attrs={'data-reactid': '136'}) # Retrieve value\n PB_ratio = float(PB_box.text) # Convert to float\n PB_store.append(PB_ratio) # Store\n # P/CF Ratio\n PCF_box = soup_holdings.find('span', attrs={'data-reactid': '146'}) # Retrieve value\n PCF_ratio = float(PCF_box.text) # Convert to float\n PCF_store.append(PCF_ratio) # Store\n # Yield\n # Retrieve value - again for this one we'll just use text scrape as no nice tag\n yield_start = 
'yield\":{\"raw\":'\n yield_end = ',\"fmt\"'\n yield_startpos = html.find(yield_start) + len(yield_start) + 1\n yield_endpos = html.find(yield_end, yield_startpos)\n div_yield = 100*float(html[yield_startpos:yield_endpos]) # Convert to float and percents form\n yield_store.append(div_yield) # Store\n\n # Retrieve the adjusted close prices\n adj_close_prices = [] # Initialize an array\n string_start = 'adjclose'\n string_end = '}'\n endpos = 0 # Initialize endpos for first loop\n while endpos > -1:\n startpos = html.find(string_start) + len(string_start) + 2\n endpos = html.find(string_end, startpos)\n adj_close = float(html[startpos:endpos])\n adj_close_prices.append(adj_close) # Store the close price for each date on each loop\n html = html[endpos+1:] # Keep truncating on each loop to remove pirce we just used\n ETF_store.append(adj_close_prices) # Store all ETF price arrays\nETF_store = np.transpose(ETF_store)\nETF_normalized = 100*np.array(ETF_store / ETF_store[-1:]) # Normalize to starting value of 100\nrel_prices = 100*np.transpose(np.transpose(ETF_normalized) / np.transpose(ETF_normalized)[-1:]) # Prices relative to bmark\nrel_df = pd.DataFrame(np.flip(rel_prices, axis=0))\nETF_df = pd.DataFrame(np.flip(ETF_normalized, axis=0))\nn_groups = len(tickers)\nindex = np.arange(n_groups)\nEWMA_200 = []\nfor i in index:\n EWMA = pd.ewma(ETF_df.iloc[:,i], span=200, min_periods=200 - 1)\n EWMA_200.append(EWMA)\nEWMA_200 = np.transpose(np.asarray(EWMA_200))\nEWMA_50 = []\nfor i in index:\n EWMA = pd.ewma(ETF_df.iloc[:,i], span=50, min_periods=50 - 1)\n EWMA_50.append(EWMA)\nEWMA_50 = np.transpose(np.asarray(EWMA_50))\ndist_200 = 100*((ETF_df.as_matrix() - EWMA_200) / EWMA_200)\ndist_50 = 100*((ETF_df.as_matrix() - EWMA_50) / EWMA_50)\ndist_200_current = dist_200[-1]\ndist_50_current = dist_50[-1]\n# Compute the time since last bullish or bearish moving average cross\n# First, get arrays of sign changes logical\nsgn_change_200 = ((np.roll(np.sign(dist_200), 1, axis=0) - np.sign(dist_200)) != 0).astype(int)\nsgn_change_50 = ((np.roll(np.sign(dist_200), 1, axis=0) - np.sign(dist_200)) != 0).astype(int)\n# Find last time there was a nonzero element. 
Flip due to the 1s in the beginning from nan values\nindex_cross_200 = []\nfor i in range(len(tickers) - 1):\n index_cross = next((i for i, x in enumerate(np.flip(sgn_change_200[:,i], axis=0)) if x), None)\n index_cross_200.append(index_cross)\nindex_cross_50 = []\nfor i in range(len(tickers) - 1):\n index_cross = next((i for i, x in enumerate(np.flip(sgn_change_50[:,i], axis=0)) if x), None)\n index_cross_50.append(index_cross)\n\n\n\n# Performance metrics\n# Calculate relative performance\nrel_performance = rel_prices[0, :] / rel_prices[-1, :] - 1\ncolumn_names = ['Relative Return (to ' + tickers[-1] + ') ' + start_date + ' to Present'] # Name column\n# Round values, create DataFrame, and remove the index\ndf_abs_pf = np.round(pd.DataFrame(np.transpose(np.transpose(rel_performance)),\n index=tickers, columns=column_names), 4)[:-1]\n# Sort relative performance descending\ndf_abs_pf = df_abs_pf.sort_values(column_names, ascending=False)\n\n# Plots\nerror_config = {'ecolor': '0.3'}\nbar_width = 0.35\nopacity = 0.8\n# Initialize an array of colors (not happy with this, improvements welcome)\ncolors_line = itertools.cycle(sns.color_palette('husl', len(tickers) - 1))\n# Create a gridspec for subplots (x,y) creates a grid of x by y subgrids\ngs1 = gridspec.GridSpec(8, 8)\n# Position all the subplots as desired\nax = plt.subplot(gs1[0:3, :-4])\nax1 = plt.subplot(gs1[3:6, :-4])\nax1b = plt.subplot(gs1[6:, 0:2])\nax1c = plt.subplot(gs1[6:, 2:4])\nax2 = plt.subplot(gs1[4:6, 4:-2])\nax3 = plt.subplot(gs1[4:6, -2:])\nax4 = plt.subplot(gs1[6:, 4:-2])\nax5 = plt.subplot(gs1[6:, -2:])\nfor i,c in zip(range(len(tickers) - 1),colors_line):\n ax.plot(np.flip(rel_prices, axis=0)[:, i], color=c, linewidth=1) # Plot the relative time series\nax.set_ylabel('Price vs ' + tickers[-1] + ' ( ' + start_date + ' = 100)')\nbox = ax.get_position()\nleg = ax.legend(np.transpose(tickers[:-1]), loc='upper left', bbox_to_anchor=(-0.01, 1.20),\n fancybox=True, shadow=True, ncol=6)\n# Get the individual lines inside legend and increase line width for readability\nfor line in leg.get_lines():\n line.set_linewidth(3)\nax.set_xticklabels([])\nax.text(.12,.9,'Relative vs. 
' + tickers[-1],\n horizontalalignment='center',\n transform=ax.transAxes)\nfor i,c in zip(range(len(tickers) - 1),colors_line):\n ax1.plot(ETF_df.iloc[:, i], color=c, linewidth=0.5) # Plot the normalized time series\nfor i, c in zip(range(len(tickers) - 1), colors_line):\n ax1.plot(EWMA_200[:, i], color=c, dashes=[2,1]) # Plot the long EWMA\nax1.set_ylabel('Price (' + start_date + ' = 100)')\nax1.set_xticklabels([])\nax1.text(.2,.9,'Price (Dashed = 200D EWMA)',\n horizontalalignment='center',\n transform=ax1.transAxes)\n\nfor i,c in zip(range(len(tickers) -1),colors_line):\n ax1b.bar(index[i], dist_200_current[i], bar_width, color=c,\n alpha=opacity) # Bar plot of 200D EWMA Distance\n# Some formatting\nax1b.set_ylim([np.amin(dist_200_current) - 2, np.amax(dist_200_current) + 5])\nax1b.yaxis.set_tick_params(labelsize=6)\nax1b.set_xticks(index[:-1])\nax1b.set_xticklabels(tickers[:-1], rotation=90, fontsize=10)\nax1b.text(.4,.9,'% Distance from 200D EWMA',\n horizontalalignment='center',\n transform=ax1b.transAxes)\nfor i,c in zip(range(len(tickers) -1),colors_line):\n ax1c.bar(index[i], index_cross_200[i], bar_width, color=c,\n alpha=opacity) # Bar plot of Days since last 200D EWMA cross (Bullish OR Bearish)\n# Some formatting\nax1c.set_ylim([0, np.amax(index_cross_200) + 50])\nax1c.yaxis.set_tick_params(labelsize=6)\nax1c.set_xticks(index[:-1])\nax1c.set_xticklabels(tickers[:-1], rotation=90, fontsize=10)\nax1c.text(.3,.9,'Days Since Last Cross',\n horizontalalignment='center',\n transform=ax1c.transAxes)\n\nfor i,c in zip(range(len(tickers) -1),colors_line):\n ax2.bar(index[i], PE_store[i], bar_width, color=c,\n alpha=opacity) # Bar plot of PE ratios\n# Some formatting\nax2.set_ylim([0, np.amax(PE_store) + 5])\nax2.yaxis.set_tick_params(labelsize=6)\nax2.set_xticklabels([])\nax2.axhline(PE_store[-1], color=\"black\")\nax2.text(.5,.85,'P/E Ratios (' + tickers[-1] + ' = Black Line)',\n horizontalalignment='center',\n transform=ax2.transAxes)\nfor i,c in zip(range(len(tickers) - 1),colors_line):\n ax3.bar(index[i], PB_store[i], bar_width, color=c,\n alpha=opacity) # Bar plot of PB ratios\n# Some formatting\nax3.set_ylim([0, np.amax(PB_store) + 1])\nax3.yaxis.set_tick_params(labelsize=6)\nax3.set_xticklabels([])\nax3.axhline(PB_store[-1], color=\"black\")\nax3.text(.5,.85,'P/B Ratios (' + tickers[-1] + ' = Black Line)',\n horizontalalignment='center',\n transform=ax3.transAxes)\nfor i,c in zip(range(len(tickers) - 1),colors_line):\n ax4.bar(index[i], PCF_store[i], bar_width, color=c,\n alpha=opacity) # Bar plot of PCF ratios\n# Some formatting\nax4.set_ylim([0, np.amax(PCF_store) + 2])\nax4.yaxis.set_tick_params(labelsize=6)\nax4.set_xticks(index[:-1])\nax4.set_xticklabels(tickers[:-1], rotation=90, fontsize=10)\nax4.axhline(PCF_store[-1], color=\"black\")\nax4.text(.5,.9,'P/CF Ratios (' + tickers[-1] + ' = Black Line)',\n horizontalalignment='center',\n transform=ax4.transAxes)\nfor i,c in zip(range(len(tickers) - 1),colors_line):\n ax5.bar(index[i], yield_store[i], bar_width, color=c,\n alpha=opacity) # Bar plot of yields\n# Some formatting\nax5.set_ylim([0, np.amax(yield_store) + 1])\nax5.yaxis.set_tick_params(labelsize=6)\nax5.set_xticks(index[:-1])\nax5.set_xticklabels(tickers[:-1], rotation=90, fontsize=10)\nax5.axhline(yield_store[-1], color=\"black\")\nax5.text(.5,.9,'Div Yield (' + tickers[-1] + ' = Black Line)',\n horizontalalignment='center',\n transform=ax5.transAxes)\n# Plot the table of relative performance\ntabax = plt.subplot(gs1[0:4, 
-4:])\ntabax.axis('off')\ntabax.table(cellText=df_abs_pf.values,\n rowLabels=df_abs_pf.index,\n colLabels=df_abs_pf.columns,\n cellLoc='center', rowLoc='center',\n loc='center')\ntabax.axis('off')\ntabax.grid('off')\ntabax.text(.5,1,'ETF Value Monitor',\n horizontalalignment='center',\n transform=tabax.transAxes, fontsize=18)\nplt.show()\n","sub_path":"ETFRelVal.py","file_name":"ETFRelVal.py","file_ext":"py","file_size_in_byte":11388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"555950164","text":"import numpy as np\nfrom scipy.special import poch\nfrom scipy.special import factorial\n\nfrom .zmarray import zmarray\nfrom .zmarray import construct_complex_matrix\n\ndef gpzp_j2nm(j):\n \"\"\"Convert single index j to pair (n, m)\"\"\"\n n = np.floor(np.sqrt(j)).astype(np.int)\n # Here m can be negative\n m = j - n * (n + 1)\n return np.array([n, m]).T\n\n\ndef gpzp_nm2j(n, m):\n \"\"\"Convert pair (n, m) to single index j\"\"\"\n return n * (n + 1) + m\n\ndef grid_rt(size):\n \"\"\"Get grid array r and t\"\"\"\n y, x = np.ogrid[-1:1:1j * size, -1:1:1j * size]\n r = np.sqrt(x * x + y * y)\n t = np.arctan2(y, x)\n return r, t\n\ndef gpzp_get_coeffs(n, m, alpha=0, normalize=True):\n n = np.array(n)\n m = np.array(m)\n n_max = n.max()\n N = n + np.zeros_like(m)\n M = m + np.zeros_like(n)\n if not np.iterable(N):\n N = np.array([N])\n M = np.array([M])\n M_abs = np.abs(M)\n\n A = N + M_abs\n B = N - M_abs\n C = factorial(A + 1) / poch(alpha + 1, A + 1)\n\n l = []\n for i in range(len(N)):\n a, b, c, n = A[i], B[i], C[i], N[i]\n coeffs = [\n c * (-1) ** k * poch(alpha + 1, 2 * n + 1 - k) / (factorial(k) * factorial(a + 1 - k) * factorial(b - k))\n for k in np.arange(0, b + 1)]\n coeffs = [0] * (n_max - n) + coeffs + [0] * (n - b)\n l.append(coeffs)\n c_matrix = np.array(l)\n if normalize:\n c_matrix = c_matrix * (gpzp_norm(N, M, alpha=alpha)[:, np.newaxis])\n return c_matrix\n\n\ndef gpzp_norm(n, m, alpha=0):\n n = np.array(n)\n m = np.array(m)\n n = n + np.zeros_like(m)\n m = m + np.zeros_like(n)\n if not np.iterable(n):\n n = np.array([n])\n m = np.array([m])\n m_abs = np.abs(m)\n a = n + m_abs\n b = n - m_abs\n mask = (m == 0)\n norm = np.zeros(len(n))\n norm[mask] = (np.sqrt((n + alpha / 2 + 1) * poch(b + alpha + 1, 2 * m_abs + 1) / (poch(b + 1, 2 * m_abs + 1))))[\n mask]\n norm[~mask] = (np.sqrt((2 * n + alpha + 2) * poch(b + alpha + 1, 2 * m_abs + 1) / (poch(b + 1, 2 * m_abs + 1))))[\n ~mask]\n return norm\n\n\ndef gpzp_get_Rn_matrix(n, size, alpha=0, weighted=True):\n if not np.iterable(n):\n n = np.array([n])\n else:\n n = np.array(n)\n n_max = n.max()\n\n r, t = grid_rt(size)\n mask = (r <= 1) * 1\n r, t = r * mask, t * mask\n r, mask = r.ravel(), mask.ravel()\n if weighted:\n w = (1 - r) ** (alpha / 2) * mask\n else:\n w = 1.\n # shape of Rn_matrix is (n_max+1, size*size)\n Rn_matrix = np.array([np.power(r, i) * w for i in np.arange(n_max + 1)[::-1]])\n return Rn_matrix\n\n\ndef gpzp_get_mt_matrix(m, size):\n if not np.iterable(m):\n m = np.array([m])\n else:\n m = np.array(m)\n t = grid_rt(size)[1].ravel()\n f = np.array([np.sin, np.cos])\n mask = (m >= 0) * 1\n funcs = f[mask]\n return np.array([func(e * t) for e, func in zip(np.abs(m), funcs)])\n\n\ndef gpzps_from_nm(n, m, size, alpha=0, reshape=True):\n c_matrix = gpzp_get_coeffs(n, m, alpha)\n Rn_matrix = gpzp_get_Rn_matrix(n, size, alpha)\n Rnm_ = c_matrix.dot(Rn_matrix)\n mt_matrix = gpzp_get_mt_matrix(m, size)\n if reshape:\n return (Rnm_ * 
mt_matrix).reshape((c_matrix.shape[0], size, size))\n else:\n return Rnm_ * mt_matrix\n\n\n\ndef _get_m_states(m, states=None):\n if not np.iterable(states):\n if states is None:\n states = list(set(np.abs(m)))\n elif states < 0: # states is negative number\n uniq = np.unique(np.abs(m))\n states = uniq[np.nonzero(uniq)]\n else:\n states = [states]\n else:\n states = np.unique(np.abs(states))\n return states\n\n\nclass GPZPs:\n def __init__(self, n_max, size, alpha=0, states=None):\n self.alpha = alpha\n self.j_max = gpzp_nm2j(n_max, n_max)\n nm = gpzp_j2nm(np.arange(self.j_max+1))\n self.n, self.m = nm[:, 0], nm[:, 1]\n\n states = _get_m_states(self.m, states)\n ind = np.where(np.in1d(np.abs(self.m), states))[0]\n self.n = self.n[ind]\n self.m = self.m[ind]\n\n self.data = gpzps_from_nm(self.n, self.m, size, self.alpha)\n self.moments = None\n\n\n def fit(self, X):\n if len(X.shape) == 3:\n X = X.reshape(X.shape[0], -1)\n N = self.data.shape[0]\n data_ = self.data.reshape(N, -1)\n self.moments = zmarray(X.dot(np.linalg.pinv(data_)), self.n, self.m, mode='pzm')\n\n\n def complex(self):\n if self.dtype != np.complex:\n c_matrix = construct_complex_matrix(self.n, self.m, mode='pzm')\n zm_complex = c_matrix.dot(self.T).T\n # update n and m\n c_matrix[c_matrix == 1j] = 0\n m = c_matrix.dot(np.abs(self.m)).real.astype(np.int)\n n = c_matrix.dot(np.abs(self.n)).real.astype(np.int)\n return zmarray(zm_complex, n, m, mode='pzm')\n else:\n return self\n\n def abs(self):\n if self.dtype != np.complex:\n return self\n else:\n return zmarray(np.abs(self), self.n, self.m, mode='pzm')\n","sub_path":"feature/gpzp_matrix.py","file_name":"gpzp_matrix.py","file_ext":"py","file_size_in_byte":5058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"11750745","text":"from numpy import mean, sum\n\ndef ent_tuple(ent):\n return (ent[\"start\"], ent[\"end\"], ent[\"type\"])\n\ndef ent_tuple_typeless(ent):\n return (ent[\"start\"], ent[\"end\"])\n\n\ndef convert(jdata):\n rels = {\"|micro\": set()}\n ents = {\"|micro\": set()}\n for i,dic in enumerate(jdata):\n entities = dic[\"entities\"]\n for ent in entities:\n start = ent[\"start\"]\n end = ent[\"end\"]\n label = ent[\"type\"]\n if label not in ents:\n ents[label] = set()\n cls = (i, ent_tuple(ent))\n ents[label].add(cls)\n ents[\"|micro\"].add(cls)\n\n for rel in dic[\"relations\"]:\n head = rel[\"head\"]\n tail = rel[\"tail\"]\n label = rel[\"type\"]\n if label not in rels:\n rels[label] = set()\n cls = (i, ent_tuple_typeless(entities[head]), ent_tuple_typeless(entities[tail]), label)\n rels[label].add(cls)\n rels[\"|micro\"].add(cls)\n return ents,rels\n\n\n#Eliminates wrong overlapping entities\ndef convert_pred_jdata(jdata, ents_gt):\n rels = {\"|micro\": set()}\n ents = {\"|micro\": set()}\n for i,dic in enumerate(jdata):\n entities = dic[\"entities\"]\n chosen = [True for i in range(len(entities))]\n if len(entities) == 0:\n continue\n current_end = -1 #entities[0][\"end\"]\n for j,ent in enumerate(entities):\n start = ent[\"start\"]\n end = ent[\"end\"]\n label = ent[\"type\"]\n cls = (i, ent_tuple(ent))\n\n overlaps = False\n if j == 0:\n if len(entities) > 1:\n overlaps = (entities[j+1][\"start\"] < end)\n else:\n if start < current_end: #overlaps\n overlaps = True\n current_end = end\n if overlaps:\n if cls not in ents_gt[label]:\n chosen[j] = False\n continue\n if label not in ents:\n ents[label] = set()\n ents[label].add(cls)\n ents[\"|micro\"].add(cls)\n\n for rel in dic[\"relations\"]:\n head = 
rel[\"head\"]\n tail = rel[\"tail\"]\n label = rel[\"type\"]\n if label not in rels:\n rels[label] = set()\n if chosen[head] and chosen[tail]:\n cls = (i, ent_tuple_typeless(entities[head]), ent_tuple_typeless(entities[tail]), label)\n rels[label].add(cls)\n rels[\"|micro\"].add(cls)\n return ents,rels\n\n\n\n\ndef evaluate(pred, gt):\n gt_ents, gt_rels = convert(gt)\n pred_ents, pred_rels = convert_pred_jdata(pred, gt_ents)\n print(\"Entities\")\n print_metrics(pred_ents, gt_ents)\n print(\"Relations\")\n print_metrics(pred_rels, gt_rels)\n\n\ndef print_metrics(pred, gt):\n total = 0\n precs = []\n recs = []\n f1s = []\n\n for label,s in sorted(gt.items()):\n if label in pred:\n p = pred[label]\n else:\n p = set()\n intersec = p.intersection(s)\n prec = 0\n rec = 0\n f1 = 0\n if len(p) > 0:\n prec = len(intersec) / len(p)\n if len(s) > 0:\n rec = len(intersec) / len(s)\n pr = prec + rec\n if pr > 0:\n f1 = (2 * prec * rec) / pr\n print(label, prec, rec, f1, len(s))\n if label != \"|micro\":\n precs.append(prec)\n recs.append(rec)\n f1s.append(f1)\n total += len(s)\n #print(\"micro\", sum(wprecs)/total, sum(wrecs)/total, sum(wf1s)/total, total)\n print(\"|macro\", mean(precs), mean(recs), mean(f1s), total)\n\n\n#Main\n\nimport sys\nimport json\n\nfile1 = open(sys.argv[1], encoding=\"utf-8\")\nfile2 = open(sys.argv[2], encoding=\"utf-8\")\npred = json.load(file1)\ngt = json.load(file2)\n\nevaluate(pred, gt)\n\n\n\n\n\n\n\n","sub_path":"nerre/spert/json_soft_evaluator.py","file_name":"json_soft_evaluator.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"370578512","text":"\"\"\"\nmake geometry adpative ground truth\n\n\"\"\"\nimport os\nimport sys\nimport cv2\nimport numpy as np\nimport scipy.io as io\nsys.path.append(os.getcwd())\nfrom data.prepare_dataset.density_map_utils import generate_ga_density_map\n\n\"\"\"--------------------config--------------------\"\"\"\ndata_root_path='data/SHB'\ndata_name_list=['train','test']\n\n\"\"\"--------------------Processing---------------\"\"\"\n# process image one by one\nfor data_name in data_name_list:\n print(\"-------------------[{}]-------------------\".format(data_name))\n image_folder_path=os.path.join(data_root_path,data_name,'images')\n gt_folder_path=os.path.join(data_root_path,data_name,'npy_ga_gt')\n if not os.path.exists(gt_folder_path):\n os.mkdir(gt_folder_path)\n image_names=os.listdir(image_folder_path)\n image_names.sort()\n print(\"image num=\",len(image_names))\n\n for i,img_name in enumerate(image_names):\n print(\"[{}/{}]:{}-{}\".format(i+1,len(image_names),data_name,img_name))\n img_path=os.path.join(image_folder_path,img_name)\n point_gt_path=img_path.replace('images','point_gt').replace('jpg','mat').replace('IMG_','GT_IMG_')\n\n # load img\n img = cv2.imread(img_path)\n\n # load point_gt\n mat=io.loadmat(point_gt_path)\n point_gt=mat[\"image_info\"][0,0][0,0][0]\n\n # generate ga_density_map\n point_map=np.zeros((img.shape[0],img.shape[1]))\n for i in range(0, len(point_gt)):\n if int(point_gt[i][1]) < img.shape[0] and int(point_gt[i][0]) < img.shape[1]:\n point_map[int(point_gt[i][1]), int(point_gt[i][0])] = 1\n density_map=generate_ga_density_map(point_map,max_size=48,dis_sigma_rate=7)\n\n # save density map\n gt_path = os.path.join(gt_folder_path, img_name.replace('jpg', 'npy'))\n 
np.save(gt_path,density_map)\n\n","sub_path":"data/prepare_dataset/SHB/make_ga_gt.py","file_name":"make_ga_gt.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"600638059","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef stationary_distribution(transition_matrix):\n # Correct very small numbers to zero\n transition_matrix[np.abs(transition_matrix) < 1e-15] = 0\n \n # Calculate eigenvalues and eigenvectors\n eigvalvec = np.linalg.eig(transition_matrix.T)\n\n # Get position of eigenvalue, where eigenvalue equals 1\n eigval_pos = int(np.argwhere(np.isclose(eigvalvec[0], 1)))\n\n # Get eigenvectors for chosen eigenvalue and norm it, such that the sum equals 1\n vec = np.abs(eigvalvec[1][:, eigval_pos])\n return vec / np.sum(vec)\n\ndef transition_entropy(transition): # entropy rate\n P = transition\n # Correct very small numbers to zero\n P[np.abs(P) < 1e-15] = 0\n\n # Calculate stationary distribution\n m = stationary_distribution(P)\n\n H = 0\n for i in range(np.shape(P)[0]):\n for j in range(np.shape(P)[1]):\n # Only calculate entropy, if P_ij is non zero, otherwise numpy will throw an error\n if P[i,j] != 0:\n H += m[i]*P[i,j]*np.log(P[i,j])\n return -H\n\n#ar = np.array([[0.25, 0.25, 0.25, 0.25],\n# [0.25, 0.25, 0.25, 0.25],\n# [0.25, 0.25, 0.25, 0.25],\n# [0.25, 0.25, 0.25, 0.25]])\n#ar = np.array([[0.05, 0.85, 0.05, 0.05],\n# [0.05, 0.05, 0.85, 0.05],\n# [0.05, 0.05, 0.05, 0.85],\n# [0.85, 0.05, 0.05, 0.05]])\n#st = transition_entropy(ar)\n#print(st)\n\n\ntransitions = []\niterate = np.array([0., 0.0001, 0.0005, 0.001, 0.002, 0.005, 0.01, 0.015, 0.02, 0.03, 0.04, 0.05, 0.075, 0.1, 0.125, 0.15, 0.2, 0.23, 0.24, 0.245, 0.25])\nfor it in iterate:\n transitions.append([[it, 1.-(3*it), it, it],\n [it, it, 1.-(3*it), it],\n [it, it, it, 1.-(3*it)],\n [1.-(3*it), it, it, it]])\n\nit_signal = np.array([[0, 1, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n [1, 0, 0, 0]])\nit_noise = np.array([[0.25, 0.25, 0.25, 0.25],\n [0.25, 0.25, 0.25, 0.25],\n [0.25, 0.25, 0.25, 0.25],\n [0.25, 0.25, 0.25, 0.25]])\n\ntt = np.array(transitions)\n\np2_signal = np.sqrt(np.sum(np.square(tt-it_signal), axis=(1,2)))\np2_noise = np.sqrt(np.sum(np.square(tt-it_noise), axis=(1,2)))\n\n# Calculate entropy\ntransition_entropy = np.array([transition_entropy(t) for t in tt])\n\n# Plot\nplt.errorbar(transition_entropy, p2_signal, label='Signal error')\nplt.errorbar(transition_entropy, p2_noise, label='Noise error')\nplt.ylim(ymin=0)\nplt.xlabel('entropy rate')\nplt.ylabel('p2 error regarding signal/noise')\nplt.savefig('correlation_errors_entropy_signalnoise.svg', format='svg', transparent=True)\nplt.close()\n","sub_path":"michaelis/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"561668633","text":"import pandas as pd\nimport numpy as np\n\nfrom time import sleep\nfrom time import time\n\nfrom warnings import filterwarnings\n\nfrom concurrent.futures import ProcessPoolExecutor, as_completed\nfrom multiprocessing import cpu_count\n\nfrom scipy.optimize import minimize as minimize\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv('datapoints.csv')\n\n\ndef fun_RBF(X, omega, N, sigma):\n\n n = X.shape[1]\n v = omega[0:N].reshape(N, 1)\n c = omega[N:].reshape(N, n)\n\n c_array = np.tile(c.reshape(-1), X.shape[0])\n X_array = 
np.tile(X, N).reshape(-1)\n\n mat = ((c_array - X_array).reshape(X.shape[0], N, 2)) ** 2\n\n col = mat[:, :, 0] + mat[:, :, 1]\n col = np.exp(-col / (sigma ** 2))\n\n return np.dot(col, v)\n\n\ndef fun_grad_RBF(omega, X, y_true, N, sigma, rho):\n\n n = X.shape[1]\n v = omega[0:N].reshape(N, 1)\n c = omega[N:].reshape(N, n)\n\n ### dE_dv\n c_array = np.tile(c.reshape(-1), X.shape[0])\n X_array = np.tile(X, N).reshape(-1)\n\n # ||X-c||**2 matrix\n mat = ((c_array - X_array).reshape(X.shape[0], N, 2)) ** 2\n col = mat[:, :, 0] + mat[:, :, 1]\n\n # activation function\n col = np.exp(-col / (sigma ** 2))\n\n # dE_dv\n dE_dv = np.dot((fun_RBF(X, omega, N, sigma).T - y_true), col) / X.shape[0] + 2 * rho * v.T\n dE_dv = dE_dv.reshape(-1, 1)\n\n ### dE_dc\n mat1 = (-(c_array - X_array)).reshape(X.shape[0], N, 2)\n mat1 = mat1[:, :, 0]\n mat1 = 2 * (col * v.T * mat1) / (sigma ** 2)\n mat1 = np.dot((fun_RBF(X, omega, N, sigma).T - y_true), mat1) / X.shape[0]\n\n mat2 = (-(c_array - X_array)).reshape(X.shape[0], N, 2)\n mat2 = mat2[:, :, 1]\n mat2 = 2 * (col * v.T * mat2) / (sigma ** 2)\n mat2 = np.dot((fun_RBF(X, omega, N, sigma).T - y_true), mat2) / X.shape[0]\n\n fusion = np.append(mat1.T, mat2.T, axis=1)\n\n # dE_dc\n dE_dc = fusion + 2 * rho * c\n\n return np.concatenate((dE_dv.reshape(1, -1), dE_dc.reshape(1, -1)), axis=1).reshape(-1)\n\ndef loss(omega, X, y_true, N, sigma, rho):\n y_pred = fun_RBF(X,omega, N, sigma).reshape(1,-1)\n l = np.sum((y_pred - y_true)**2)/(2 * X.shape[0]) + rho * np.linalg.norm(omega)**2\n return l\n\ndef MSE(y_true, y_pred):\n \"\"\"\n Compute the Mean Squared Error from y_true and y_predicted\n \"\"\"\n # reshape y's in order to do not have errors\n y_true = y_true.reshape(-1,)\n y_pred = y_pred.reshape(-1,)\n return np.mean(np.square(y_true - y_pred)) / 2\n\n\nX = df[['x1', 'x2']].to_numpy()\n\nX_train = np.copy(X)\n\ny_true = df[['y']].to_numpy().reshape(1,-1)\n\n\n\ndef grid_search(X, y, N, K=5):\n # reshaping y array (for Kfold split)\n y = y.reshape(-1, 1)\n\n n = X.shape[1]\n res_list = []\n sigma_list = [0.2, 0.4, 0.6, 0.8, 1, 1.2, 1.5, 1.8]\n rho_list = [0.00001, 0.00005, 0.0001, 0.0005, 0.001]\n\n print('----------- Started routine for N = {}\\n'.format(N), end='')\n\n for sigma in sigma_list:\n for rho in rho_list:\n # ---- DEBUG\n # print('N', N, 'rho:', rho, 'sigma', sigma)\n print('N: {}, sigma: {}, rho: {}' .format(N,sigma,rho))\n ### parameters initialization\n omega = np.random.randn(N + N * n)\n\n # create result list for k_train and k_validation\n train_error = loss(omega, X, y, N, sigma, rho)\n k_train_err = []\n k_val_err = []\n func_eval = []\n time_exec = []\n nfev = []\n nit = []\n njev = []\n\n # ---- DEBUG\n # print('N: {}, simga: {}'.format(N, sigma))\n\n k_fold = KFold(K, shuffle=True)\n for train_indices, val_indices in k_fold.split(X):\n X_train, y_train = X[train_indices], y[train_indices]\n X_val, y_val = X[val_indices], y[val_indices]\n\n # now that we split, we need to readjust y vectors\n\n y_train, y_val = y_train.reshape(1, -1), y_val.reshape(1, -1)\n\n # train the model with gradient\n t1 = time()\n res = minimize(loss, omega, jac=fun_grad_RBF, args=(X_train, y_train, N, sigma, rho))\n time_exec.append(time() - t1)\n\n # error on the train and validation\n k_train_err.append(loss(res.x, X_train, y_train, N, sigma, rho))\n\n # on the validation we use MSE\n y_pred = fun_RBF(X_val, res.x, N, sigma)\n k_val_err.append(MSE(y_val, y_pred))\n\n # store results\n nfev.append(res.nfev)\n nit.append(res.nit)\n njev.append(res.njev)\n\n # 
---- DEBUG ----\n # print('N', N)\n # print('rho', rho)\n # print('res.success', res.success)\n # print('k_train', k_train_err)\n # print('k_val_err', k_val_err)\n # print('time_exec', time_exec)\n # print('nfev', nfev)\n # print('nit', nit)\n # print('njev', njev)\n\n # create a list and append it to res_list\n res_list.append(['Grad', K, N, sigma, rho, res.success, train_error, np.mean(k_train_err),\n np.mean(k_val_err), np.mean(time_exec),\n int(np.mean(nfev)), int(np.mean(nit)), int(np.mean(njev))])\n sleep(0.4)\n print('----------- N: {} ===> end' .format(N))\n return res_list\n\n# split train and test sets\nX_train, X_test, y_train, y_test = train_test_split(X, y_true.reshape(-1,1), test_size=0.15, random_state=1869097)\n\n\nfilterwarnings('ignore')\nN_list = [5, 10, 15, 20, 25, 30]\nK = 5\nncpus = cpu_count()\nresults = []\nwith ProcessPoolExecutor(max_workers=ncpus) as executor:\n futures = list((executor.submit(grid_search, X_train, y_train, N, K) for N in N_list))\n\nfor future in as_completed(futures):\n results += future.result()\n\n\nfinal_res = pd.DataFrame(results, columns=['gradient', 'K', 'N', 'sigma', 'rho', 'success', 'train_error', 'train_error_fit',\n 'validation_error', 'time_exec(s)', 'nfev', 'nit', 'njev'])\n\n\nfinal_res.to_csv('KFOLD_RBF.csv', index=False)","sub_path":"GridSearch_KFoldCV/k_fold_RBF.py","file_name":"k_fold_RBF.py","file_ext":"py","file_size_in_byte":6133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"465178057","text":"# return platform.uname().node\n# import os\n# return os.uname().nodename\nfrom collections import namedtuple\nimport platform\n\nNodeConfig = namedtuple(\"NodeConfig\", [\"workingDir\", \"IMAGES_PER_GPU\", \"visualize\"])\n\nnodesConfigs = {\n \"trevol-gpu-nb\": NodeConfig(\n workingDir=\"/HDD_DATA/nfs_share/mask-rcnn/pins/rough_dataset\",\n IMAGES_PER_GPU=1,\n visualize=True\n ),\n \"trevol-gpu-server\": NodeConfig(\n workingDir=\"/trevol_gpu_nb_share/mask-rcnn/pins/rough_dataset\",\n IMAGES_PER_GPU=2,\n visualize=False\n ),\n}\n\nnodeConfig = nodesConfigs.get(platform.node())\n","sub_path":"samples/iterative_training/pins/rough_dataset/node_config.py","file_name":"node_config.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"358061951","text":"from linkedqueue import *\n\n\nclass Condition(object):\n def __init__(self, rank):\n self._rank = rank\n\n def __ge__(self, other):\n \"\"\"Used for comparisons.\"\"\"\n return self._rank >= other._rank\n\n def __str__(self):\n if self._rank == 1: return \"critical\"\n elif self._rank == 2: return \"serious\"\n else: return \"fair\"\n\n\nclass Patient(object):\n def __init__(self, name, condition):\n self._name = name\n self._condition = condition\n\n def __ge__(self, other):\n \"\"\"Used for comparisons.\"\"\"\n return self._condition >= other._condition\n\n def __str__(self):\n return self._name + \" / \" + str(self._condition)\n\n\nclass ERModel:\n def __init__(self):\n self.patient_list = LinkedQueue()\n self.fair_patients = LinkedQueue()\n self.serious_patients = LinkedQueue()\n self.critical_patients = LinkedQueue()\n\n def isEmpty(self):\n res = len(self.fair_patients) + len(self.serious_patients) + len(self.critical_patients)\n print(res)\n if res > 0:\n return False\n else:\n return True\n\n def _treatNext(self):\n if len(self.critical_patients) != 0:\n p = self.critical_patients.peek()\n self.critical_patients.pop()\n 
return p\n elif len(self.serious_patients) != 0:\n p = self.serious_patients.peek()\n self.serious_patients.pop()\n return p\n elif len(self.fair_patients) != 0:\n p = self.fair_patients.peek()\n self.fair_patients.pop()\n return p\n\n def schedule(self, patient):\n if str(patient._condition) == 'fair':\n self.fair_patients.add(patient._name)\n return self.fair_patients\n elif str(patient._condition) == 'serious':\n self.serious_patients.add(patient._name)\n return self.serious_patients\n elif str(patient._condition) == 'critical':\n self.critical_patients.add(patient._name)\n return self.critical_patients\n","sub_path":"ticketcounter/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"232963910","text":"import os\n# noinspection PyMethodMayBeStatic,PyMethodMayBeStatic,PyMethodMayBeStatic\n\nclass Utility:\n\n def __init__(self):\n self.data_agg = {}\n self.type_switch = {\n 'list': self._append_item,\n 'float': self._add_float,\n 'string': self._add_string\n }\n self.header_type = []\n self.header = []\n self.key_index = 0\n\n def _append_item(self, key, sub_key, value):\n \"\"\"\n Appends an item to the sub key value which is\n a value of key. If a key does not exist it\n is added\n\n :param key: 'key' of dict\n :param sub_key: 'sub_key' of 'key' value\n :param value: Value to append to 'sub_key'\n \"\"\"\n self.data_agg.setdefault(key, {}).setdefault(sub_key, []).append(value)\n\n def _add_float(self, key, sub_key, value):\n \"\"\"\n Adds a float value to the sub key value which is\n a value of key. If a key does not exist it\n is added\n\n :param key: 'key' of dict\n :param sub_key: 'sub_key' of 'key' value\n :param value: Value to add to 'sub_key'\n \"\"\"\n self.data_agg[key][sub_key] = self.data_agg.setdefault(key, {})\\\n .get(sub_key, 0.0) + value\n\n def _add_string(self, key, sub_key, value):\n \"\"\"\n Assigns a string to the sub key value which is\n a value of key. If a key does not exist it\n is added\n\n :param key: 'key' of dict\n :param sub_key: 'sub_key' of 'key' value\n :param value: String to assign to 'sub_key'\n \"\"\"\n self.data_agg.setdefault(key, {}).setdefault(sub_key, value)\n\n def aggregate_data(self, data):\n \"\"\"\n Groups data based on the 'key' value\n\n :param data: List of data elements\n \"\"\"\n if data:\n key = data[self.key_index]\n for i, v in enumerate(data):\n header_name = self.header_type[i]\n if (header_name == 'ignore') or (header_name == 'key'):\n continue\n else:\n sub_key = self.header[i]\n self.type_switch[self.header_type[i]](key, sub_key, v)\n\n def count_items(self, data, key=None):\n \"\"\"\n Calculates the frequency of items in data. Data\n must be iterable, and a key must be provided if\n iterating through a list of dictionaries\n\n :param data: Iterable data structure\n :param key: Key of dictionary\n :return: Dictionary of {string: integer}\n \"\"\"\n counts = {}\n if key:\n for item in data:\n counts[item[key]] = counts.get(item[key], 0) + 1\n else:\n for i in data:\n counts[i] = counts.get(i, 0) + 1\n return counts\n\n def get_files_dir(self, f_dir, ext=None):\n \"\"\"\n Returns the files in a given directory. 
ext can be\n assigned to a file extension to return only these\n file types\n\n :param f_dir: Directory path\n :param ext: String of file extension\n :return: list of strings\n \"\"\"\n if ext:\n return [i for i in os.listdir(f_dir) if i.endswith(ext)]\n else:\n return os.listdir(f_dir)\n\n def get_csv_header(self, f_obj, delimiter=None):\n \"\"\"\n Retrieves the header of a CSV file. Assumes\n the header is the first line of the file.\n\n :param f_obj: Opened file object\n :param delimiter: Delimiter fo file\n :return: list of strings\n \"\"\"\n header = next(f_obj).strip()\n if delimiter:\n return header.split(delimiter)\n else:\n return header\n","sub_path":"DrDataScience/python/utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"500170801","text":"# Copyright (c) 2017 UFCG-LSD.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport configparser\nfrom monitor.utils.logger import Log\n\nLOG_FILE = \"progress.log\"\nLOG_NAME = \"kubejobs-progress\"\nLOG = Log(LOG_NAME, LOG_FILE)\n\nCONFIG_PATH = \"./data/conf\"\n\ntry:\n # Conf reading\n config = configparser.RawConfigParser()\n config.read('./monitor.cfg')\n\n \"\"\" General configuration \"\"\"\n address = config.get('general', 'host')\n port = config.getint('general', 'port')\n plugins = config.get('general', 'plugins').split(',')\n use_debug = config.get('general', 'debug')\n retries = config.getint('general', 'retries')\n\n \"\"\" Validate if really exists a section to listed plugins \"\"\"\n for plugin in plugins:\n if plugin != '' and plugin not in config.sections():\n raise Exception(\"plugin '%s' section missing\" % plugin)\n\n # Setting default value\n k8s_manifest = CONFIG_PATH\n if 'kubejobs' in plugins:\n # If explicitly stated in the cfg file, overwrite the variable\n if(config.has_section('kubejobs')):\n if(config.has_option('kubejobs', 'k8s_manifest')):\n k8s_manifest = config.get('kubejobs', 'k8s_manifest')\n\n if 'monasca' in plugins:\n \"\"\" Monasca parameters \"\"\"\n monasca_endpoint = config.get('monasca', 'monasca_endpoint')\n monasca_username = config.get('monasca', 'username')\n monasca_password = config.get('monasca', 'password')\n monasca_auth_url = config.get('monasca', 'auth_url')\n monasca_project_name = config.get('monasca', 'project_name')\n monasca_api_version = config.get('monasca', 'api_version')\n\nexcept Exception as e:\n LOG.log(\"Error: %s\" % e)\n quit()\n","sub_path":"monitor/service/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"182223264","text":"import numpy as np \nimport matplotlib.pyplot as plt \nfrom data import *\nfrom activation import *\nfrom loss import *\nfrom utils import *\n\nnp.random.seed(0) # deterministic randomization\n\n# hyperparameters\ninput_unit = 2 # number of input units\nhidden_unit = 8 # number 
of hidden units\noutput_unit = 1 # number of output units\nbinary_bit = 4 # number of bits \ndisplay_step = 10000 # when model displays results\nlargest_num = 2**binary_bit-1 #the largest number with given number of bits\nnum_ex=50 # number of training examples we want to generate\nalpha = 0.001 # learning rate\nmaxVal = 10\nminVal = -10\n# generate data\nm,n,p = datagen(num_ex,binary_bit)\n\nm_seq = np.zeros((m.shape[0],binary_bit))\nn_seq = np.zeros((n.shape[0],binary_bit))\np_seq = np.zeros((p.shape[0],binary_bit))\n\nfor ex_index in range(m.shape[0]):\n\tm_seq[ex_index] = [float(bits) for bits in int2binary(m[ex_index][0],binary_bit)]\n\tn_seq[ex_index] = [float(bits) for bits in int2binary(n[ex_index][0],binary_bit)]\n\tp_seq[ex_index] = [float(bits) for bits in int2binary(p[ex_index][0],binary_bit)]\n\n# parameters\n## memory cell parameters\nwcx = 2*np.random.random((hidden_unit,input_unit)) - 1\nwca = 2*np.random.random((hidden_unit,hidden_unit)) - 1\nbc = 2*np.random.random((hidden_unit,1)) - 1\n## update-gate parameters\nwux = 2*np.random.random((hidden_unit,input_unit)) - 1\nwua = 2*np.random.random((hidden_unit,hidden_unit)) - 1\nbu = 2*np.random.random((hidden_unit,1)) - 1\n## forget-gate parameters\nwfx = 2*np.random.random((hidden_unit,input_unit)) - 1\nwfa = 2*np.random.random((hidden_unit,hidden_unit)) - 1\nbf = 2*np.random.random((hidden_unit,1)) - 1\n## output-gate parameters\nwox = 2*np.random.random((hidden_unit,input_unit)) - 1\nwoa = 2*np.random.random((hidden_unit,hidden_unit)) - 1\nbo = 2*np.random.random((hidden_unit,1)) - 1\n## predict parameters\nwya = 2*np.random.random((output_unit,hidden_unit)) - 1\nby = 2*np.random.random((output_unit,1)) - 1\n\na = {0: np.zeros((hidden_unit,num_ex))}\nc_tol = {}\nc = {0: np.zeros((hidden_unit,num_ex))}\npred = {}\n\nda = {}\ndc = {}\ndc_tol = {}\n\nj = 0\nerr = []\ntry:\n\twhile True:\n\t\toverall = 0.\n\t\t# derivative\n\t\tdwcx = np.zeros_like(wcx)\n\t\tdwca = np.zeros_like(wca)\n\t\tdbc = np.zeros_like(bc)\n\n\t\tdwux = np.zeros_like(wux)\n\t\tdwua = np.zeros_like(wua)\n\t\tdbu = np.zeros_like(bu)\n\n\t\tdwfx = np.zeros_like(wfx)\n\t\tdwfa = np.zeros_like(wfa)\n\t\tdbf = np.zeros_like(bf)\n\n\t\tdwox = np.zeros_like(wox)\n\t\tdwoa = np.zeros_like(woa)\n\t\tdbo = np.zeros_like(bo)\n\n\t\tdwya = np.zeros_like(wya)\n\t\tdby = np.zeros_like(by)\n\n\t\tfor time in range(1,binary_bit+1):\n\t\t\t# Forward pass\n\t\t\tx = np.array([m_seq[:,binary_bit-time],n_seq[:,binary_bit-time]])\n\t\t\ty = np.expand_dims(p_seq[:,binary_bit-time],axis=0)\n\n\t\t\tc_tol[time] = tanh(np.dot(wca,a[time-1]) + np.dot(wcx,x) + bc)\n\n\t\t\tu_gate = sigmoid(np.dot(wua,a[time-1]) + np.dot(wux,x) + bu) \n\t\t\tf_gate = sigmoid(np.dot(wfa,a[time-1]) + np.dot(wfx,x) + bf) \n\t\t\to_gate = sigmoid(np.dot(woa,a[time-1]) + np.dot(wox,x) + bo) \n\n\t\t\tc[time] = u_gate*c_tol[time] + f_gate*c[time-1]\n\n\t\t\ta[time] = o_gate*tanh(c[time])\n\n\t\t\tpred[binary_bit-time] = sigmoid(np.dot(wya,a[time])+by)\n\n\t\t\toverall += crossentropy(pred[binary_bit-time],y)\n\n\t\t\t# Backpropagation\n\t\t\terror = pred[binary_bit-time] - y\n\t\t\tdwya_update = error.dot(a[time].T)\n\t\t\tnp.clip(dwya_update,minVal,maxVal,out=dwya_update)\n\t\t\tdwya += dwya_update\n\t\t\tdby_update = np.sum(error,axis=1,keepdims=True)\n\t\t\tnp.clip(dby_update,minVal,maxVal,out=dby_update)\n\t\t\tdby += dby_update\n\n\t\t\tda[time] = wya.T.dot(error)\n\t\t\tdo_gate = da[time]*tanh(c[time])*sigmoid(np.dot(woa,a[time-1]) + np.dot(wox,x) + bo, deriv=True)\n\t\t\tdwoa_update = 
do_gate.dot(a[time-1].T)\n\t\t\tnp.clip(dwoa_update,minVal,maxVal,out=dwoa_update)\n\t\t\tdwoa += dwoa_update\n\t\t\tdwox_update = do_gate.dot(x.T)\n\t\t\tnp.clip(dwox_update,minVal,maxVal,out=dwox_update)\n\t\t\tdwox += dwox_update\n\t\t\tdbo_update = np.sum(do_gate,axis=1,keepdims=True)\n\t\t\tnp.clip(dbo_update,minVal,maxVal,out=dbo_update)\n\t\t\tdbo += dbo_update\n\n\t\t\tdc[time] = da[time]*o_gate\n\t\t\tdf_gate = dc[time]*c[time-1]*sigmoid(np.dot(wfa,a[time-1]) + np.dot(wfx,x) + bf, deriv=True)\n\t\t\tdwfa_update = df_gate.dot(a[time-1].T)\n\t\t\tnp.clip(dwfa_update,minVal,maxVal,out=dwfa_update)\n\t\t\tdwfa += dwfa_update\n\t\t\tdwfx_update = df_gate.dot(x.T)\n\t\t\tnp.clip(dwfx_update,minVal,maxVal,out=dwfx_update)\n\t\t\tdwfx += dwfx_update\n\t\t\tdbf_update = np.sum(df_gate,axis=1,keepdims=True)\n\t\t\tnp.clip(dbf_update,minVal,maxVal,out=dbf_update)\n\t\t\tdbf += dbf_update\n\n\t\t\tdu_gate = dc[time]*c_tol[time]*sigmoid(np.dot(wua,a[time-1]) + np.dot(wux,x) + bu, deriv=True)\n\t\t\tdwua_update = du_gate.dot(a[time-1].T)\n\t\t\tnp.clip(dwua_update,minVal,maxVal,out=dwua_update)\n\t\t\tdwua += dwua_update\n\t\t\tdwux_update = du_gate.dot(x.T)\n\t\t\tnp.clip(dwux_update,minVal,maxVal,out=dwux_update)\n\t\t\tdwux += dwux_update\n\t\t\tdbu_update = np.sum(du_gate,axis=1,keepdims=True)\n\t\t\tnp.clip(dbu_update,minVal,maxVal,out=dbu_update)\n\t\t\tdbu += dbu_update\n\n\t\t\tdc_tol[time] = dc[time]*u_gate*tanh(np.dot(wca,a[time-1]) + np.dot(wcx,x) + bc, deriv=True)\n\t\t\tdwca_update = dc_tol[time].dot(a[time-1].T)\n\t\t\tnp.clip(dwca_update,minVal,maxVal,out=dwca_update)\n\t\t\tdwca += dwca_update\n\t\t\tdwcx_update = dc_tol[time].dot(x.T)\n\t\t\tnp.clip(dwcx_update,minVal,maxVal,out=dwcx_update)\n\t\t\tdwcx += dwcx_update\n\t\t\tdbc_update = np.sum(dc_tol[time],axis=1,keepdims=True)\n\t\t\tnp.clip(dbc_update,minVal,maxVal,out=dbc_update)\n\t\t\tdbc += dbc_update\n\n\t\twcx -= alpha*dwcx \n\t\twca -= alpha*dwca \n\t\tbc -= alpha*dbc\n\n\t\twux -= alpha*dwux \n\t\twua -= alpha*dwua \n\t\tbu -= alpha*dbu\n\n\t\twfx -= alpha*dwfx \n\t\twfa -= alpha*dwfa \n\t\tbf -= alpha*dbf\n\n\t\twox -= alpha*dwox \n\t\twoa -= alpha*dwoa \n\t\tbo -= alpha*dbo\n\n\t\twya -= alpha*dwya\n\t\tby -= alpha*dby\n\n\t\terr.append(overall)\n\n\t\tif j%display_step==0:\n\t\t\tprint('--------------------------')\n\t\t\tprint('Iteration %d'%j)\n\t\t\tprint('Loss %s'%overall)\n\t\t\ttest = np.random.randint(m.shape[0])\n\t\t\tprediction = [int(pred[i][0,test] >= 0.5) for i in range(binary_bit)]\n\t\t\tprint('%d + %d = %d'%(int(m[test]),int(n[test]),binary2int(prediction)))\n\t\t\tprint('--------------------------')\n\t\tj+=1\nexcept KeyboardInterrupt as e:\n\tplt.figure(figsize=(20,10))\n\tplt.title('loss through each iteration')\n\tplt.plot(np.arange(1,1+len(err)),err,label='loss')\n\tplt.xlabel('iteration')\n\tplt.ylabel('loss')\n\tplt.legend()\n\tplt.savefig('w_gradient_clipping.png',dpi=300)\n\tplt.show()\n\nelse:\n\tpass\nfinally:\n\tpass\n\n\t\t\n\t\n\n\n\n\n\n\n","sub_path":"demo_w_gradient_clipping.py","file_name":"demo_w_gradient_clipping.py","file_ext":"py","file_size_in_byte":6316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"519213034","text":"Sec = input('Digite o valor em segundos a ser convertido: ')\n\nSec = int(Sec)\n\nHr = Sec // 3600\nHrR = Sec % 3600\nMin = HrR // 60\nSecR = Sec % 60\n\nDay = 0\nwhile (Hr >= 24):\n Hr = Hr - 24\n Day = Day + 1\n\nprint(Day, 'dias, ', Hr, 'horas, ', Min, 'minutos e ', SecR, 
'segundos')","sub_path":"Python/Converter.py","file_name":"Converter.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"356141502","text":"# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport pickle\nfrom logging import getLogger\nfrom typing import List, Dict, Tuple, Optional\nimport itertools\n\nfrom fuzzywuzzy import fuzz\nimport pymorphy2\nimport nltk\n\nfrom deeppavlov.core.common.registry import register\nfrom deeppavlov.core.models.serializable import Serializable\nfrom deeppavlov.core.models.component import Component\nfrom deeppavlov.models.spelling_correction.levenshtein.levenshtein_searcher import LevenshteinSearcher\n\nlog = getLogger(__name__)\n\n\n@register('entity_linker')\nclass EntityLinker(Component, Serializable):\n \"\"\"\n This class extracts from Wikidata candidate entities for the entity mentioned in the question and then extracts\n triplets from Wikidata for the extracted entity. Candidate entities are searched in the dictionary where keys\n are titles and aliases of Wikidata entities and values are lists of tuples (entity_title, entity_id,\n number_of_relations). First candidate entities are searched in the dictionary by keys where the keys are\n entities extracted from the question, if nothing is found entities are searched in the dictionary using\n Levenstein distance between the entity and keys (titles) in the dictionary.\n \"\"\"\n\n LANGUAGES = set(['rus'])\n\n def __init__(self, load_path: str, wiki_filename: str, entities_filename: str, inverted_index_filename: str,\n id_to_name_file: str, lemmatize: bool = True, debug: bool = False, rule_filter_entities: bool = True,\n use_inverted_index: bool = True, language: str = 'rus', *args, **kwargs) -> None:\n \"\"\"\n\n Args:\n load_path: path to folder with wikidata files\n wiki_filename: file with Wikidata triplets\n entities_filename: file with dict of entity titles (keys) and entity ids (values)\n inverted_index_filename: file with dict of words (keys) and entities containing these words (values)\n id_to_name_file: file with dict of entity ids (keys) and entities names and aliases (values)\n lemmatize: whether to lemmatize tokens of extracted entity\n debug: whether to print entities extracted from Wikidata\n rule_filter_entities: whether to filter entities which do not fit the question\n use_inverted_index: whether to use inverted index for entity linking\n language - the language of the linker (used for filtration of some questions to improve overall performance)\n *args:\n **kwargs:\n \"\"\"\n super().__init__(save_path=None, load_path=load_path)\n self.morph = pymorphy2.MorphAnalyzer()\n self.lemmatize = lemmatize\n self.debug = debug\n self.rule_filter_entities = rule_filter_entities\n self.use_inverted_index = use_inverted_index\n self._language = language\n if language not in self.LANGUAGES:\n log.warning(f'EntityLinker supports only the 
following languages: {self.LANGUAGES}')\n\n self._wiki_filename = wiki_filename\n self._entities_filename = entities_filename\n self.inverted_index_filename = inverted_index_filename\n self.id_to_name_file = id_to_name_file\n\n self.name_to_q: Optional[Dict[str, List[Tuple[str]]]] = None\n self.wikidata: Optional[Dict[str, List[List[str]]]] = None\n self.inverted_index: Optional[Dict[str, List[Tuple[str]]]] = None\n self.id_to_name: Optional[Dict[str, Dict[List[str]]]] = None\n self.load()\n if self.use_inverted_index:\n alphabet = \"abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя1234567890-_()=+!?.,/;:&@<>|#$%^*\"\n dictionary_words = list(self.inverted_index.keys())\n self.searcher = LevenshteinSearcher(alphabet, dictionary_words)\n\n def load(self) -> None:\n if self.use_inverted_index:\n with open(self.load_path / self.inverted_index_filename, 'rb') as inv:\n self.inverted_index = pickle.load(inv)\n self.inverted_index: Dict[str, List[Tuple[str]]]\n with open(self.load_path / self.id_to_name_file, 'rb') as i2n:\n self.id_to_name = pickle.load(i2n)\n self.id_to_name: Dict[str, Dict[List[str]]]\n else:\n with open(self.load_path / self._entities_filename, 'rb') as e:\n self.name_to_q = pickle.load(e)\n self.name_to_q: Dict[str, List[Tuple[str]]]\n with open(self.load_path / self._wiki_filename, 'rb') as w:\n self.wikidata = pickle.load(w)\n self.wikidata: Dict[str, List[List[str]]]\n\n def save(self) -> None:\n pass\n\n def __call__(self, entity: str, question_tokens: List[str]) -> Tuple[List[List[List[str]]], List[str]]:\n confidences = []\n srtd_cand_ent = []\n if not entity:\n wiki_entities = ['None']\n else:\n if self.use_inverted_index:\n candidate_entities = self.candidate_entities_inverted_index(entity)\n candidate_names = self.candidate_entities_names(candidate_entities)\n wiki_entities, confidences, srtd_cand_ent = self.sort_found_entities(candidate_entities,\n candidate_names, entity)\n else:\n candidate_entities = self.find_candidate_entities(entity)\n\n srtd_cand_ent = sorted(candidate_entities, key=lambda x: x[2], reverse=True)\n if len(srtd_cand_ent) > 0:\n wiki_entities = [ent[1] for ent in srtd_cand_ent]\n confidences = [1.0 for i in range(len(srtd_cand_ent))]\n srtd_cand_ent = [(ent[0], ent[1], conf, ent[2]) for ent, conf in zip(srtd_cand_ent, confidences)]\n else:\n candidates = self.fuzzy_entity_search(entity)\n candidates = list(set(candidates))\n srtd_cand_ent = [(ent[0][0], ent[0][1], ent[1], ent[0][2]) for ent in candidates]\n srtd_cand_ent = sorted(srtd_cand_ent, key=lambda x: (x[2], x[3]), reverse=True)\n\n if len(srtd_cand_ent) > 0:\n wiki_entities = [ent[1] for ent in srtd_cand_ent]\n confidences = [float(ent[2]) * 0.01 for ent in srtd_cand_ent]\n else:\n wiki_entities = [\"None\"]\n confidences = [0.0]\n\n entity_triplets = self.extract_triplets_from_wiki(wiki_entities)\n if self.rule_filter_entities and self._language == 'rus':\n filtered_entities, filtered_entity_triplets = self.filter_triplets_rus(entity_triplets,\n question_tokens, srtd_cand_ent)\n if self.debug:\n self._log_entities(filtered_entities[:10])\n\n return filtered_entity_triplets, confidences\n\n def _log_entities(self, srtd_cand_ent):\n entities_to_print = []\n for name, q, ratio, n_rel in srtd_cand_ent:\n entities_to_print.append(f'{name}, http://wikidata.org/wiki/{q}, {ratio}, {n_rel}')\n log.debug('\\n'+'\\n'.join(entities_to_print))\n\n def find_candidate_entities(self, entity: str) -> List[str]:\n candidate_entities = list(self.name_to_q.get(entity, []))\n entity_split = 
entity.split(' ')\n if len(entity_split) < 6 and self.lemmatize:\n entity_lemm_tokens = []\n for tok in entity_split:\n morph_parse_tok = self.morph.parse(tok)[0]\n lemmatized_tok = morph_parse_tok.normal_form\n entity_lemm_tokens.append(lemmatized_tok)\n masks = itertools.product([False, True], repeat=len(entity_split))\n for mask in masks:\n entity_lemm = []\n for i in range(len(entity_split)):\n if mask[i]:\n entity_lemm.append(entity_split[i])\n else:\n entity_lemm.append(entity_lemm_tokens[i])\n entity_lemm = ' '.join(entity_lemm)\n if entity_lemm != entity:\n candidate_entities += self.name_to_q.get(entity_lemm, [])\n candidate_entities = list(set(candidate_entities))\n\n return candidate_entities\n\n def fuzzy_entity_search(self, entity: str) -> List[Tuple[Tuple, str]]:\n word_length = len(entity)\n candidates = []\n for title in self.name_to_q:\n length_ratio = len(title) / word_length\n if length_ratio > 0.75 and length_ratio < 1.25:\n ratio = fuzz.ratio(title, entity)\n if ratio > 70:\n entity_candidates = self.name_to_q.get(title, [])\n for cand in entity_candidates:\n candidates.append((cand, fuzz.ratio(entity, cand[0])))\n return candidates\n\n def extract_triplets_from_wiki(self, entity_ids: List[str]) -> List[List[List[str]]]:\n entity_triplets = []\n for entity_id in entity_ids:\n if entity_id in self.wikidata and entity_id.startswith('Q'):\n triplets_for_entity = self.wikidata[entity_id]\n entity_triplets.append(triplets_for_entity)\n else:\n entity_triplets.append([])\n\n return entity_triplets\n\n @staticmethod\n def filter_triplets_rus(entity_triplets: List[List[List[str]]], question_tokens: List[str],\n srtd_cand_ent: List[Tuple[str]]) -> Tuple[List[Tuple[str]], List[List[List[str]]]]:\n\n question = ' '.join(question_tokens).lower()\n what_template = 'что '\n found_what_template = False\n found_what_template = question.find(what_template) > -1\n filtered_entity_triplets = []\n filtered_entities = []\n for wiki_entity, triplets_for_entity in zip(srtd_cand_ent, entity_triplets):\n entity_is_human = False\n entity_is_asteroid = False\n entity_is_named = False\n entity_title = wiki_entity[0]\n if entity_title[0].isupper():\n entity_is_named = True\n property_is_instance_of = 'P31'\n id_for_entity_human = 'Q5'\n id_for_entity_asteroid = 'Q3863'\n for triplet in triplets_for_entity:\n if triplet[0] == property_is_instance_of and triplet[1] == id_for_entity_human:\n entity_is_human = True\n break\n if triplet[0] == property_is_instance_of and triplet[1] == id_for_entity_asteroid:\n entity_is_asteroid = True\n break\n if found_what_template and (entity_is_human or entity_is_named or entity_is_asteroid or wiki_entity[2]<90):\n continue\n filtered_entity_triplets.append(triplets_for_entity)\n filtered_entities.append(wiki_entity)\n\n return filtered_entities, filtered_entity_triplets\n\n def candidate_entities_inverted_index(self, entity: str) -> List[Tuple[str]]:\n word_tokens = nltk.word_tokenize(entity)\n candidate_entities = []\n\n for tok in word_tokens:\n if len(tok) > 1:\n found = False\n if tok in self.inverted_index:\n candidate_entities += self.inverted_index[tok]\n found = True\n morph_parse_tok = self.morph.parse(tok)[0]\n lemmatized_tok = morph_parse_tok.normal_form\n if lemmatized_tok != tok and lemmatized_tok in self.inverted_index:\n candidate_entities += self.inverted_index[lemmatized_tok]\n found = True\n if not found:\n words_with_levens_1 = self.searcher.search(tok, d=1)\n for word in words_with_levens_1:\n candidate_entities += 
+{"seq_id":"455918820","text":"import sys\nimport os\nfrom semgroups import semanticGroupTypeDict\nfrom mod_python import apache, Session, util\nfrom ws import indexText, translateUnicode\n \ndef handler(req):\n    # obtain the posted data\n    # print the header of the html page\n    req.send_http_header()\n    \n    fields = util.FieldStorage(req).list\n    cmd = ''\n    text = \"\"\n    for field in fields:\n        if field.name == \"cmd\": cmd = field.value\n        if field.name == \"text\": text = translateUnicode(field.value)\n\n    if cmd == \"terms\":\n        result = ws.getTerms(text)\n        content = RemoveHeader(result['response'])\n        content = content.replace(\"'\",\"\\\\'\" )\n    # indexing request\n    else:\n        content = \"<response>\" + indexText(text) + \"</response>\"\n\n    req.content_type = 'text/xml'\n    req.write( content )\n    return apache.OK\n\n    ","sub_path":"index_ws.py","file_name":"index_ws.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"588780989","text":"from selenium.webdriver.common.desired_capabilities import DesiredCapabilities\ncaps = DesiredCapabilities.FIREFOX\nimport os\nfrom selenium import webdriver\ncaps[\"marionette\"] = True\ncaps[\"binary\"] = \"C:\\\\Program Files (x86)\\\\Mozilla Firefox\\\\firefox.exe\"\nfp = webdriver.FirefoxProfile()\nfp.set_preference(\"browser.download.folderList\",2)\nfp.set_preference(\"browser.download.manager.showWhenStarting\",False)\nfp.set_preference(\"browser.download.dir\", 'C:\\\\Users\\\\chloe')\nfp.set_preference(\"browser.helperApps.neverAsk.saveToDisk\",\n\"application/octet-stream\")\nbrowser = webdriver.Firefox(firefox_profile=fp,capabilities=caps,)\nbrowser.get(\"http://pypi.python.org/pypi/selenium\")\nbrowser.find_element_by_partial_link_text(\"selenium-3.0.0b3.tar.gz\").click()","sub_path":"download1.py","file_name":"download1.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"206818317","text":"import logging\nimport socket\nimport os\n\n\nlogger = logging.getLogger()\n\n\ndef get_ipv4_address(dest=\"8.8.8.8\", port=80):\n    \"\"\"\n    Get ipv4 address for a given destination. By default use Google's ipv4 DNS\n    address.\n    \"\"\"\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    try:\n        s.connect((dest, port))\n        return s.getsockname()[0]\n    except OSError:\n        pass\n    return \"127.0.0.1\"\n\n\ndef get_ipv6_address(dest=\"2001:4860:4860::8888\", port=80):\n    \"\"\"\n    Get ipv6 address for a given destination. By default use Google's ipv6 DNS\n    address.\n    \"\"\"\n    s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)\n    try:\n        s.connect((dest, port))\n        return s.getsockname()[0]\n    except OSError:\n        pass\n    return \"::1\"\n\n\ndef apply_middlewares(routes, next_function):\n    # Apply middlewares\n    _func = next_function\n    for middleware, _ in routes[::-1]:\n        _middleware_module_name = middleware.get(\"module\", None)\n        if _middleware_module_name is None:\n            logger.error(\"No module name specified for middleware\")\n            continue\n\n        _middleware_function_name = middleware.get(\"function\", None)\n        if _middleware_function_name is None:\n            logger.error(\"No function name specified for middleware\")\n            continue\n\n        try:\n            _middleware_module = exec_cached_script(_middleware_module_name)\n        except Exception:\n            logger.exception(\"Unable to import middleware '{}'\".format(_middleware_module_name))\n            continue\n\n        _middleware_func = _middleware_module.get(_middleware_function_name, None)\n        if _middleware_func is None:\n            logger.error(\"Middleware function '{}' does not exist in '{}'\".format(_middleware_function_name, _middleware_module_name))\n            continue\n\n        _args = middleware.get(\"args\", [])\n        _kwargs = middleware.get(\"kwargs\", {})\n\n        _func = (lambda m, f, a, k: lambda r: m(r, f, *a, **k))(_middleware_func, _func, _args, _kwargs)\n    return _func\n\nscript_cache = {}\ndef exec_cached_script(path):\n    path = os.path.abspath(os.path.join(os.path.split(__file__)[0], \"files\", path))\n    cache = script_cache.setdefault(path, {\"mtime\": 0, \"vars\": {}})\n    if cache[\"mtime\"] < os.path.getmtime(path):\n        with open(path) as f:\n            code = compile(f.read(), path, \"exec\")\n        _vars = {\"__file__\": path}\n        exec(code, _vars, _vars)\n        cache[\"vars\"] = _vars\n        cache[\"mtime\"] = os.path.getmtime(path)\n    return cache[\"vars\"]\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"75422582","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 26 23:09:35 2013\n\n@author: Santiago\n\"\"\"\n\nfrom plant import *\nfrom jac_sym import *\n\np3=Matrix([0,m3*g0,0])\ng3=-Jlm3.T*p3\n\np4=Matrix([0,m4*g0,0])\ng4=-Jlm4.T*p4\n\np5=Matrix([0,m5*g0,0])\ng5=-Jlm5.T*p5\n\np6=Matrix([0,m6*g0,0])\ng6=-Jlm6.T*p6\n\n#G vector\ng=g3+g4+g5+g6\n","sub_path":"LModel/matrix/gmat.py","file_name":"gmat.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"460484196","text":"from flask import Flask\nimport dokyo\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n    return 'api:update'\n\n@app.route('/update/')\ndef update():\n    dokyo.rebuild()\n    return 'done'\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\", debug=True)","sub_path":"web/main/dokyo_app.py","file_name":"dokyo_app.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"499970403","text":"def ulam(x):\n    if x % 2 == 0:\n        return x/2\n    else:\n        return (3*x)+1\n\ndef suc(x):\n    i=0\n    while x!=1:\n        x=ulam(x)\n        i=i+1\n    return i\n","sub_path":"Clases/Programas/Trabajo_en_clase/Ulam.py","file_name":"Ulam.py","file_ext":"py","file_size_in_byte":166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"29450303","text":"\"\"\"\nModule authors:\nRomanov Andrey, xanter@granit.io\n\nThis file contains objects which are used in messages\n\"\"\"\n\nfrom .Interface_Classes import (IDict, IList)\n\n\nclass BaseDictGUISOC(IDict):\n    type_name = \"base_struct\"\n    attributes = {}\n    check_names = []\n    translate_names = {}\n    translate_names_rev = {v: k for k, v in translate_names.items()}\n\n    def check(self):\n        pass\n\n\nclass BaseListGUISOC(IList):\n    ATTR_TYPE_NAME = \"type_name\"\n\n\nclass StrFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        str.__name__: str,\n        unicode.__name__: unicode\n    }\n\n\nclass NetworkInterfaceGUISOC(BaseDictGUISOC):\n    type_name = \"netinterface\"\n    attributes = {\n        \"port_id\": str,\n        \"ip_addr\": str,\n        \"network\": str,\n        \"mac_addr\": str\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n    check_names = [\"port_id\"]\n\n\nclass NetworkInterfaceFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        \"netinterface\": NetworkInterfaceGUISOC\n    }\n\n\nclass IntQuotaGUISOC(BaseDictGUISOC):\n    type_name = \"quota\"\n    attributes = {\n        \"total\": int,\n        \"used\": int,\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n    check_names = [\"total\"]\n    check_names.extend(BaseDictGUISOC.check_names)\n\n\nclass FloatQuotaGUISOC(BaseDictGUISOC):\n    type_name = \"quota\"\n    attributes = {\n        \"total\": float,\n        \"used\": float,\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n    check_names = [\"total\"]\n    check_names.extend(BaseDictGUISOC.check_names)\n\n\nclass StrQuotaGUISOC(BaseDictGUISOC):\n    type_name = \"quota\"\n    attributes = {\n        \"total\": str,\n        \"used\": str,\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n    check_names = [\"total\"]\n    check_names.extend(BaseDictGUISOC.check_names)\n\n\n\nclass QuotasGUISOC(BaseDictGUISOC):\n    pass\n\n\nclass VMQuotasGUISOC(QuotasGUISOC):\n    attributes = {\n        \"cores\": IntQuotaGUISOC,\n        \"ram\": FloatQuotaGUISOC,\n        \"disk\": FloatQuotaGUISOC,\n    }\n    check_names = attributes.keys()\n\n\nclass BaseMongoElementGUISOC(BaseDictGUISOC):\n    type_name = \"mongo_element\"\n    attributes = {\n        \"name\": str\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n\n\nclass BaseNodeGUISOC(BaseMongoElementGUISOC):\n    type_name = \"node\"\n    attributes = {\n        \"netinterfaces\": NetworkInterfaceFactoryGUISOC\n    }\n    attributes.update(BaseMongoElementGUISOC.attributes)\n\n\nclass VirtNodeGUISOC(BaseNodeGUISOC):\n    type_name = \"virt_node\"\n    attributes = {\n        \"assigned_to\": str,\n        \"uuid\": str,\n        # \"ip_addr\": str,\n        \"data_center\": str\n    }\n    attributes.update(BaseNodeGUISOC.attributes)\n\nclass PhysNodeGUISOC(BaseNodeGUISOC):\n    type_name = \"phys_node\"\n\n\nclass VMGUISOC(VirtNodeGUISOC):\n    type_name = \"vm\"\n    attributes = {\n        \"service\": bool,\n        \"image_uuid\": str,\n        \"floating_ip\": str,\n        \"quotas\": VMQuotasGUISOC,\n        \"need_floating_ip\": bool\n    }\n    attributes.update(VirtNodeGUISOC.attributes)\n    # check_names = [ ]\n\n\nclass VNFGUISOC(VirtNodeGUISOC):\n    type_name = \"vnf\"\n    attributes = {\n        \"netinterfaces\": NetworkInterfaceFactoryGUISOC,\n        \"quotas\": VMQuotasGUISOC\n    }\n    attributes.update(VirtNodeGUISOC.attributes)\n\n\nclass StorageQuotasGUISOC(QuotasGUISOC):\n    attributes = {\n        \"capacity\": FloatQuotaGUISOC\n    }\n\n\nclass StorageGUISOC(BaseDictGUISOC):\n    type_name = \"storage\"\n    attributes = {\n        \"quotas\": StorageQuotasGUISOC,\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n\n\nclass VirtStorageGUISOC(VirtNodeGUISOC, StorageGUISOC):\n    type_name = \"virt_storage\"\n    attributes = {\n\n    }\n    attributes.update(VirtNodeGUISOC.attributes)\n    attributes.update(StorageGUISOC.attributes)\n\n\nclass NetworkNodeGUISOC(BaseNodeGUISOC):\n    type_name = \"netelement\"\n    attributes = {\n        # \"netinterfaces\": NetworkInterfaceFactoryGUISOC,\n        \"subnet\": str,\n        \"element_type\": str,\n        \"gateway_ip\": str,\n        \"shared_id\": str,\n        \"shared\": bool,\n    }\n    attributes.update(BaseNodeGUISOC.attributes)\n\n\nclass VirtNetworkElementGUISOC(NetworkNodeGUISOC, VirtNodeGUISOC):\n    type_name = \"virt_netelement\"\n    attributes = {}\n    attributes.update(NetworkNodeGUISOC.attributes)\n    attributes.update(VirtNodeGUISOC.attributes)\n\n\nclass VirtNodeFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        \"virt_storage\": VirtStorageGUISOC,\n        \"virt_netelement\": VirtNetworkElementGUISOC,\n        \"vm\": VMGUISOC,\n        \"vnf\": VNFGUISOC\n    }\n\n\nclass ServerGUISOC(PhysNodeGUISOC):\n    type_name = \"server\"\n    attributes = {\n        \"quotas\": VMQuotasGUISOC\n    }\n    attributes.update(PhysNodeGUISOC.attributes)\n\n\nclass PhysNetworkElementGUISOC(PhysNodeGUISOC, NetworkNodeGUISOC):\n    type_name = \"phys_netelement\"\n    attributes = {}\n    attributes.update(PhysNodeGUISOC.attributes)\n    attributes.update(NetworkNodeGUISOC.attributes)\n    check_names = []\n    check_names.extend(PhysNodeGUISOC.check_names)\n    check_names.extend(NetworkNodeGUISOC.check_names)\n\n\nclass PhysStorageGUISOC(StorageGUISOC, PhysNodeGUISOC):\n    type_name = \"phys_storage\"\n    attributes = {}\n    attributes.update(PhysNodeGUISOC.attributes)\n    attributes.update(StorageGUISOC.attributes)\n    check_names = []\n    check_names.extend(PhysNodeGUISOC.check_names)\n    check_names.extend(StorageGUISOC.check_names)\n\n\nclass PhysNodeFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        \"server\": ServerGUISOC,\n        \"phys_netelement\": PhysNetworkElementGUISOC,\n        \"phys_storage\": PhysStorageGUISOC\n    }\n\n\nclass LinkGUISOC(BaseMongoElementGUISOC):\n    type_name = \"link\"\n    attributes = {\n        \"node1\": str,\n        \"node2\": str,\n        \"port1\": str,\n        \"port2\": str,\n        \"bandwidth\": str\n    }\n    attributes.update(BaseMongoElementGUISOC.attributes)\n\n\nclass PhysLinkGUISOC(LinkGUISOC):\n    type_name = \"phys_link\"\n\n\nclass VirtLinkGUISOC(LinkGUISOC):\n    type_name = \"virt_link\"\n\n\nclass PhysLinkFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        \"phys_link\": PhysLinkGUISOC\n    }\n\n\nclass VirtLinkFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        \"virt_link\": VirtLinkGUISOC\n    }\n\n\nclass TenantMetadataGUISOC(BaseMongoElementGUISOC):\n    type_name = \"metatenant\"\n    attributes = {\n        \"deployed\": bool,\n        \"expiration_time\": str,\n        \"tenant_type\": str,\n        \"data_center\": str\n    }\n    attributes.update(BaseMongoElementGUISOC.attributes)\n\n\nclass TenantMetadataFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        \"tenant\": TenantMetadataGUISOC\n    }\n\n\nclass TenantStatusGUISOC(BaseDictGUISOC):\n    type_name = \"tenant_status\"\n    TENANTS_STATUS = ['deployed', 'deleted', 'deploying', 'deleting', 'to_deploy', 'to_delete']\n    attributes = {\n        \"name\": str,\n        \"status\": str\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n\n\nclass TenantStatusFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        \"tenant_status\": TenantStatusGUISOC\n    }\n\n\nclass VNFTenantGUISOC(BaseDictGUISOC):\n    type_name = \"vnf_tenant\"\n    attributes = {\n        \"tenant\": str,\n        \"role\": str,\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n    check_names = [\"tenant\"]\n    check_names.extend(BaseDictGUISOC.check_names)\n\n\nclass VNFTenantFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        \"vnf_tenant\": VNFTenantGUISOC\n    }\n\n\nclass TenantGUISOC(TenantMetadataGUISOC):\n    type_name = \"tenant\"\n    attributes = {\n        \"nodes\": VirtNodeFactoryGUISOC,\n        \"links\": VirtLinkFactoryGUISOC\n    }\n    attributes.update(TenantMetadataGUISOC.attributes)\n\n\nclass TenantFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        \"tenant\": TenantGUISOC\n    }\n\n\nclass RequirementsGUISOC(QuotasGUISOC):\n    type_name = \"requirements\"\n\n\nclass ImageRequirementsGUISOC(RequirementsGUISOC):\n    type_name = \"image_requirements\"\n    attributes = {\n        \"ram\": int,\n        \"disk\": int\n    }\n    attributes.update(RequirementsGUISOC.attributes)\n\n\nclass ImageGUISOC(BaseDictGUISOC):\n    type_name = \"image\"\n    IMAGE_TYPES = [\"vm\", \"vnf\", \"murano\", \"docker\"]\n    attributes = {\n        \"name\": str,\n        \"uuid\": str,\n        \"type\": str,\n        \"requirements\": ImageRequirementsGUISOC\n    }\n\n\nclass ImageFactoryGUISOC(BaseListGUISOC):\n    classes = {\n        \"image\": ImageGUISOC\n    }\n\n\nclass FilterTenantsRequestGUISOC(BaseDictGUISOC):\n    FILTER_TYPES = [\"all\", \"tenants\"]\n    attributes = {\n        \"filter\": str,\n        \"tenants\": StrFactoryGUISOC\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n\n\nclass FilterResourcesRequestGUISOC(BaseDictGUISOC):\n    FILTER_TYPES = [\"all\", \"nodes\"]\n    attributes = {\n        \"filter\": str,\n        \"nodes\": StrFactoryGUISOC\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n\n\nclass FilterTenantsResponseGUISOC(BaseDictGUISOC):\n    FILTER_TYPES = [\"all\", \"tenants\"]\n    attributes = {\n        \"filter\": str,\n        \"tenants\": TenantFactoryGUISOC\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n\n\nclass FilterResourcesResponseGUISOC(BaseDictGUISOC):\n    FILTER_TYPES = [\"all\", \"nodes\"]\n    attributes = {\n        \"filter\": str,\n        \"nodes\": PhysNodeFactoryGUISOC,\n        \"links\": PhysLinkFactoryGUISOC\n    }\n    attributes.update(BaseDictGUISOC.attributes)\n","sub_path":"libsocapi/elements_guisoc.py","file_name":"elements_guisoc.py","file_ext":"py","file_size_in_byte":9598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"287195191","text":"import numpy as np\n\ndef noisy_signal(func, mean, std):\n    '''\n    Inputs random normal noise to a chosen signal.\n    \n    input:\n        func: array - vector containing the original data signal.\n        mean: float - float designating the mean value for the noise.\n        std: float - float designating the standard deviation value for the noise. \n    \n    output:\n        noisy_data: array - vector containing the noisy data signal.\n    '''\n    noise = np.random.normal(mean,std,len(func))\n    noisy_data = func + noise\n    return noisy_data\n    \ndef moving_average(func, window):\n    '''\n    Smoothes a noisy signal according to the averages of the values of points embodied\n    by a window.\n    \n    input:\n        func: array - vector containing the noisy data signal.\n        window: int - integer designating the window size.\n    \n    output:\n        filt: array - vector containing the smoothed data signal.\n    '''\n    position = window//2 # position where the average will be allocated\n    Nwin = len(func)-(window-position) # Number of windows fitted on the data size\n    filt = np.zeros(Nwin) # Initiating filtered data variable having the size of the\n    \t\t\t\t\t\t# number of windows\n    \n    for i in range(Nwin):\n        filt[i] = np.sum(func[i:i+window])/window\n#        print i, func[i], filt[i]\n    return filt","sub_path":"Aula 1/signal_maneuvers.py","file_name":"signal_maneuvers.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"131016497","text":"from cv2 import cv2\nimport win32api, win32con\nimport ctypes\nimport socketio\nimport numpy as np\nimport time\n\ncam = cv2.VideoCapture(0)\nsio = socketio.Client()\n\n@sio.event\ndef buttonPressed(data):\n    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n\n@sio.event\ndef notPressed(data):\n    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\n@sio.event\ndef placePressed(data):\n    win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, 0, 0)\n    print('placed')\n\n@sio.event\ndef notPlace(data):\n    win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, 0, 0)\n    print('not placed')\n\nsio.connect('http://ipadress:3000')\nprint('connected')\n\nwhile 1:\n    try:\n        ret, frame = cam.read()\n        if not ret:\n            print('failed to grab frame')\n            break\n\n        frame = cv2.flip(frame, 1)\n        \n        screenWidth = frame.shape[1]\n        screenHeight = frame.shape[0]\n        x_medium = int(screenHeight / 2)\n        y_medium = int(screenWidth / 2)\n        center = int(screenHeight / 2)\n\n        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n\n        lower = np.array([0, 0, 250])\n        higher = np.array([0, 0, 255])\n        mask = cv2.inRange(hsv, lower, higher)\n        res = cv2.bitwise_and(frame, frame, mask=mask)\n\n        contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n        contours = sorted(contours, key=lambda x:cv2.contourArea(x), reverse=True)\n\n        for cnt in contours:\n            (x, y, w, h) = cv2.boundingRect(cnt)\n\n            x_medium = int((x + x + w) / 2)\n            y_medium = int((y + y + h) / 2)\n            break\n        \n        cv2.line(frame, (x_medium, 0), (x_medium, 480), (0, 255, 0), 2)\n        cv2.line(frame, (0, y_medium), (645, y_medium), (0, 0, 255), 2)\n\n        coords = (x_medium, y_medium)\n\n        ctypes.windll.user32.SetCursorPos(x_medium * 2, y_medium * 2)\n\n        cv2.imshow('frame', frame)\n\n        k = cv2.waitKey(5) & 0xFF\n        if k == 27:\n            break\n    except Exception as e:\n        print(f'oopsie woopsie i did a doodie. UwU {e}')\n        break\n\ncam.release()\ncv2.destroyAllWindows()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"477696235","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport http.cookiejar\nimport json\nimport os\nimport re\nimport sys\nimport urllib.request, urllib.parse, urllib.error\nimport urllib.request, urllib.error, urllib.parse\n\nfrom bs4 import BeautifulSoup\n\n\nEDX_HOMEPAGE = 'https://www.edx.org'\nLOGIN_API = 'https://www.edx.org/login'\nDASHBOARD = 'https://www.edx.org/dashboard'\nYOUTUBE_VIDEO_ID_LENGTH = 11\n\n\ndef get_initial_token():\n    \"\"\"\n    Create initial connection to get authentication token for future requests.\n\n    Returns a string to be used in subsequent connections with the\n    X-CSRFToken header or the empty string if we didn't find any token in\n    the cookies.\n    \"\"\"\n    cj = http.cookiejar.CookieJar()\n    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))\n    urllib.request.install_opener(opener)\n    opener.open(EDX_HOMEPAGE)\n\n    for cookie in cj:\n        if cookie.name == 'csrftoken':\n            return cookie.value\n\n    return ''\n\n\nif __name__ == '__main__':\n    if len(sys.argv) != 3:\n        sys.exit(1)\n\n    user_email = sys.argv[1]\n    user_pswd = sys.argv[2]\n\n    # Prepare Headers\n    headers = {\n        'User-Agent': 'edX-downloader/0.01',\n        'Accept': 'application/json, text/javascript, */*; q=0.01',\n        'Content-Type': 'application/x-www-form-urlencoded;charset=utf-8',\n        'Referer': EDX_HOMEPAGE,\n        'X-Requested-With': 'XMLHttpRequest',\n        'X-CSRFToken': get_initial_token(),\n    }\n\n    # Login\n    post_data = urllib.parse.urlencode({'email': user_email,\n                                        'password': user_pswd,\n                                        'remember': False}).encode('utf-8')\n    request = urllib.request.Request(LOGIN_API, post_data, headers)\n    response = urllib.request.urlopen(request)\n    resp = json.loads(response.read().decode('utf-8'))\n    if not resp.get('success', False):\n        print('Wrong Email or Password.')\n        exit(2)\n\n\n    # Get user info/courses\n    req = urllib.request.Request(DASHBOARD, None, headers)\n    resp = urllib.request.urlopen(req)\n    dash = resp.read()\n    soup = BeautifulSoup(dash)\n    data = soup.find_all('ul')[1]\n    USERNAME = data.find_all('span')[1].string\n    USEREMAIL = data.find_all('span')[3].string\n    COURSES = soup.find_all('article', 'my-course')\n    courses = []\n    for COURSE in COURSES:\n        c_name = COURSE.h3.string\n        c_link = 'https://www.edx.org' + COURSE.a['href']\n        if c_link.endswith('info') or c_link.endswith('info/'):\n            state = 'Started'\n        else:\n            state = 'Not yet'\n        courses.append((c_name, c_link, state))\n    numOfCourses = len(courses)\n\n    # Welcome and Choose Course\n\n    print('Welcome ', USERNAME)\n    print('You can access ', numOfCourses, ' Courses on edX')\n\n    c = 0\n    for course in courses:\n        c += 1\n        print(c, '-', course[0], ' -> ', course[2])\n\n    c_number = int(input('Enter Course Number: '))\n    while c_number > numOfCourses or courses[c_number - 1][2] != 'Started':\n        print('Enter a valid Number for a Started Course ! between 1 and ', \\\n              numOfCourses)\n        c_number = int(input('Enter Course Number: '))\n    selected_course = courses[c_number - 1]\n    COURSEWARE = selected_course[1].replace('info', 'courseware')\n\n\n    ## Getting Available Weeks\n    req = urllib.request.Request(COURSEWARE, None, headers)\n    resp = urllib.request.urlopen(req)\n    courseware = resp.read()\n    soup = BeautifulSoup(courseware)\n    data = soup.section.section.div.div.nav\n    WEEKS = data.find_all('div')\n    weeks = [(w.h3.a.string, ['https://www.edx.org' + a['href'] for a in\n             w.ul.find_all('a')]) for w in WEEKS]\n    numOfWeeks = len(weeks)\n\n\n    # Choose Week or choose all\n    print(selected_course[0], ' has ', numOfWeeks, ' Weeks so far')\n    w = 0\n    for week in weeks:\n        w += 1\n        print(w, '- Download ', week[0], ' videos')\n    print(numOfWeeks + 1, '- Download them all')\n\n    w_number = int(input('Enter Your Choice: '))\n    while w_number > numOfWeeks + 1:\n        print('Enter a valid Number between 1 and ', numOfWeeks + 1)\n        w_number = int(input('Enter Your Choice: '))\n\n    if w_number == numOfWeeks + 1:\n        links = [link for week in weeks for link in week[1]]\n    else:\n        links = weeks[w_number - 1][1]\n\n\n    video_id = []\n    for link in links:\n        print('Processing \\'%s\\'...' % link)\n        req = urllib.request.Request(link, None, headers)\n        resp = urllib.request.urlopen(req)\n        page = resp.read().decode('utf-8')\n        splitter = re.compile('data-streams=(?:&quot;|\").*:')\n        id_container = splitter.split(page)[1:]\n        video_id += [link[:YOUTUBE_VIDEO_ID_LENGTH] for link in\n                     id_container]\n\n    video_link = ['http://youtube.com/watch?v=' + v_id for v_id in video_id]\n\n\n    # Get Available Video_Fmts\n    os.system('youtube-dl -F ' + video_link[-1])\n    video_fmt = int(input('Choose Format code: '))\n\n    # Get subtitles\n    subtitles = input('Download subtitles (y/n)? ') == 'y'\n    \n    # Download Videos\n    c = 0\n    for v in video_link:\n        c += 1\n        cmd = 'youtube-dl -o \"Downloaded/' + selected_course[0] + '/' + str(c).zfill(2) + '-%(title)s.%(ext)s\" -f ' + str(video_fmt)\n        if(subtitles):\n            cmd += ' --write-srt'\n        cmd += ' ' + v\n        os.system(cmd)\n\n    # Say Good Bye :)\n    print('Videos have been downloaded, thanks for using our tool, Good Bye :)')\n","sub_path":"edx-downloader.py","file_name":"edx-downloader.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"419400493","text":"#!/usr/local/Python-2.4.1/bin/python\n\nimport string, sets\n\n# filter those highly oscillating short prefixes (mostly /30-/28) from AS15290\n# According to their policy, we will filter out any prefix that is shorter than /24\n\n# For announcement, filter any prefix that is shorter than /24 and originated from AS15290\n# We use the \"block list\" trick to filter out withdrawal\n\nclass PrefixFilter:\n    \n    def __init__ (self):\n        self.prefix_list = sets.Set() # a block list for short prefixes\n\n\n    # the caller has checked that this update is from AS15290\n    def checkFilter(self, update):\n        if update.type == 'A':\n            if len(update.as_path) == 1 and update.as_path[0] == '15290':\n                temp_list = []\n                for pref in update.prefix:\n                    fields = string.split(pref, '/')\n                    if int(fields[1]) <= 24: # not short prefix, keep it\n                        temp_list.append(pref)\n                    else: # short prefix originated from AS15290\n                        # add to the block list which will be used when checking withdrawal \n                        # because for withdrawal, we have no AS-PATH information\n                        # This is a trick to filter out those prefixes\n                        self.prefix_list.add(pref) \n                update.prefix = temp_list\n\n        else: # 'W'\n            temp_list = []\n            for pref in update.prefix:\n                if pref not in self.prefix_list: # not in the block list\n                    temp_list.append(pref) # keep it\n            update.prefix = temp_list\n\n    def filterStats(self):\n        ret_str = '%d prefixes in block list\\n' % len(self.prefix_list)\n        return ret_str\n","sub_path":"lib/PrefixFilter.py","file_name":"PrefixFilter.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
root\n","sub_path":"capybara_tw/model/capy_file.py","file_name":"capy_file.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"445849459","text":"#!/bin/python\n\nimport sys\nimport io\nfrom enum import Enum\n\n\n\nclass ParamMode(Enum):\n indirect = 0\n direct = 1\n relative = 2\n\n @staticmethod\n def from_int(x):\n if x == 0:\n return ParamMode.indirect\n elif x == 1:\n return ParamMode.direct\n elif x == 2:\n return ParamMode.relative\n else:\n raise Exception(\"Bad param mode {}\".format(x))\n\n\nclass ComputerState(Enum):\n ready = 0\n waiting_for_input = 1\n halted = 99\n\n\nclass IntCodeComputer:\n def __init__(self, init_mem, inputs):\n # copy memory using slice\n self.mem = init_mem[:]\n self.ip = 0\n self.relative_base = 0\n self.inputs = inputs\n self.outputs = []\n self.state = ComputerState.ready\n\n def peek(self, address):\n if address < len(self.mem):\n return self.mem[address]\n else:\n return 0\n\n def read(self, param_address, mode):\n if mode == ParamMode.indirect:\n return self.peek(self.peek(param_address))\n elif mode == ParamMode.direct:\n return self.peek(param_address)\n elif mode == ParamMode.relative:\n return self.peek(self.relative_base + self.peek(param_address))\n else:\n raise Exception(\"Bad read mode {}\".format(mode))\n\n def read_param_1(self):\n return self.read(self.ip + 1, ParamMode.from_int((self.peek(self.ip) // 100) % 10))\n\n def read_param_2(self):\n return self.read(self.ip + 2, ParamMode.from_int((self.peek(self.ip) // 1000) % 10))\n\n def address_from_param(self, param_address, mode):\n if mode == ParamMode.indirect:\n return self.peek(param_address)\n elif mode == ParamMode.direct:\n raise Exception(\"Tried to take address of direct parameter\")\n elif mode == ParamMode.relative:\n return self.relative_base + self.peek(param_address)\n else:\n raise Exception(\"Bad param mode {}\".format(mode))\n\n def address_from_param_1(self):\n return self.address_from_param(self.ip + 1, ParamMode.from_int((self.peek(self.ip) // 100) % 10))\n\n def address_from_param_2(self):\n return self.address_from_param(self.ip + 2, ParamMode.from_int((self.peek(self.ip) // 1000) % 10))\n\n def address_from_param_3(self):\n return self.address_from_param(self.ip + 3, ParamMode.from_int((self.peek(self.ip) // 10000) % 10))\n\n def poke(self, address, value):\n if len(self.mem) <= address:\n while len(self.mem) < address:\n self.mem.append(0)\n self.mem.append(value)\n else:\n self.mem[address] = value\n\n def run(self):\n self.state = ComputerState.ready\n self.outputs = []\n while True:\n opcode = self.peek(self.ip) % 100\n if opcode == 1: # add\n a = self.read_param_1()\n b = self.read_param_2()\n self.poke(self.address_from_param_3(), a + b)\n self.ip += 4\n elif opcode == 2: # mul\n a = self.read_param_1()\n b = self.read_param_2()\n self.poke(self.address_from_param_3(), a * b)\n self.ip += 4\n elif opcode == 3: # input\n if self.inputs:\n self.poke(self.address_from_param_1(), self.inputs.pop(0))\n self.ip += 2\n else:\n self.state = ComputerState.waiting_for_input\n break\n elif opcode == 4: # output\n a = self.read_param_1()\n self.outputs.append(a)\n self.ip += 2\n elif opcode == 5: # jump if true\n a = self.read_param_1()\n b = self.read_param_2()\n if a == 0:\n self.ip += 3\n else:\n self.ip = b\n elif opcode == 6: # jump if false\n a = self.read_param_1()\n b = self.read_param_2()\n if a == 0:\n self.ip = b\n else:\n self.ip += 3\n elif opcode == 7: # less 
than\n a = self.read_param_1()\n b = self.read_param_2()\n self.poke(self.address_from_param_3(), 1 if a < b else 0)\n self.ip += 4\n elif opcode == 8: # equals\n a = self.read_param_1()\n b = self.read_param_2()\n self.poke(self.address_from_param_3(), 1 if a == b else 0)\n self.ip += 4\n elif opcode == 9: # add to relative base\n a = self.read_param_1()\n self.relative_base += a\n self.ip += 2\n elif opcode == 99: # halt\n self.state = ComputerState.halted\n break\n else:\n raise Exception(\"Bad opcode {}\".format(opcode))\n\n\nfile_path = sys.argv[1]\ninit_mem = []\nwith io.open(file_path, \"r\") as f:\n line = f.readline()\n if line:\n init_mem = [int(x.strip()) for x in line.split(\",\")]\n else:\n raise Exception(\"No program in file!\")\n\ncomputer = IntCodeComputer(init_mem, [1])\ncomputer.run()\nprint(\"Output: {}\\n\".format(computer.outputs))\n","sub_path":"day9_boost/python/day9_boost_part_1.py","file_name":"day9_boost_part_1.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"234780856","text":"import importlib\n\n\nclass Warrior:\n def __init__(self):\n self.health = 50\n self.defense = 0\n self.attack = 5\n self.vampirism = 0\n\n @property\n def is_alive(self):\n return self.health > 0\n\n\nclass Knight(Warrior):\n def __init__(self):\n self.attack = 7\n self.defense = 0\n self.health = 50\n self.vampirism = 0\n\n\nclass Defender(Warrior):\n def __init__(self):\n self.attack = 3\n self.health = 60\n self.defense = 2\n self.vampirism = 0\n\n\nclass Vampire(Warrior):\n def __init__(self):\n self.attack = 4\n self.health = 40\n self.defense = 0\n self.vampirism = 0.5\n\n\ndef fight(unit_1, unit_2):\n while 1:\n if unit_1.attack > unit_2.defense: # 判断 防御值 ----\n unit_2.health = unit_2.health - unit_1.attack + unit_2.defense # 造成伤害\n unit_1.health = unit_1.health + (unit_1.attack - unit_2.defense) * unit_1.vampirism # 吸血\n if unit_2.health <= 0:\n return True\n if unit_2.attack > unit_1.defense: # 判断 防御值 ----\n unit_1.health = unit_1.health - unit_2.attack + unit_1.defense # 造成伤害\n unit_2.health = unit_2.health + (unit_2.attack - unit_1.defense) * unit_2.vampirism # 吸血\n if unit_1.health <= 0:\n return False\n\n\nclass Army:\n def __init__(self):\n self.ar = []\n\n def add_units(self, type, num):\n for i in range(0, num):\n obj = type()\n self.ar.append(obj)\n\n\nclass Battle:\n def fight(self, unit_1, unit_2):\n # assert isinstance(unit_1, Army)\n while 1:\n if fight(unit_1.ar[0], unit_2.ar[0]):\n del (unit_2.ar[0])\n if len(unit_2.ar) == 0:\n return True\n else:\n del (unit_1.ar[0])\n if len(unit_1.ar) == 0:\n return False\n\n\n# 以下代码将核查Python代码的正确性\nchuck = Warrior()\nbruce = Warrior()\ncarl = Knight()\ndave = Warrior()\nmark = Warrior()\nbob = Defender()\nmike = Knight()\nrog = Warrior()\nlancelot = Defender()\neric = Vampire()\nadam = Vampire()\nrichard = Defender()\nogre = Warrior()\n\nassert fight(chuck, bruce) == True\nassert fight(dave, carl) == False\nassert chuck.is_alive == True\nassert bruce.is_alive == False\nassert carl.is_alive == True\nassert dave.is_alive == False\nassert fight(carl, mark) == False\nassert carl.is_alive == False\nassert fight(bob, mike) == False\nassert fight(lancelot, rog) == True\nassert fight(eric, richard) == False\nassert fight(ogre, adam) == True\n\nmy_army = Army()\nmy_army.add_units(Defender, 2)\nmy_army.add_units(Vampire, 2)\nmy_army.add_units(Warrior, 1)\n\nenemy_army = Army()\nenemy_army.add_units(Warrior, 
2)\nenemy_army.add_units(Defender, 2)\nenemy_army.add_units(Vampire, 3)\n\narmy_3 = Army()\narmy_3.add_units(Warrior, 1)\narmy_3.add_units(Defender, 4)\n\narmy_4 = Army()\narmy_4.add_units(Vampire, 3)\narmy_4.add_units(Warrior, 2)\n\nbattle = Battle()\n\nassert battle.fight(my_army, enemy_army) == False\nassert battle.fight(army_3, army_4) == True\n","sub_path":"test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":3114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"453564154","text":"import pytest\nfrom dotenv import load_dotenv\n\nfrom geoeditor.app import create_app\nfrom geoeditor.extensions import db as _db\n\n\n@pytest.fixture(scope=\"session\")\ndef app():\n load_dotenv(\".testenv\")\n app = create_app(testing=True)\n return app\n\n\n@pytest.fixture\ndef db(app):\n _db.app = app\n\n with app.app_context():\n _db.create_all()\n\n yield _db\n\n _db.session.close()\n _db.drop_all()\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"498272397","text":"import collections\n\nfrom elasticsearch import ElasticsearchException\n\nfrom .compat import string_types\nfrom .document import DynamicDocument\n\n\nclass DelayedElasticsearchException(ElasticsearchException):\n pass\n\n\nclass Result(object):\n def __init__(self, raw_result):\n self.raw = raw_result\n\n\nclass SearchResult(Result):\n def __init__(self, raw_result, aggregations=None,\n doc_cls=None, instance_mapper=None):\n super(SearchResult, self).__init__(raw_result)\n\n self._query_aggs = aggregations or {}\n\n if doc_cls is None:\n doc_classes = ()\n elif not isinstance(doc_cls, collections.Iterable):\n doc_classes = (doc_cls,)\n else:\n doc_classes = doc_cls\n self._doc_cls_map = {doc_cls.__doc_type__: doc_cls for doc_cls in doc_classes}\n\n self._mapper_registry = {}\n if isinstance(instance_mapper, dict):\n self._instance_mappers = instance_mapper\n else:\n self._instance_mappers = {doc_cls: instance_mapper for doc_cls in doc_classes}\n\n self.error = raw_result.get('error')\n\n if not self.error or 'took' in raw_result:\n self.took = raw_result.get('took')\n \n if not self.error or 'timed_out' in raw_result:\n self.timed_out = raw_result.get('timed_out')\n \n if not self.error or 'hits' in raw_result:\n self.total = raw_result['hits']['total']\n self.max_score = raw_result['hits']['max_score']\n self.hits = []\n for hit in raw_result['hits']['hits']:\n doc_cls = self._doc_cls_map.get(hit['_type'], DynamicDocument)\n self.hits.append(doc_cls(_hit=hit, _result=self))\n\n if not self.error or 'aggregations' in raw_result:\n self.aggregations = {}\n for agg_name, agg_expr in self._query_aggs.items():\n raw_agg_data = raw_result['aggregations'][agg_name]\n agg_result = agg_expr.build_agg_result(raw_agg_data, self._doc_cls_map, mapper_registry=self._mapper_registry)\n self.aggregations[agg_name] = agg_result\n\n if not self.error or '_scroll_id' in raw_result:\n self.scroll_id = raw_result.get('_scroll_id')\n \n def __iter__(self):\n return iter(self.hits)\n\n def __len__(self):\n return len(self.hits)\n\n def __getattr__(self, name):\n if self.error and name in ('took', 'timed_out', 'total', 'hits', 'max_score', 'aggregations', 'scroll_id'):\n raise DelayedElasticsearchException(self.error)\n return super(SearchResult, self).__getattr__(name)\n\n def get_aggregation(self, name):\n return 
+{"seq_id":"498272397","text":"import collections\n\nfrom elasticsearch import ElasticsearchException\n\nfrom .compat import string_types\nfrom .document import DynamicDocument\n\n\nclass DelayedElasticsearchException(ElasticsearchException):\n    pass\n\n\nclass Result(object):\n    def __init__(self, raw_result):\n        self.raw = raw_result\n\n\nclass SearchResult(Result):\n    def __init__(self, raw_result, aggregations=None,\n                 doc_cls=None, instance_mapper=None):\n        super(SearchResult, self).__init__(raw_result)\n\n        self._query_aggs = aggregations or {}\n\n        if doc_cls is None:\n            doc_classes = ()\n        elif not isinstance(doc_cls, collections.Iterable):\n            doc_classes = (doc_cls,)\n        else:\n            doc_classes = doc_cls\n        self._doc_cls_map = {doc_cls.__doc_type__: doc_cls for doc_cls in doc_classes}\n\n        self._mapper_registry = {}\n        if isinstance(instance_mapper, dict):\n            self._instance_mappers = instance_mapper\n        else:\n            self._instance_mappers = {doc_cls: instance_mapper for doc_cls in doc_classes}\n\n        self.error = raw_result.get('error')\n\n        if not self.error or 'took' in raw_result:\n            self.took = raw_result.get('took')\n        \n        if not self.error or 'timed_out' in raw_result:\n            self.timed_out = raw_result.get('timed_out')\n        \n        if not self.error or 'hits' in raw_result:\n            self.total = raw_result['hits']['total']\n            self.max_score = raw_result['hits']['max_score']\n            self.hits = []\n            for hit in raw_result['hits']['hits']:\n                doc_cls = self._doc_cls_map.get(hit['_type'], DynamicDocument)\n                self.hits.append(doc_cls(_hit=hit, _result=self))\n\n        if not self.error or 'aggregations' in raw_result:\n            self.aggregations = {}\n            for agg_name, agg_expr in self._query_aggs.items():\n                raw_agg_data = raw_result['aggregations'][agg_name]\n                agg_result = agg_expr.build_agg_result(raw_agg_data, self._doc_cls_map, mapper_registry=self._mapper_registry)\n                self.aggregations[agg_name] = agg_result\n\n        if not self.error or '_scroll_id' in raw_result:\n            self.scroll_id = raw_result.get('_scroll_id')\n        \n    def __iter__(self):\n        return iter(self.hits)\n\n    def __len__(self):\n        return len(self.hits)\n\n    def __getattr__(self, name):\n        if self.error and name in ('took', 'timed_out', 'total', 'hits', 'max_score', 'aggregations', 'scroll_id'):\n            raise DelayedElasticsearchException(self.error)\n        return super(SearchResult, self).__getattr__(name)\n\n    def get_aggregation(self, name):\n        return self.aggregations.get(name)\n\n    def _populate_instances(self, doc_cls):\n        docs = [doc for doc in self.hits if isinstance(doc, doc_cls)]\n        instances = self._instance_mappers.get(doc_cls)([doc._id for doc in docs])\n        for doc in docs:\n            doc.__dict__['instance'] = instances.get(doc._id)\n\n\nclass CountResult(Result):\n    def __init__(self, raw_result):\n        super(CountResult, self).__init__(raw_result)\n        self.count = raw_result['count']\n\n\nclass ExistsResult(Result):\n    def __init__(self, raw_result):\n        super(ExistsResult, self).__init__(raw_result)\n        self.exists = raw_result['exists']\n\n\nclass ActionResult(Result):\n    def __init__(self, raw_result):\n        super(ActionResult, self).__init__(raw_result)\n        self.name = next(iter(raw_result.keys()))\n        data = next(iter(raw_result.values()))\n        self.status = data['status']\n        self.found = data.get('found')\n        raw_error = data.get('error')\n        if raw_error:\n            if isinstance(raw_error, string_types):\n                self.error = raw_error\n            else:\n                self.error = ErrorReason(raw_error)\n        else:\n            self.error = None\n        self._index = data['_index']\n        self._type = data['_type']\n        self._id = data['_id']\n        self._version = data.get('_version')\n\n\nclass ErrorReason(object):\n    def __init__(self, raw_error):\n        self.type = raw_error['type']\n        self.reason = raw_error['reason']\n\n\nclass BulkResult(Result):\n    def __init__(self, raw_result):\n        super(BulkResult, self).__init__(raw_result)\n        self.took = raw_result['took']\n        self.errors = raw_result['errors']\n        self.items = list(map(ActionResult, raw_result['items']))\n\n    def __iter__(self):\n        return iter(self.items)\n\n\nclass DeleteResult(Result):\n    def __init__(self, raw_result):\n        super(DeleteResult, self).__init__(raw_result)\n        self.found = raw_result['found']\n        self._index = raw_result['_index']\n        self._type = raw_result['_type']\n        self._id = raw_result['_id']\n        self._version = raw_result['_version']\n\n\nclass DeleteByQueryResult(Result):\n    pass\n\n\nclass RefreshResult(Result):\n    pass\n\n\nclass FlushResult(Result):\n    pass\n","sub_path":"elasticmagic/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":4980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"130213270","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 2 17:40:16 2019\n\n@author: weetee\n\"\"\"\nfrom src.tasks.trainer import train_and_fit\nfrom src.tasks.infer import infer_from_trained, FewRel\nimport logging\n# from absl import logging\n\nfrom argparse import ArgumentParser\nimport pdb\nimport sh\n\n\n'''\nThis fine-tunes the BERT model on SemEval task \n'''\n# logger = logging.getLogger('simple_example')\n# logger.setLevel(logging.INFO)\n# # create file handler which logs even debug messages\n# fh = logging.FileHandler('spam.log')\n# fh.setLevel(logging.INFO)\n# # create console handler with a higher log level\n# ch = logging.StreamHandler()\n# ch.setLevel(logging.INFO)\n# # create formatter and add it to the handlers\n# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n# ch.setFormatter(formatter)\n# fh.setFormatter(formatter)\n# # add the handlers to logger\n# logger.addHandler(ch)\n# logger.addHandler(fh)\n\nlogging.basicConfig(format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',\n                    level=logging.INFO,\n                    handlers=[logging.FileHandler(\"{0}\".format('mylogfile'), mode='a'),logging.StreamHandler()])\n\nlogger = logging.getLogger('__name__')\n\n\n# logger.info('hello') \n# exit(1)\n# import IPython ; IPython.embed() ; exit(1)\n\n# logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \\\n                    # datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)\n# logger = logging.getLogger('__file__')\n\nif __name__ == \"__main__\":\n    parser = ArgumentParser()\n    # parser.add_argument(\"--task\", type=str, default='semeval', help='semeval, fewrel')\n    parser.add_argument(\"--task\", type=str, default='semeval_debug', help='semeval, fewrel, semeval_debug')\n    parser.add_argument(\"--train_data\", type=str, default='./data/SemEval2010_task8_all_data_debug/SemEval2010_task8_training/TRAIN_FILE.TXT', \\\n                        help=\"training data .txt file path, \\\n                        ./data/SemEval2010_task8_all_data/SemEval2010_task8_training/TRAIN_FILE.TXT,\\\n                        ./data/SemEval2010_task8_all_data_debug/SemEval2010_task8_training/TRAIN_FILE.TXT\")\n    parser.add_argument(\"--test_data\", type=str, default='./data/SemEval2010_task8_all_data/SemEval2010_task8_testing_keys/TEST_FILE_FULL.TXT', \\\n                        help=\"test data .txt file path\")\n    parser.add_argument(\"--use_pretrained_blanks\", type=int, default=0, help=\"0: Don't use pre-trained blanks model, 1: use pre-trained blanks model\")\n    parser.add_argument(\"--num_classes\", type=int, default=19, help='number of relation classes')\n    parser.add_argument(\"--batch_size\", type=int, default=32, help=\"Training batch size\")\n    parser.add_argument(\"--gradient_acc_steps\", type=int, default=1, help=\"No. of steps of gradient accumulation\")\n    parser.add_argument(\"--max_norm\", type=float, default=1.0, help=\"Clipped gradient norm\")\n    parser.add_argument(\"--fp16\", type=int, default=0, help=\"1: use mixed precision ; 0: use floating point 32\") # mixed precision doesn't seem to train well\n    parser.add_argument(\"--num_epochs\", type=int, default=10, help=\"No of epochs\")\n    parser.add_argument(\"--lr\", type=float, default=0.00005, help=\"learning rate\")\n    # parser.add_argument(\"--model_no\", type=int, default=2, help='''Model ID: 0 - BERT\\n\n    #                                                                         1 - ALBERT,\\\n    #                                                                         2 - SCIBERT''')\n    parser.add_argument(\"--model_no\", type=int, default=1, help='''Model ID: 0 - BERT\\n\n                                                                            1 - ALBERT,\\\n                                                                            2 - SCIBERT''')\n    # parser.add_argument(\"--model_size\", type=str, default='allenai/scibert_scivocab_uncased', help=\"For BERT: 'bert-base-uncased', \\\n    #                                                                                                          'bert-large-uncased',\\\n    #                                                                                            For ALBERT: 'albert-base-v2',\\\n    #                                                                                                        'albert-large-v2',\\\n    #                                                                                            For SCIBERT: 'allenai/scibert_scivocab_uncased'\")\n    parser.add_argument(\"--model_size\", type=str, default='albert-base-v2', help=\"For BERT: 'bert-base-uncased', \\\n                                                                                           'bert-large-uncased',\\\n                                                                                 For ALBERT: 'albert-base-v2',\\\n                                                                                             'albert-large-v2',\\\n                                                                                 For SCIBERT: 'allenai/scibert_scivocab_uncased'\")\n    parser.add_argument(\"--train\", type=int, default=1, help=\"0: Don't train, 1: train\")\n    parser.add_argument(\"--infer\", type=int, default=1, help=\"0: Don't infer, 1: Infer\")\n    \n    args = parser.parse_args()\n    \n    if (args.train == 1) and (args.task != 'fewrel'):\n        net = train_and_fit(args)\n    \n    if (args.infer == 1) and (args.task != 'fewrel'):\n        inferer = infer_from_trained(args, detect_entities=True)\n        test = \"The surprise [E1]visit[/E1] caused a [E2]frenzy[/E2] on the already chaotic trading floor.\"\n        inferer.infer_sentence(test, detect_entities=False)\n        test2 = \"After eating the chicken, he developed a sore throat the next morning.\"\n        inferer.infer_sentence(test2, detect_entities=True)\n        \n        while True:\n            sent = input(\"Type input sentence ('quit' or 'exit' to terminate):\\n\")\n            if sent.lower() in ['quit', 'exit']:\n                break\n            inferer.infer_sentence(sent, detect_entities=False)\n    \n    if args.task == 'fewrel':\n        fewrel = FewRel(args)\n        meta_input, e1_e2_start, meta_labels, outputs = fewrel.evaluate()\n","sub_path":"main_task.py","file_name":"main_task.py","file_ext":"py","file_size_in_byte":6178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"622122031","text":"import asyncio\nimport json\nfrom collections import namedtuple\nfrom typing import List\n\n\"\"\"\nCallback Listener is a tuple with `event` and `callback` \n\"\"\"\nCallbackListener = namedtuple(\"CallbackListener\", \"event callback\")\n\n\nclass Channel:\n    \"\"\"\n    `Channel` is an abstraction for a topic listener for an existing socket connection.\n    Each Channel has its own topic and a list of event-callbacks that responds to messages.\n    Should only be instantiated through `connection.Socket().set_chanel(topic)`\n    Topic-Channel has a 1-many relationship.\n    \"\"\"\n\n    def __init__(self, socket, topic: str, params: dict = {}):\n        \"\"\"\n        :param socket: Socket object\n        :param topic: Topic that it subscribes to on the realtime server\n        :param params:\n        \"\"\"\n        self.socket = socket\n        self.topic: str = topic\n        self.params: dict = params\n        self.listeners: List[CallbackListener] = []\n        self.joined: bool = False\n\n    def join(self):\n        \"\"\"\n        Wrapper for async def _join() to expose a non-async interface\n        Essentially gets the only event loop and attempt joining a topic\n        :return: None\n        \"\"\"\n        loop = asyncio.get_event_loop()\n        loop.run_until_complete(self._join())\n        return self\n\n    async def _join(self):\n        \"\"\"\n        Coroutine that attempts to join Phoenix Realtime server via a certain topic\n        :return: Channel.channel\n        \"\"\"\n        join_req = dict(topic=self.topic, event=\"phx_join\", payload={}, ref=None)\n\n        try:\n            await self.socket.ws_connection.send(json.dumps(join_req))\n\n        except Exception as e:\n            print(str(e))\n            return\n\n    def on(self, event: str, callback):\n        \"\"\"\n        :param event: A specific event will have a specific callback\n        :param callback: Callback that takes msg payload as its first argument\n        :return: None\n        \"\"\"\n\n        cl = CallbackListener(event=event, callback=callback)\n        self.listeners.append(cl)\n        return self\n\n    def off(self, event: str):\n        \"\"\"\n        :param event: Stop responding to a certain event\n        :return: None\n        \"\"\"\n        self.listeners = [callback for callback in self.listeners if callback.event != event]\n","sub_path":"plugin/vendor/realtime_py/channel.py","file_name":"channel.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"229998785","text":"import cv2\nimport os\n\nclass Image2Video(object):\n    def __init__(self, img_dir, img_type, vid_name, vid_type):\n        self.img_dir = img_dir\n        self.img_type = img_type\n        self.vid_name = vid_name\n        self.vid_type = vid_type\n\n    def convert(self):\n        images = [img for img in os.listdir(self.img_dir) if img.endswith('.' + self.img_type)]\n        os.chdir(self.img_dir)\n        print('gathering images...')\n        images.sort(key=lambda x: os.path.getmtime(x))\n        os.chdir('..')\n\n        frame = cv2.imread(os.path.join(self.img_dir, images[0]))\n        height, width, layers = frame.shape\n\n        video = cv2.VideoWriter(self.vid_name + '.' + self.vid_type, 0, 1, (width,height))\n\n        print('writing video...')\n        for image in images:\n            video.write(cv2.imread(os.path.join(self.img_dir, image)))\n\n        cv2.destroyAllWindows()\n        video.release()\n\n        print('image2video complete')\n","sub_path":"image2video.py","file_name":"image2video.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"487589715","text":"from distutils.core import setup, Extension\n\nstemmermodule = Extension(\"porterstemmer\",\n        sources=[\"porterstemmer.c\"])\n\nsetup(name = \"PorterStemmer\",\n      version = \"0.5\",\n      description = \"Porter Stemmer for Python\",\n      author = \"Naoki INADA\",\n      author_email = \"inada-n@klab.jp\",\n      ext_modules = [stemmermodule],\n      url = \"http://bitbucket.org/methane/porterstemmer/\",\n\n      long_description = \"\"\"\\\nPython module for Porter Stemmer\n<http://tartarus.org/~martin/PorterStemmer/>\n\nThis is Python wrapped Porter's original C implementation.\"\"\",\n\n      classifiers = [\n          'Development Status :: 4 - Beta',\n          'License :: Public Domain',\n          'Topic :: Text Processing :: General',\n          'Programming Language :: Python :: 3',\n          'Programming Language :: Python :: 2',\n      ]\n      )\n","sub_path":"pypi_install_script/PorterStemmer-0.5.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"625095700","text":"# Copyright (C) 2018 Greenweaves Software Limited\n\n# This is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This software is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with GNU Emacs. If not, see <http://www.gnu.org/licenses/>\n\nimport math\n\n# Nbr\n#\n# Find neighbours in rectangular array indexed: 0, 1, ... m*n-1\n#\ndef Nbr(k,m,n,periodic=False):\n    neighbours = []\n    def add_periodic(k,incr):\n        candidate = k + incr\n        if abs(incr)==1:\n            while candidate//n < k//n:\n                candidate += n\n            while candidate//n > k//n:\n                candidate -= n    \n        else:\n            while candidate<0:\n                candidate += m*n\n            while candidate>=m*n:\n                candidate -= m*n\n        if candidate != k and not candidate in neighbours:\n            neighbours.append(candidate)\n       \n    def add_if_possible(k,incr):\n        candidate = k + incr\n        if -1 < candidate and candidate < m*n:\n            if abs(incr)==1:\n                if candidate//n == k//n: # Same row?\n                    neighbours.append(candidate)    \n            else:\n                if abs(incr)==n: # Same column?\n                    neighbours.append(candidate)    \n    \n    def add(k,incr):\n        if periodic:\n            add_periodic(k,incr)\n        else:\n            add_if_possible(k,incr)\n    \n    if k<m*n and k>-1:\n        add(k,-n)\n        add(k,+n)\n        add(k,-1)\n        add(k,+1)\n    \n    return neighbours\n    \ndef gray_flip(tau,N):\n    k = tau[0]\n    if k<N:\n        tau[k-1] = tau[k]\n        tau[k] = k+1\n    if (k != 1): tau[0] = 1\n    return k,tau\n\ndef flip(ch):\n    return '+' if ch =='-' else '-'\n\ndef enumerate_ising(m,n,periodic=True):\n    N = m * n\n    Ns = {}\n    sigma = [-1] *N\n    tau = list(range(1,(N+1)+1))\n    E = -2 * N\n    Ns[E] = 2\n    spins = ''.join([('+' if sigma[j]>0 else '-') for j in range(N)])\n    for i in range(2**(N-1)-1):\n        k,tau = gray_flip(tau,N)\n        k -= 1\n        h = sum(sigma[j] for j in Nbr(k,m,n,periodic=periodic))\n        E += (2*sigma[k] * h)\n        if not E in Ns:\n            Ns[E] = 0\n        Ns[E] += 2\n\n        sigma[k] = -sigma[k]\n        spins = ''.join([('+' if sigma[j]>0 else '-') for j in range(N)])\n    return [(E,Ns[E]) for E in sorted(Ns.keys())]\n\nif __name__=='__main__':\n    for E,Ns in enumerate_ising(6,6):\n        print (E,Ns)\n","sub_path":"book/ising.py","file_name":"ising.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
There were\",noun_6,verb_4,\"on the\",noun_7,\"!\")\r\n\r\n ","sub_path":"adLibs.py","file_name":"adLibs.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"468826466","text":"# -*- coding: utf-8 -*-\nimport unicodedata\nimport urllib\nimport re\n\nLATEX_ACCENT_X = [\n [ u\"à\", \"`a\" ], # Grave accent\n [ u\"è\", \"`e\" ],\n [ u\"ì\", \"`i\" ],\n [ u\"ò\", \"`o\" ],\n [ u\"ù\", \"`u\" ],\n [ u\"ỳ\", \"`y\" ],\n [ u\"À\", \"`A\" ],\n [ u\"È\", \"`E\" ],\n [ u\"Ì\", \"`I\" ],\n [ u\"Ò\", \"`O\" ],\n [ u\"Ù\", \"`U\" ],\n [ u\"Ỳ\", \"`Y\" ],\n [ u\"á\", \"'a\" ], # Acute accent\n [ u\"é\", \"'e\" ],\n [ u\"í\", \"'i\" ],\n [ u\"ó\", \"'o\" ],\n [ u\"ú\", \"'u\" ],\n [ u\"ý\", \"'y\" ],\n [ u\"Á\", \"'A\" ],\n [ u\"É\", \"'E\" ],\n [ u\"Í\", \"'I\" ],\n [ u\"Ó\", \"'O\" ],\n [ u\"Ú\", \"'U\" ],\n [ u\"Ý\", \"'Y\" ],\n [ u\"â\", \"\\\\^a\" ], # Circumflex\n [ u\"ê\", \"\\\\^e\" ],\n [ u\"î\", \"\\\\^i\" ],\n [ u\"ô\", \"\\\\^o\" ],\n [ u\"û\", \"\\\\^u\" ],\n [ u\"ŷ\", \"\\\\^y\" ],\n [ u\"Â\", \"\\\\^A\" ],\n [ u\"Ê\", \"\\\\^E\" ],\n [ u\"Î\", \"\\\\^I\" ],\n [ u\"Ô\", \"\\\\^O\" ],\n [ u\"Û\", \"\\\\^U\" ],\n [ u\"Ŷ\", \"\\\\^Y\" ],\n [ u\"ä\", \"\\\"a\" ], # Umlaut or dieresis\n [ u\"ë\", \"\\\"e\" ],\n [ u\"ï\", \"\\\"i\" ],\n [ u\"ö\", \"\\\"o\" ],\n [ u\"ü\", \"\\\"u\" ],\n [ u\"ÿ\", \"\\\"y\" ],\n [ u\"Ä\", \"\\\"A\" ],\n [ u\"Ë\", \"\\\"E\" ],\n [ u\"Ï\", \"\\\"I\" ],\n [ u\"Ö\", \"\\\"O\" ],\n [ u\"Ü\", \"\\\"U\" ],\n [ u\"Ÿ\", \"\\\"Y\" ],\n ]\n\nLATEX_ACCENT = [\n [ u\"à\", \"\\\\`a\" ], # Grave accent\n [ u\"è\", \"\\\\`e\" ],\n [ u\"ì\", \"\\\\`\\\\i\" ],\n [ u\"ò\", \"\\\\`o\" ],\n [ u\"ù\", \"\\\\`u\" ],\n [ u\"ỳ\", \"\\\\`y\" ],\n [ u\"À\", \"\\\\`A\" ],\n [ u\"È\", \"\\\\`E\" ],\n [ u\"Ì\", \"\\\\`\\\\I\" ],\n [ u\"Ò\", \"\\\\`O\" ],\n [ u\"Ù\", \"\\\\`U\" ],\n [ u\"Ỳ\", \"\\\\`Y\" ],\n [ u\"á\", \"\\\\'a\" ], # Acute accent\n [ u\"é\", \"\\\\'e\" ],\n [ u\"í\", \"\\\\'\\\\i\" ],\n [ u\"ó\", \"\\\\'o\" ],\n [ u\"ú\", \"\\\\'u\" ],\n [ u\"ý\", \"\\\\'y\" ],\n [ u\"Á\", \"\\\\'A\" ],\n [ u\"É\", \"\\\\'E\" ],\n [ u\"Í\", \"\\\\'\\\\I\" ],\n [ u\"Ó\", \"\\\\'O\" ],\n [ u\"Ú\", \"\\\\'U\" ],\n [ u\"Ý\", \"\\\\'Y\" ],\n [ u\"â\", \"\\\\^a\" ], # Circumflex\n [ u\"ê\", \"\\\\^e\" ],\n [ u\"î\", \"\\\\^\\\\i\" ],\n [ u\"ô\", \"\\\\^o\" ],\n [ u\"û\", \"\\\\^u\" ],\n [ u\"ŷ\", \"\\\\^y\" ],\n [ u\"Â\", \"\\\\^A\" ],\n [ u\"Ê\", \"\\\\^E\" ],\n [ u\"Î\", \"\\\\^\\\\I\" ],\n [ u\"Ô\", \"\\\\^O\" ],\n [ u\"Û\", \"\\\\^U\" ],\n [ u\"Ŷ\", \"\\\\^Y\" ],\n [ u\"ä\", \"\\\\\\\"a\" ], # Umlaut or dieresis\n [ u\"ë\", \"\\\\\\\"e\" ],\n [ u\"ï\", \"\\\\\\\"\\\\i\" ],\n [ u\"ö\", \"\\\\\\\"o\" ],\n [ u\"ü\", \"\\\\\\\"u\" ],\n [ u\"ÿ\", \"\\\\\\\"y\" ],\n [ u\"Ä\", \"\\\\\\\"A\" ],\n [ u\"Ë\", \"\\\\\\\"E\" ],\n [ u\"Ï\", \"\\\\\\\"\\\\I\" ],\n [ u\"Ö\", \"\\\\\\\"O\" ],\n [ u\"Ü\", \"\\\\\\\"U\" ],\n [ u\"Ÿ\", \"\\\\\\\"Y\" ],\n [ u\"ç\", \"\\\\c{c}\" ], # Cedilla\n [ u\"Ç\", \"\\\\c{C}\" ],\n [ u\"œ\", \"{\\\\oe}\" ], # Ligatures\n [ u\"Œ\", \"{\\\\OE}\" ],\n [ u\"æ\", \"{\\\\ae}\" ],\n [ u\"Æ\", \"{\\\\AE}\" ],\n [ u\"å\", \"{\\\\aa}\" ],\n [ u\"Å\", \"{\\\\AA}\" ],\n [ u\"ø\", \"{\\\\o}\" ], # Misc latin-1 letters\n [ u\"Ø\", \"{\\\\O}\" ],\n [ u\"ß\", \"{\\\\ss}\" ],\n [ u\"¡\", \"{!`}\" ],\n [ u\"¿\", \"{?`}\" ],\n [ u\"≥\", \"$\\\\ge$\" ], # Math operators\n [ u\"≤\", \"$\\\\le$\" ],\n [ u\"≠\", \"$\\\\neq$\" ],\n [ u\"©\", \"\\copyright\" ], # Misc\n [ u\"ı\", \"{\\\\i}\" ],\n [ u\"µ\", 
\"$\\\\mu$\" ],\n [ u\"°\", \"$\\\\deg$\" ],\n# [ u\"\\\\\", \"\\\\\\\\\" ], # Characters that should be quoted\n [ u\"~\", \"\\\\~\" ],\n [ u\"&\", \"\\\\&\" ],\n [ u\"$\", \"\\\\$\" ],\n [ u\"{\", \"\\\\{\" ],\n [ u\"}\", \"\\\\}\" ],\n [ u\"%\", \"\\\\%\" ],\n [ u\"#\", \"\\\\#\" ],\n [ u\"_\", \"\\\\_\" ],\n [ u\"–\", \"--\" ], # Dashes\n [ u\"—\", \"---\" ],\n [ u\"‘\", \"`\" ], #Quotes\n [ u\"’\", \"'\" ],\n [ u\"“\", \"``\" ],\n [ u\"”\", \"''\" ],\n [ u\"‚\", \",\" ],\n [ u\"„\", \",,\" ],\n ]\ndef unicode2latex(str):\n for pattern in LATEX_ACCENT:\n #print pattern\n str = str.replace(pattern[0],pattern[1])\n return str\n\ndef latex2unicode(str):\n for pattern in LATEX_ACCENT_X:\n #print pattern\n str = str.replace(pattern[1],pattern[0])\n return str\n \ndef create_ascii_localname(name, escape=False):\n \"\"\"\n escape=False\n e.g. http://data.semanticweb.org/person/juergen-umbrich/html\n e.g. http://data.semanticweb.org/person/jerome-euzenat/html\n input: u\"Klüft skräms inför på fédéral électoral große\"\n output: 'klueft-skraems-infoer-pa-federal-electoral-groe'\n\n escape=True\n e.g. http://dbpedia.org/resource/J%C3%BCrgen_Melzer\n\n input: u\"Klüft skräms inför på fédéral électoral große\"\n output: 'Kl%C3%BCft_skr%C3%A4ms_inf%C3%B6r_p%C3%A5_f%C3%A9d%C3%A9ral_%C3%A9lectoral_gro%C3%9Fe'\n\n also see:http://stackoverflow.com/questions/2700859 \n \"\"\" \n if escape:\n \n name = name.encode(\"utf-8\")\n name = urllib.quote(name,safe='=/ ')\n name = re.sub(\"[ ]+\",\"_\", name)\n else:\n \n name = unicodedata.normalize('NFKD', name)\n table = {}\n name = re.sub(u\"\\u0308\",\"e\",name)\n #print name\n name = name.encode('ascii','ignore')\n name = re.sub(\"[ ]+\",\"-\", name)\n# name = re.sub(\"`\",\"'\", name)\n# name = re.sub(\"[\\.'\\(\\)\\\"]\",\"\", name)\n name = re.sub(\"[^-a-zA-Z0-9]\",\"\", name)\n name = re.sub(\"-+\",\"-\", name)\n name = name.lower()\n\n return name\n\n\nclass MyCounter(object):\n def __init__(self):\n self.data = {}\n \n def inc(self, key, cnt=1):\n if not key in self.data:\n self.data[key]=0\n self.data[key] += cnt\n\n def list(self, min_count=0):\n ret = {}\n for k,v in self.data:\n if v >= min_count:\n ret[k]=v\n \n return ret\n\"\"\"\nclass MyCounterKeyValue(object):\n def __init__(self):\n self.data = {}\n \n def inc(self, key, value, ref):\n key = UtilSyntax.convert(key)\n value = UtilSyntax.convert(value)\n if not key in self.data:\n self.data[key]={} \n if not value in self.data[key]:\n self.data[key][value]=set()\n self.data[key][value].add(ref)\n\n def show(self, min_value_count=0):\n total =0\n for k in self.data:\n v = self.data[k]\n if len(v) >= min_value_count:\n msg = \"\"\n for v in self.data[k]:\n msg +=\"{0}={1},\".format(v, len(self.data[k][v]))\n msg = \"{0}--[{1}]--[{2}]\".format(k, len(self.data[k]), msg)\n print msg\n total +=1\n print \"total {0} item with >={1} values\".format(total, min_value_count)\n\"\"\"","sub_path":"iswc-metadata/src/iswc2014/lib_ext.py","file_name":"lib_ext.py","file_ext":"py","file_size_in_byte":6546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"151539011","text":"\nimport Libraries.ECG_lib.ecg_lib as ecgread\nimport Libraries.ECG_lib.ecg_processing as ecgprocess\nimport time\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.interpolate import splrep, splev\nfrom scipy.signal import detrend\nimport Libraries.ECG_lib.tools as st\nfrom scipy.signal import butter, lfilter, freqz,cheby2,sosfilt,lfilter_zi\nfrom Libraries.PPG_lib.hrcalc 
import PeakFinder\n\n\n\n\n\ndef interp_cubic_spline(rri, sf_up=4):\n \"\"\"\n Interpolate R-R intervals using cubic spline.\n Taken from the `hrv` python package by Rhenan Bartels.\n\n Parameters\n ----------\n rri : np.array\n R-R peak interval (in ms)\n sf_up : float\n Upsampling frequency.\n\n Returns\n -------\n rri_interp : np.array\n Upsampled/interpolated R-R peak interval array\n \"\"\"\n rri_time = np.cumsum(rri) / 1000.0\n time_rri = rri_time - rri_time[0]\n time_rri_interp = np.arange(0, time_rri[-1], 1 / float(sf_up))\n #print(time_rri)\n #print(rri)\n tck = splrep(time_rri, rri, s=0)\n rri_interp = splev(time_rri_interp, tck, der=0)\n return rri_interp\n\ndef get_respiration(ecgraw,sampling_rate,count,lastPeak=None):\n if lastPeak:\n rpeaks_array = [lastPeak]\n else:\n rpeaks_array = []\n ts,filtered,rpeaks=ecgprocess.ecg(ecgraw, 300, False,corr_rpeaks=True,calc_heartrate=False)\n rpeaks=rpeaks+300*(count)\n rpeaks=(rpeaks/sampling_rate)*1000\n rpeaks_array.extend(rpeaks)\n rri = np.diff(rpeaks_array)\n rri_interp = interp_cubic_spline(rri,4)\n hr=1000*(60/rri_interp)\n edr = detrend(hr)\n edr = (edr - edr.mean()) / edr.std()\n heart_rate = int(len(rpeaks)*60/(len(ecgraw)/sampling_rate))\n return edr,heart_rate,count+1,rpeaks_array[-1]\n\ndef get_rr(edr,time_interval):\n resp_peaks=PeakFinder.get(edr)\n rr=len(resp_peaks)*60/time_interval\n return rr\n","sub_path":"Pi/Libraries/RESP_lib/resp_processing.py","file_name":"resp_processing.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"224826070","text":"#!/usr/bin/env python\n\nimport sys\n\nargs=str(sys.argv[1])\n\n#print(args)\n\nfile1 = open(args,\"r+\")\nsome_data=file1.read()\nfile1.close()\n\n#print(some_data)\n\nsome_data=some_data.replace(\"[noise]\",\"\").replace(\"[laughter]\",\"\").replace(\"utterance-id1\",\"\").strip()\ncomplete_transcript=str(\"\")\n\nlist_some_data=some_data.split()\nsome_data_len=len(list_some_data)\njoint_string=[' '.join(list_some_data)] \n#print(joint_string)\n\nfor a in range(0,some_data_len,13):\n joint_string=[' '.join(list_some_data[a-13:a])] \n complete_transcript=complete_transcript + \"\\n\" + str(joint_string).replace(\"utterance-id1 \",\"\").replace('[','').replace(']','').replace('\\'','').replace('\\\"','') + \"\\n\"\n \nrem=len(list_some_data)%13\n#print(len(list_some_data))\n#print(rem)\n\nif rem != 0:\n complete_transcript=complete_transcript+' '.join(list_some_data[-rem:])+\"\\n\"\n \n\nprint(complete_transcript)\n","sub_path":"segment_transcripts.py","file_name":"segment_transcripts.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"482314607","text":"def sol(horses, horse_index, index, score):\n global board, answer, order\n if score + (10 - index)* 40 < answer:\n return\n if index == 10:\n answer = max(score, answer)\n return\n x, y = horses[horse_index]\n move = order[index]\n if x == 0:\n if y + move == len(board[x]) - 1:\n x, y = 4, 0\n elif y + move > len(board[x]) - 1:\n x, y = 4, 1\n elif (y + move) == 5:\n x, y = 1, 0\n elif (y + move) == 10:\n x, y = 2, 0\n elif (y + move) == 15:\n x, y = 3, 0\n else:\n x, y = 0, y + move\n\n elif x == 1 or x == 2 or x == 3:\n if y + move == len(board[x]) - 1:\n x, y = 4, 0\n elif y + move > len(board[x]) - 1:\n x, y = 4, 1\n else:\n x, y = x, y + move\n else:\n x, y = 4, 1\n\n if (x,y) != (4,1):\n if (x, y) in horses:\n return\n if (x,y) 
== (1, 4) or (x,y) == (2,3) or (x,y) == (3, 4):\n if (1,4) in horses or (2,3) in horses or (3,4) in horses:\n return\n if (x, y) == (1, 5) or (x, y) == (2, 4) or (x, y) == (3, 5):\n if (1, 5) in horses or (2, 4) in horses or (3, 5) in horses:\n return\n if (x,y) == (1, 6) or (x,y) == (2,5) or (x,y) == (3, 6):\n if (1,6) in horses or (2,5) in horses or (3,6) in horses:\n return\n horses[horse_index] = (x, y)\n score += board[x][y]\n found = False\n for h_i in range(4):\n if horses[h_i] != (4, 1):\n found = True\n sol(horses.copy(), h_i, index + 1, score)\n\n if not found:\n answer = max(score, answer)\n return\n\n\n\n\nboard = [[0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40],\n[10, 13, 16, 19, 25, 30, 35, 40],\n[20, 22, 24, 25, 30, 35, 40],\n[30, 28, 27, 26, 25, 30, 35, 40],\n[40, 0]]\n\n\norder = list(map(int, input().split()))\nhorses = [(0,0) for i in range(4)]\nanswer = 0\nsol(horses, 0, 0, 0)\nprint(answer)","sub_path":"BOJ/주사위 윷놀이.py","file_name":"주사위 윷놀이.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"452829767","text":"#!/usr/bin/env python \nimport sys\nfrom tkinter import *\ndef button_test():\n global text\n if label[\"text\"] == text:\n label[\"text\"] = \"НАЙДУТСЯ И ТЕ, КТО ПРИДУТ ЗА ТОБОЙ\"\n else:\n label[\"text\"] = \"ТАК ЖЕ СКОВАННЫЕ ОДНОЙ ЦЕПЬЮ\"\ndef exit():\n sys.exit()\nroot = Tk()\n#label = Label(text=\"пиветь\")\n#label.grid()\n#button = Button(root)\n#button.configure(text=\"жми1\", command=button_test)\n#button.grid()\nroot.title(\"во славу аир\")\nroot.geometry('500x400')\nbutton = Button(root, text=\"узнать истину\", command=button_test)\nbutton1 = Button(root, text=\"выход тут\", command=exit)\ntext = \"И ЕСЛИ ЕСТЬ ТЕ, КТО ПРИХОДЯТ К ТЕБЕ\"\nlabel = Label(text=text)\nlabel.pack()\nbutton1.pack()\nbutton.pack()\nroot.mainloop()\n","sub_path":"tests/gui3.py","file_name":"gui3.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"99598010","text":"import numpy as np\nimport tensorflow as tf\nimport csv\nimport random\nimport re\nfrom sklearn.utils import shuffle\nfrom matplotlib import pyplot as plt\nfrom keras.models import Sequential\nfrom keras.layers import LSTM, Dense, Dropout, Activation, Flatten, Bidirectional, Conv1D, MaxPooling1D\nfrom keras.utils import plot_model\nfrom sklearn.metrics import roc_curve, auc\n\ntraining_exon_file = 'highconfidencetargetintron012'\ntraining_intron_file = 'highconfidencetargetintron1_randomized'\n\ndef accuracy(test_x, test_y, model):\n result = model.predict(test_x)\n predicted_class = np.argmax(result, axis=1)\n true_class = np.argmax(test_y, axis=1)\n num_correct = np.sum(predicted_class == true_class)\n accuracy = float(num_correct) / result.shape[0]\n return (accuracy * 100)\n\n# Plot data\ndef generate_results(y_true, y_predict):\n fpr, tpr, _ = roc_curve(y_true, y_predict)\n roc_auc = auc(fpr, tpr)\n plt.plot(fpr, tpr, color='#3261a1', label='ROC curve (area = %0.2f)' % roc_auc)\n plt.plot([0, 1], [0, 1], 'k--')\n plt.xlim([0.0, 1.05])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver Operating Characteristic')\n plt.legend(loc=\"lower right\")\n plt.savefig('eval/auc.png')\n plt.close()\n\ndef padding(mylist):\n\tlistlength = len(mylist)\n\titeration = 60 - listlength\n\tfor a in range(iteration):\n\t\tmylist.append(np.zeros(4))\n\treturn 
mylist\n\ndef randomize_except(c):\n chars = ['A', 'C', 'G', 'T']\n new_c = random.sample(chars, 1)[0]\n while c == new_c:\n new_c = random.sample(chars, 1)[0]\n return new_c\n\ndef preprocess_modify(filename):\n f = open(filename, 'r')\n training_x = list()\n mod_k = 20\n training_size = 0\n strlist = list()\n chars = {'A': [1, 0, 0, 0], 'C': [0, 1, 0, 0], 'G': [0, 0, 1, 0], 'T': [0, 0, 0, 1]}\n\n for l in f.readlines():\n searched = re.search('^>', l)\n if searched != None and searched.group(0) == '>':\n continue\n else:\n tmp = list()\n l = l.strip()\n start_index = random.randint(0, len(l) - mod_k)\n end_index = start_index + mod_k - 1\n\n i = 0\n while i < len(l):\n c = l[i]\n if i >= start_index and i <= end_index:\n c = randomize_except(c)\n if c in chars:\n tmp.append(chars[c])\n if len(tmp) == 60:\n training_size += 1\n tmp = padding(tmp)\n strlist.append(l)\n training_x.append(tmp)\n i += 1\n\n f.close()\n return training_x, training_size, strlist\n\n\ndef preprocess_fa(filename, sample_size=None):\n f = open(filename, 'r')\n training_x = list()\n training_size = 0\n strlist = list()\n chars = {'A': [1, 0, 0, 0], 'C': [0, 1, 0, 0], 'G': [0, 0, 1, 0], 'T': [0, 0, 0, 1]}\n\n for l in f.readlines():\n searched = re.search('^>', l)\n if searched != None and searched.group(0) == '>':\n continue\n else:\n tmp = list()\n for c in l.strip():\n if c in chars:\n tmp.append(chars[c])\n if len(tmp) == 60:\n training_size += 1\n strlist.append(l)\n training_x.append(tmp)\n if training_size == sample_size:\n break\n f.close()\n return training_x, training_size, strlist\n\ndef csvwriter(filename, data):\n with open(filename, 'w') as f:\n #fieldnames = ['y1', 'y2']\n #writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n #writer.writeheader()\n writer = csv.writer(f, delimiter=',')\n writer.writerows(data)\n\ndef rawwriter(filename, data):\n with open(filename, 'w') as f:\n for e in data:\n f.write(e)\n\n\nexon_x_train, training_exon_size, strtmp = preprocess_fa(training_exon_file)\nintron_x_train, training_intron_size, strtmp = preprocess_fa(training_intron_file)\nexon_y_train = [[1, 0] for _ in range(training_exon_size)]\nintron_y_train = [[0, 1] for _ in range(training_intron_size)]\n\nprint('# of exons: %d' % training_exon_size)\nprint('# of introns: %d' % training_intron_size)\n\ntraining_all_x = np.concatenate((exon_x_train, intron_x_train), axis=0).astype(dtype=np.float32)\ntraining_all_y = np.concatenate((exon_y_train, intron_y_train), axis=0).astype(dtype=np.float32)\n\n\nindex = np.random.permutation(len(training_all_x))\ntrain_data, train_labels = training_all_x[index], training_all_y[index]\n\nmetrics = list()\nall_target_prediction = list()\nall_test_prediction = list()\n\ndef contains(input_str):\n for s in target_exon_str:\n if s == input_str:\n return True\n return False\n\ndef attack_eval(target_prediction, test_prediction, threshold):\n target_size = len(target_prediction)\n test_size = len(test_prediction)\n precision = 0\n accuracy = 0\n recall = 0\n tp = 0\n tn = 0\n i = target_size - 1\n while i >= 0:\n target_val = target_prediction[i][0]\n test_val = test_prediction[i][0]\n if target_val >= threshold:\n tp += 1\n if test_val < threshold:\n tn += 1\n i -= 1\n total_count = float(tp + target_size - tp)\n if total_count <= 0:\n precision = 0.0\n else:\n precision = tp / total_count\n recall = tp / float(tp + test_size - tn)\n accuracy = (tp + tn) / float(target_size + test_size)\n return accuracy, precision, recall\n\ndef plot_figs(target_prediction, test_prediction):\n target_size = 
len(target_prediction)\n test_size = len(test_prediction)\n titles = [['5kmer Teset', '10kmer Test', '20kmer Test'], ['30kmer Test', '40kmer Test', 'Randomized Test']]\n f1, axes1 = plt.subplots(2, 3, sharex='all', sharey='all', tight_layout=True, figsize=(15, 10))\n f2, axes2 = plt.subplots(2, 3, sharex='all', sharey='all', tight_layout=True, figsize=(15, 10))\n pred_i = 0\n for i in range(2):\n for j in range(3):\n tar_pred = target_prediction[pred_i]\n tes_pred = test_prediction[pred_i]\n target_size = len(tar_pred)\n test_size = len(tes_pred)\n cur_fig = axes1[i, j]\n cur_fig.plot(np.linspace(1, target_size, num=target_size), tar_pred, marker='+', color='#909090', linestyle='None', label='Member')\n cur_fig.plot(np.linspace(1, test_size, num=test_size), tes_pred, marker='+', color='#3261a1', linestyle='None', label='Non-member')\n cur_fig.legend(prop={'size': 6})\n cur_fig.set_ylabel('Prediction')\n cur_fig.set_ylim(-0.05, 1.05)\n cur_fig.set_title(titles[i][j])\n\n cur_fig = axes2[i, j]\n cur_fig.hist([tar_pred, tes_pred], bins=20, color=['#909090', '#3261a1'], label=['Member', 'Non-member'])\n cur_fig.legend(prop={'size': 6})\n cur_fig.set_xlabel('Prediction')\n cur_fig.set_ylabel('Frequency')\n cur_fig.set_title(titles[i][j])\n pred_i += 1\n\n f1.savefig('figures/cmp_intron.png')\n f2.savefig('figures/hist_intron.png')\n\ndef test_eval(target_file, test_file):\n target_x, target_size, target_str = preprocess_fa(target_file)\n target_y = [[1, 0] for _ in range(target_size)]\n test_x, test_size, test_str = preprocess_fa(test_file, target_size)\n test_y = [[0, 1] for _ in range(test_size)]\n print('start testing %s' % test_file)\n print('# of target: %d' % target_size)\n print('# of test: %d' % test_size)\n\n target_prediction = model.predict(np.asarray(target_x, dtype=np.float32))\n test_prediction = model.predict(np.asarray(test_x, dtype=np.float32))\n\n all_eval = list()\n threshold = 0.99\n interval = 0.01\n num = 15\n while num >= 0:\n print(\"testing selected threshold: %.2f\" % threshold)\n acc, pre, rec = attack_eval(target_prediction, test_prediction, threshold)\n all_eval.append(acc)\n all_eval.append(pre)\n all_eval.append(rec)\n threshold -= interval\n num -= 1\n metrics.append(all_eval)\n all_target_prediction.append(target_prediction[:, 0])\n all_test_prediction.append(test_prediction[:, 0])\n\nmodel = Sequential()\nmodel.add(\n Conv1D(256, activation='relu', input_shape=(60, 4), strides=1, kernel_size=30))\nmodel.add(MaxPooling1D(strides=1, pool_size=30))\nmodel.add(Dropout(0.3))\nmodel.add(Bidirectional(LSTM(256, input_shape=(256,), return_sequences=True)))\nmodel.add(Bidirectional(LSTM(256, input_shape=(256,), return_sequences=True)))\nmodel.add(Flatten())\nmodel.add(Dropout(0.3))\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(0.3))\nmodel.add(Dense(2, activation='softmax'))\n\nprint('Compiling model......')\nmodel.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\nprint(model.summary())\nhistory = model.fit(train_data, train_labels, batch_size=128, epochs=20, validation_split=0.1, shuffle=True)\n\n# summarize history for accuracy\nplt.plot(history.history['acc'], marker='+', color='#909090', label='Training')\nplt.plot(history.history['val_acc'], marker='+', color='#3261a1', label='Validation')\nplt.title('Model Accuracy')\nplt.ylim(0, 1.05)\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(loc='lower left', prop={'size': 6}, borderpad=1)\nplt.savefig('figures/model_accuracy.png')\nplt.close()\n\n# summarize 
history for loss\nplt.plot(history.history['loss'], marker='+', color='#909090', label='Training')\nplt.plot(history.history['val_loss'], marker='+', color='#3261a1', label='Validation')\nplt.title('Model Loss')\nplt.ylim(0, 1.0)\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(loc='upper right', prop={'size': 6}, borderpad=1)\nplt.savefig('figures/model_loss.png')\nplt.close()\n\nprint(\"Attack evaluating...\")\n\ntarget_file = '../../datasets/intron/intron-target.fa'\n\ntest_file = 'highconfintron5mod'\ntest_eval(target_file, test_file)\n\ntest_file = 'highconfintron10mod'\ntest_eval(target_file, test_file)\n\ntest_file = 'highconfintron20mod'\ntest_eval(target_file, test_file)\n\ntest_file = 'highconfintron30mod'\ntest_eval(target_file, test_file)\n\ntest_file = 'highconfintron40mod'\ntest_eval(target_file, test_file)\n\ntest_file = 'highconfidencetargetintron1_randomized'\ntest_eval(target_file, test_file)\n\ncsvwriter('eval/metrics_all.csv', np.transpose(metrics))\nplot_figs(all_target_prediction, all_test_prediction)\ncsvwriter('eval/predict_target.csv', all_target_prediction)\ncsvwriter('eval/predict_test.csv', all_test_prediction)\n\nscore_trainingall = model.predict(train_data)\n\ngenerate_results(train_labels[:, 0], score_trainingall[:, 0])\n","sub_path":"src/attackmodel_intron.py","file_name":"attackmodel_intron.py","file_ext":"py","file_size_in_byte":10562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"511392155","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nimport MySQLdb\ndb=MySQLdb.connect(\"localhost\",\"root\",\"plutonian\",\"test1\")\ncursor2=db.cursor()\n#cursor3=db.cursor()\ncmd=\"\"\"select * from api_phdcounters\"\"\"\ncursor2.execute(cmd)\nalt=cursor2.fetchone()\ndb.commit()\ntitlef=alt[1]\nsrc=requests.get(\"http://www.du.ac.in/du/index.php?mact=News,cntnt01,detail,0&cntnt01articleid=8016&cntnt01showall=1&cntnt01returnid=83\").text\nsoup=bs(src,\"html.parser\")\nsoup1=soup.find_all('div',class_=\"content-inner.grid_12\")[1] \nsoup2=soup1.find_all('a') \nfile1=open('phdresults.txt','w+')\ncounter=alt[0]\n#print \"0\"\nfor i in soup2:\n link=i[\"href\"]\n title=i[\"title\"]\n #print \"1\"\n if title == alt[1]:\n cursor2.execute(\"\"\"update api_phdcounter set phdtitle=%s\"\"\",(titlef,))\n cursor2.execute(\"\"\"update phdcounter set phdid=%s\"\"\",(counter,))\n break\n if link==soup2[-1]:\n cursor2.execute(\"\"\"update phdcounter set phdtitle=%s\"\"\",(titlef,))\n cursor2.execute(\"\"\"update phdcounter set phdid=%s\"\"\",(counter,))\n break\n titlef=title\n sql=\"\"\"insert into phdresults(id,title,linkf) values(NULL,'%s','%s')\"\"\"%(title,link)\n cursor2.execute(sql)\n file1.write('\\n%s'%link)\n #print \"3\"\n counter=counter+1\n \nfile1.close()\ndb.commit()\ncursor2.close()\n#cursor3.close()\ndb.close()\n","sub_path":"duphdresults.py","file_name":"duphdresults.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"453534931","text":"from guillotina.behaviors import apply_markers\nfrom guillotina.content import Folder\nfrom guillotina.content import Item\nfrom guillotina.db.storages.pg import PostgresqlStorage\nfrom guillotina.db.transaction_manager import TransactionManager\nfrom guillotina.exceptions import ConflictError\n\nimport pytest\nimport uuid\n\n\nasync def cleanup(aps):\n conn = await aps.open()\n txn = conn.transaction()\n await txn.start()\n await conn.execute(\"DROP TABLE IF EXISTS 
objects;\")\n await conn.execute(\"DROP TABLE IF EXISTS blobs;\")\n await conn.execute(\"ALTER SEQUENCE tid_seq RESTART WITH 1;\")\n await txn.commit()\n await aps._pool.release(conn)\n\n\nasync def get_aps():\n dsn = \"postgres://postgres:@localhost:5432/guillotina\"\n partition_object = \"guillotina.db.interfaces.IPartition\"\n aps = PostgresqlStorage(\n dsn=dsn, partition=partition_object, name='db')\n await aps.initialize()\n return aps\n\n\ndef create_ob():\n obj = Item()\n obj.type_name = 'Item'\n obj._p_oid = uuid.uuid4().hex\n obj.__name__ = obj.id = 'foobar'\n apply_markers(obj, None)\n return obj\n\n\ndef create_folder():\n obj = Folder()\n obj.type_name = 'Folder'\n obj._p_oid = uuid.uuid4().hex\n obj.__name__ = obj.id = 'foobar'\n apply_markers(obj, None)\n return obj\n\n\nasync def test_read_obs(postgres, dummy_request):\n \"\"\"Low level test checks that root is not there\"\"\"\n request = dummy_request # noqa so magically get_current_request can find\n\n aps = await get_aps()\n tm = TransactionManager(aps)\n await tm.begin()\n txn = tm._txn\n\n ob = create_ob()\n txn.register(ob)\n\n assert len(txn.modified) == 1\n\n await tm.commit()\n\n await tm.begin()\n txn = tm._txn\n\n lasttid = await aps.last_transaction(txn)\n assert lasttid is not None\n\n ob2 = await txn.get(ob._p_oid)\n\n assert ob2._p_oid == ob._p_oid\n await tm.commit()\n\n await aps.remove()\n await cleanup(aps)\n\n\nasync def test_deleting_parent_deletes_children(postgres, dummy_request):\n request = dummy_request # noqa so magically get_current_request can find\n\n aps = await get_aps()\n tm = TransactionManager(aps)\n await tm.begin()\n txn = tm._txn\n\n folder = create_folder()\n txn.register(folder)\n ob = create_ob()\n await folder.async_set('foobar', ob)\n\n assert len(txn.modified) == 2\n\n await tm.commit()\n await tm.begin()\n txn = tm._txn\n\n ob2 = await txn.get(ob._p_oid)\n folder2 = await txn.get(folder._p_oid)\n\n assert ob2._p_oid == ob._p_oid\n assert folder2._p_oid == folder._p_oid\n\n # delete parent, children should be gone...\n txn.delete(folder2)\n assert len(txn.deleted) == 1\n\n await tm.commit()\n await tm.begin()\n txn = tm._txn\n\n with pytest.raises(KeyError):\n await txn.get(ob._p_oid)\n with pytest.raises(KeyError):\n await txn.get(folder._p_oid)\n\n await tm.abort()\n\n await aps.remove()\n await cleanup(aps)\n\n\nasync def test_create_blob(postgres, dummy_request):\n request = dummy_request # noqa so magically get_current_request can find\n\n aps = await get_aps()\n tm = TransactionManager(aps)\n await tm.begin()\n txn = tm._txn\n\n ob = create_ob()\n txn.register(ob)\n\n await txn.write_blob_chunk('X' * 32, ob._p_oid, 0, b'foobar')\n\n await tm.commit()\n await tm.begin()\n txn = tm._txn\n\n blob_record = await txn.read_blob_chunk('X' * 32, 0)\n assert blob_record['data'] == b'foobar'\n\n # also get data from ob that started as a stub...\n ob2 = await txn.get(ob._p_oid)\n assert ob2.type_name == 'Item'\n assert ob2.id == 'foobar'\n\n await tm.abort()\n\n await aps.remove()\n await cleanup(aps)\n\n\nasync def test_delete_resource_deletes_blob(postgres, dummy_request):\n request = dummy_request # noqa so magically get_current_request can find\n\n aps = await get_aps()\n tm = TransactionManager(aps)\n await tm.begin()\n txn = tm._txn\n\n ob = create_ob()\n txn.register(ob)\n\n await txn.write_blob_chunk('X' * 32, ob._p_oid, 0, b'foobar')\n\n await tm.commit()\n await tm.begin()\n txn = tm._txn\n\n ob = await txn.get(ob._p_oid)\n txn.delete(ob)\n\n await tm.commit()\n await 
tm.begin()\n txn = tm._txn\n\n assert await txn.read_blob_chunk('X' * 32, 0) is None\n\n with pytest.raises(KeyError):\n await txn.get(ob._p_oid)\n\n await tm.abort()\n await aps.remove()\n await cleanup(aps)\n\n\nasync def test_should_raise_conflict_error(postgres, dummy_request):\n request = dummy_request # noqa so magically get_current_request can find\n\n aps = await get_aps()\n tm1 = TransactionManager(aps)\n tm2 = TransactionManager(aps)\n\n # create object first, commit it...\n await tm1.begin()\n txn = tm1._txn\n\n ob = create_ob()\n txn.register(ob)\n\n await tm1.commit()\n\n # 1 started before 2\n await tm1.begin()\n await tm2.begin()\n txn1 = tm1._txn\n txn2 = tm2._txn\n\n ob1 = await txn1.get(ob._p_oid)\n ob2 = await txn2.get(ob._p_oid)\n ob1.title = 'foobar1'\n ob2.title = 'foobar2'\n\n txn1.register(ob1)\n txn2.register(ob2)\n\n # commit 2 before 1\n await tm2.commit()\n # XXX this should raise exception. We need to figure out conflict resolution here...\n # with pytest.raises(ConflictError):\n # await tm1.commit()\n await tm1.abort()\n\n await aps.remove()\n await cleanup(aps)\n","sub_path":"guillotina/tests/test_postgres.py","file_name":"test_postgres.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"385367364","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport numpy as np\nfrom preprocess import *\nfrom model import Transformer_Model\nimport sys\nimport random\nfrom keras.models import load_model\nfrom preprocess import preprocess_sentence, convert_to_id\nfrom main import test\n\ndef main():\n\n if len(sys.argv) > 1:\n mode = 'TF'\n else:\n mode = 'no TF'\n\n train_inputs, test_inputs, train_labels, test_labels, vocab, pad_indx = get_data(mode = 'MT')\n model = Transformer_Model(WINDOW_SIZE, len(vocab), 1)\n model.load_weights('saved/my_model')\n\n # dictionary of val -> word\n lookup = dict([(value, key) for key, value in vocab.items()])\n\n try:\n while True:\n val = input(\"user: \")\n\n # process user input to match model input\n val = preprocess_sentence(val)\n val = val.split()\n val = pad_corpus_chatbot(val, 0) # input padding\n val = convert_to_id_single(vocab, val)\n\n val = np.reshape(val, (1, len(val)))\n\n # generate model response to user input\n res = generate_sentence(model, val, lookup, vocab, mode)\n res = convert_to_words(res, lookup)\n print('model: ' + res)\n\n except KeyboardInterrupt:\n print('\\ngoodbye!')\n\n# given encoder input and lookup dictionary, returns model-generated numeric sentence\ndef generate_sentence(model, encoder_input, lookup, vocab, mode):\n\n # decoder input starts as start token + padding\n decoder_input = [START_TOKEN] + [PAD_TOKEN] * (80-1)\n decoder_input = convert_to_id_single(vocab, decoder_input)\n decoder_input = np.reshape(decoder_input, (1, len(decoder_input)))\n\n for i in range(1, 80-1):\n if (mode == 'TF'):\n res = np.array(model.call(encoder_input, encoder_input, mode = 'MT', is_training=False))\n res = np.argmax(res, axis=2)[0]\n return res\n else:\n res = np.array(model.call(encoder_input, decoder_input, mode = 'MT', is_training=False)) # teacher forcing removed\n res = np.argmax(res, axis=2)[0]\n decoder_input[0][i] = res[i-1] # sets ith index of decoder\n converted_symbol = lookup[decoder_input[0][i]]\n if (converted_symbol == STOP_TOKEN): # reached end of sentence\n return res\n\n return res\n\n# converts model encoded sentence to English sentence\ndef convert_to_words(sentence, lookup):\n res 
= ''\n for val in sentence:\n converted = lookup[val]\n if (converted == STOP_TOKEN):\n return res\n res = res + converted + ' '\n return res\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"384987563","text":"#!/usr/bin/python3\n\"\"\" New engine DBStorage \"\"\"\n\nfrom os import getenv\nfrom sqlalchemy import (create_engine)\nfrom sqlalchemy.orm import sessionmaker, scoped_session\nfrom models.base_model import Base, BaseModel\nfrom models.user import User\nfrom models.city import City\nfrom models.state import State\nfrom models.place import Place\nfrom models.amenity import Amenity\nfrom models.review import Review\n\n\nclass DBStorage:\n \"\"\" DB class \"\"\"\n __engine = None\n __session = None\n\n def __init__(self):\n \"\"\" Instantiation \"\"\"\n self.__engine = create_engine('mysql+mysqldb://{}:{}@{}/{}'\n .format(getenv(\"HBNB_MYSQL_USER\"),\n getenv(\"HBNB_MYSQL_PWD\"),\n getenv(\"HBNB_MYSQL_HOST\"),\n getenv(\"HBNB_MYSQL_DB\")),\n pool_pre_ping=True)\n\n if getenv(\"HBNB_ENV\") == 'test':\n Base.metadata.drop_all(self.__engine)\n Session = sessionmaker(bind=self.__engine)\n self.__session = Session()\n\n def all(self, cls=None):\n \"\"\" query on the current db session all obj \"\"\"\n if cls is not None:\n new_dict = {}\n iterable = self.__session.query(cls)\n for ins in iterable:\n key = str(cls.__name__) + \".\" + str(ins.id)\n new_dict[key] = ins\n else:\n clases = [\"User\", \"State\", \"City\", \"Amenity\", \"Place\", \"Review\"]\n new_dict = {}\n for clase in clases:\n for ins in self.__session.query(clase):\n key = clase + \".\" + str(ins.id)\n new_dict[key] = ins\n return new_dict\n\n def new(self, obj):\n \"\"\" add the obj to the current db \"\"\"\n self.__session.add(obj)\n\n def save(self):\n \"\"\" commit all changes \"\"\"\n self.__session.commit()\n\n def delete(self, obj=None):\n \"\"\" delete from current db \"\"\"\n if obj is not None:\n self.__session.delete(obj)\n\n def reload(self):\n \"\"\" create all tables in db \"\"\"\n Base.metadata.create_all(self.__engine)\n new_session = sessionmaker(bind=self.__engine, expire_on_commit=False)\n Session = scoped_session(new_session)\n self.__session = Session()\n\n \"\"\"0x04. 
AirBnB clone - Web framework - Task 7\"\"\"\n def close(self):\n \"\"\"calls close\"\"\"\n self.__session.close()\n","sub_path":"models/engine/db_storage.py","file_name":"db_storage.py","file_ext":"py","file_size_in_byte":2504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"550934491","text":"#!/usr/bin/env python\n'''\nNode to receive data from Qualisys.\n\n'''\n\nimport rospy\nimport tf\nfrom mocap_source_2 import Mocap, Body\nfrom slip_control_communications.msg import mocap_data\n\nmocap = Mocap(host='SML', info=1)\ntruck_id = mocap.get_id_from_name(\"F1TenthB\")\n\n#-------------------------------------------------------------------------------\n# talker\n#-------------------------------------------------------------------------------\ndef talker():\n rospy.init_node('mocap_publisher_node', anonymous=True)\n pub = rospy.Publisher('car_state_topic', mocap_data, queue_size=1)\n rate = rospy.Rate(10) # 10hz\n\n data = mocap_data()\n\n while not rospy.is_shutdown():\n truck_state = mocap.get_body(truck_id)\n\n if truck_state == 'off':\n rospy.logwarn(\"Hardware not found!\")\n else:\n data.ts = truck_state['ts']\n data.id = truck_state['id']\n data.x = truck_state['x']\n data.y = truck_state['y']\n data.z = truck_state['z']\n data.roll = truck_state['roll']\n data.pitch = truck_state['pitch']\n data.yaw = truck_state['yaw']\n\n br = tf.TransformBroadcaster()\n br.sendTransform((data.x, data.y, 0),\n tf.transformations.quaternion_from_euler(0, 0, data.yaw),\n rospy.Time.now(),\n \"map\",\n \"base_frame\")\n\n pub.publish(data)\n rate.sleep()\n\n\n#-------------------------------------------------------------------------------\n# main\n#-------------------------------------------------------------------------------\nif __name__ == '__main__':\n try:\n talker()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"circular_mpc/src/mocap_publisher.py","file_name":"mocap_publisher.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"335483184","text":"import numpy as np\nfrom scipy.interpolate import CubicSpline\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# input\np_start = np.array([0., 0., 0.])\nn_start = np.array([0., 0., 1.])\np_goal = np.array([0.2, 0., 0.1])\nn_goal = np.array([-1, 0., 1.])\n\n# setup cublic spline\ntm_list = np.array([0, 1])\np_list = np.array([p_start, p_goal])\n\nnormal_scale = 0.5\nspline = CubicSpline(tm_list, p_list,\n bc_type=((1, normal_scale*n_start), (1, -1*normal_scale*n_goal)))\n\n# plot\ntm_traj = np.linspace(0,1)\np_traj = spline(tm_traj)\n\nfig = plt.figure(figsize=plt.figaspect(1))\nax = fig.gca(projection='3d')\nax.set_aspect('equal')\nax.plot(p_traj[:,0], p_traj[:,1], p_traj[:,2], '-o', label='data')\nplt.pause(0.01)\n","sub_path":"python/test_cubic_spline.py","file_name":"test_cubic_spline.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"364102850","text":"class Movie:\n \"\"\"Stores information related to movies.\n\n Attributes: \n movie_title: title of the movie\n poster_image_url: url to the poster image\n trailer_youtube_url: url to the youtube trailer\n actors: a list of actors that appear\n ratings: ratings of the movie out of 5\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialize Movie class\"\"\"\n self.title = \"Some Movie title\"\n self.poster_image_url = 
\"https://placehold.it/270x410?text=Poster+Placeholder\"\n self.trailer_youtube_url = \"https://www.youtube.com/watch?v=wCc2v7izk8w\"\n self.actors = [\"Actor1\", \"Actor2\", \"Actor3\"]\n self.rating = 5","sub_path":"movie-trailer/media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"633877759","text":"\n\nclass ExaMetaData(object):\n \"\"\"\n This class implements lock-free meta data requests using `/*snapshot execution*/` SQL hint described in IDEA-476\n https://www.exasol.com/support/browse/IDEA-476\n\n If you still get locks, please make sure to update Exasol server to the latest minor version\n \"\"\"\n def __init__(self, connection):\n self.connection = connection\n self.sql_keywords = None\n\n def sql_columns(self, query, query_params=None):\n \"\"\"\n Get result set columns of SQL query without executing it\n \"\"\"\n st = self.connection.cls_statement(self.connection, query, query_params, prepare=True)\n columns = st.columns()\n st.close()\n\n return columns\n\n def schema_exists(self, schema_name):\n object_name = self.connection.format.default_format_ident_value(schema_name)\n\n st = self._execute_snapshot(\"\"\"\n SELECT 1\n FROM sys.exa_schemas\n WHERE schema_name={object_name}\n \"\"\", {\n 'object_name': object_name,\n })\n\n return st.rowcount() > 0\n\n def table_exists(self, table_name):\n if isinstance(table_name, tuple):\n object_schema = self.connection.format.default_format_ident_value(table_name[0])\n object_name = self.connection.format.default_format_ident_value(table_name[1])\n else:\n object_schema = self.connection.current_schema()\n object_name = self.connection.format.default_format_ident_value(table_name)\n\n st = self._execute_snapshot(\"\"\"\n SELECT 1\n FROM sys.exa_all_tables\n WHERE table_schema={object_schema}\n AND table_name={object_name}\n \"\"\", {\n 'object_schema': object_schema,\n 'object_name': object_name,\n })\n\n return st.rowcount() > 0\n\n def view_exists(self, view_name):\n if isinstance(view_name, tuple):\n object_schema = self.connection.format.default_format_ident_value(view_name[0])\n object_name = self.connection.format.default_format_ident_value(view_name[1])\n else:\n object_schema = self.connection.current_schema()\n object_name = self.connection.format.default_format_ident_value(view_name)\n\n st = self._execute_snapshot(\"\"\"\n SELECT 1\n FROM sys.exa_all_views\n WHERE view_schema={object_schema}\n AND view_name={object_name}\n \"\"\", {\n 'object_schema': object_schema,\n 'object_name': object_name,\n })\n\n return st.rowcount() > 0\n\n def list_schemas(self, schema_name_pattern='%'):\n st = self._execute_snapshot(\"\"\"\n SELECT *\n FROM sys.exa_schemas\n WHERE schema_name LIKE {schema_name_pattern}\n ORDER BY schema_name ASC\n \"\"\", {\n 'schema_name_pattern': schema_name_pattern,\n })\n\n return st.fetchall()\n\n def list_tables(self, table_schema_pattern='%', table_name_pattern='%'):\n st = self._execute_snapshot(\"\"\"\n SELECT *\n FROM sys.exa_all_tables\n WHERE table_schema LIKE {table_schema_pattern}\n AND table_name LIKE {table_name_pattern}\n ORDER BY table_schema ASC, table_name ASC\n \"\"\", {\n 'table_schema_pattern': table_schema_pattern,\n 'table_name_pattern': table_name_pattern,\n })\n\n return st.fetchall()\n\n def list_views(self, view_schema_pattern='%', view_name_pattern='%'):\n st = self._execute_snapshot(\"\"\"\n SELECT *\n FROM sys.exa_all_views\n WHERE view_schema LIKE 
{view_schema_pattern}\n AND view_name LIKE {view_name_pattern}\n ORDER BY view_schema ASC, view_name ASC\n \"\"\", {\n 'view_schema_pattern': view_schema_pattern,\n 'view_name_pattern': view_name_pattern,\n })\n\n return st.fetchall()\n\n def list_columns(self, column_schema_pattern='%', column_table_pattern='%'\n , column_object_type_pattern='%', column_name_pattern='%'):\n st = self._execute_snapshot(\"\"\"\n SELECT *\n FROM sys.exa_all_columns\n WHERE column_schema LIKE {column_schema_pattern}\n AND column_table LIKE {column_table_pattern}\n AND column_object_type LIKE {column_object_type_pattern}\n AND column_name LIKE {column_name_pattern}\n \"\"\", {\n 'column_schema_pattern': column_schema_pattern,\n 'column_table_pattern': column_table_pattern,\n 'column_object_type_pattern': column_object_type_pattern,\n 'column_name_pattern': column_name_pattern,\n })\n\n return st.fetchall()\n\n def list_objects(self, object_name_pattern='%', object_type_pattern='%', owner_pattern='%', root_name_pattern='%'):\n st = self._execute_snapshot(\"\"\"\n SELECT *\n FROM sys.exa_all_objects\n WHERE object_name LIKE {object_name_pattern}\n AND object_type LIKE {object_type_pattern}\n AND owner LIKE {owner_pattern}\n AND root_name LIKE {root_name_pattern}\n \"\"\", {\n 'object_name_pattern': object_name_pattern,\n 'object_type_pattern': object_type_pattern,\n 'owner_pattern': owner_pattern,\n 'root_name_pattern': root_name_pattern,\n })\n\n return st.fetchall()\n\n def list_object_sizes(self, object_name_pattern='%', object_type_pattern='%', owner_pattern='%', root_name_pattern='%'):\n st = self._execute_snapshot(\"\"\"\n SELECT *\n FROM sys.exa_all_object_sizes\n WHERE object_name LIKE {object_name_pattern}\n AND object_type LIKE {object_type_pattern}\n AND owner LIKE {owner_pattern}\n AND root_name LIKE {root_name_pattern}\n \"\"\", {\n 'object_name_pattern': object_name_pattern,\n 'object_type_pattern': object_type_pattern,\n 'owner_pattern': owner_pattern,\n 'root_name_pattern': root_name_pattern,\n })\n\n return st.fetchall()\n\n def list_indices(self, index_schema_pattern='%', index_table_pattern='%', index_owner_pattern='%'):\n st = self._execute_snapshot(\"\"\"\n SELECT *\n FROM sys.exa_all_indices\n WHERE index_schema LIKE {index_schema_pattern}\n AND index_table LIKE {index_table_pattern}\n AND index_owner LIKE {index_owner_pattern}\n \"\"\", {\n 'index_schema_pattern': index_schema_pattern,\n 'index_table_pattern': index_table_pattern,\n 'index_owner_pattern': index_owner_pattern,\n })\n\n return st.fetchall()\n\n def list_sql_keywords(self):\n \"\"\"\n Get reserved SQL keywords which cannot be used as identifiers without double-quote escaping\n Never hardcode this list! 
It might change with next Exasol server version without warning\n \"\"\"\n if not self.sql_keywords:\n st = self._execute_snapshot(\"\"\"\n SELECT keyword\n FROM EXA_SQL_KEYWORDS\n WHERE reserved IS TRUE\n ORDER BY keyword\n \"\"\")\n\n self.sql_keywords = st.fetchcol()\n\n return self.sql_keywords\n\n def _execute_snapshot(self, query, query_params=None):\n \"\"\"\n Execute query in snapshot transaction mode using SQL hint\n fetch_dict=True is enforced to prevent users from relying on order of columns in system views\n \"\"\"\n options = {\n 'fetch_dict': True,\n }\n\n return self.connection.cls_statement(self.connection, f\"/*snapshot execution*/{query}\", query_params, **options)\n\n def __repr__(self):\n return f'<{self.__class__.__name__} session_id={self.connection.session_id()}>'\n","sub_path":"pyexasol/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":7934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"313005423","text":"# -*- coding: utf-8 -*-\n## Least Square Regression with norm2 regularization\n#\n# approximate y = sin(2*pi*[0:0.01:1]) by\n# y = w0 + w1*x + w2*x^2 + w3*x^3 + ...+ wn*x^n\n# min{ |y - t|^2 + lambda*w'*w }\n#\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom basic.regressor import *\nfrom bayesian.regressor import *\n\ndef polybasis(x, order):\n vector = np.zeros([len(x),order+1])\n for i in range(order+1):\n vector[:,i] = np.power(x, i)\n return vector\n\n\nif __name__ == \"__main__\":\n\n sigma = 0.3\n t = np.linspace(0.01, 1, 100)\n N = len(t)\n order = 9\n\n perm1 = np.random.permutation(N)\n x1 = t[perm1]\n y1 = np.sin(2 * np.pi * x1) + sigma * np.random.randn(N)\n PHI1 = polybasis(x1, order)\n\n # train the model\n # w, b = ridgereg(y1, PHI1, 1e-4)\n # w, b = bayesreg(y1, PHI1)\n w, b = bardreg(y1, PHI1)\n\n # generate testing samples\n perm2 = np.random.permutation(N)\n x2 = t[perm2]\n y2 = np.sin(2 * np.pi * x2) + sigma * np.random.randn(N)\n PHI2 = polybasis(x2, order)\n\n # predict\n yp = np.matmul(PHI2, w) + b\n\n tt = np.sin(2 * np.pi * t)\n PHIt = polybasis(t, order)\n tp = np.matmul(PHIt, w) + b\n\n # visualization\n plt.figure(1)\n plt.plot(t, tt, '-r')\n plt.plot(t, tp, '-b')\n plt.plot(x1, y1, 'ob')\n plt.plot(x2, y2, 'og')\n plt.legend(['standard sin(x)', 'predicted curve', 'trainset', 'testset'])\n plt.show(block=True)\n","sub_path":"regressor_demo.py","file_name":"regressor_demo.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"647903958","text":"from coapthon.client.helperclient import HelperClient\r\nimport time\r\n\r\ndef get_file():\r\n file_name = \"10MB_r\"\r\n\r\n file = open(file_name, 'wb')\r\n client=HelperClient(server=('127.0.0.1',5683))\r\n start_time=time.time()\r\n response= client.get('basic/')\r\n file.write(str(response.payload).encode(\"ISO-8859-1\"))\r\n total_time=time.time()-start_time\r\n client.stop()\r\n return(total_time)\r\n","sub_path":"HW3/CoAP/client_prog.py","file_name":"client_prog.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"182250767","text":"from .db import postgres_database\nfrom .models import Product\nimport settings\n\n\nclass PostgresWorker():\n def __init__(self):\n postgres_database.connect()\n\n def create_instance(self, name, price, description):\n self._create_table_if_not_exist()\n query = 
Product.select().where(Product.name == name)\n\n if query.exists():\n instance = query.get()\n self._update_instance(instance, price, description)\n else:\n instance = Product.create(\n name=name,\n price=price,\n description=description\n )\n return instance\n\n def _update_instance(self, pr, price, description):\n pr.price = price\n pr.description = description\n pr.save()\n\n def _create_table_if_not_exist(self):\n if settings.POSTGRES_TABLE_NAME not in postgres_database.get_tables():\n Product.create_table()\n","sub_path":"rabbitmq/data_to_postgres/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"245729358","text":"# Copyright (c) 2021 CNES\n#\n# All rights reserved. Use of this source code is governed by a\n# BSD-style license that can be found in the LICENSE file.\nimport os\nimport netCDF4\nimport numpy as np\nimport pytest\nimport pyinterp\nimport pyinterp.fill\n\nGRID = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"dataset\",\n \"mss.nc\")\n\n\ndef load_data(cube=False):\n ds = netCDF4.Dataset(GRID)\n x_axis = pyinterp.Axis(ds.variables[\"lon\"][::5], is_circle=True)\n y_axis = pyinterp.Axis(ds.variables[\"lat\"][::5])\n mss = ds.variables[\"mss\"][::5, ::5].T\n mss[mss.mask] = float(\"nan\")\n if cube:\n z_axis = pyinterp.Axis(np.arange(2))\n mss = np.stack([mss.data] * len(z_axis)).transpose(1, 2, 0)\n return pyinterp.grid.Grid3D(x_axis, y_axis, z_axis, mss)\n return pyinterp.grid.Grid2D(x_axis, y_axis, mss.data)\n\n\ndef test_loess():\n grid = load_data()\n filled0 = pyinterp.fill.loess(grid, num_threads=0)\n filled1 = pyinterp.fill.loess(grid, num_threads=1)\n data = np.copy(grid.array)\n data[np.isnan(data)] = 0\n filled0[np.isnan(filled0)] = 0\n filled1[np.isnan(filled1)] = 0\n assert (filled0 - filled1).mean() == 0\n assert np.ma.fix_invalid(grid.array - filled1).mean() == 0\n assert (data - filled1).mean() != 0\n\n with pytest.raises(ValueError):\n pyinterp.fill.loess(grid, value_type=\"x\")\n\n\ndef test_gauss_seidel():\n grid = load_data()\n _, filled0 = pyinterp.fill.gauss_seidel(grid, num_threads=0)\n _, filled1 = pyinterp.fill.gauss_seidel(grid, num_threads=1)\n _, filled2 = pyinterp.fill.gauss_seidel(grid,\n first_guess='zero',\n num_threads=0)\n data = np.copy(grid.array)\n data[np.isnan(data)] = 0\n filled0[np.isnan(filled0)] = 0\n filled1[np.isnan(filled1)] = 0\n filled2[np.isnan(filled2)] = 0\n assert (filled0 - filled1).mean() == 0\n assert np.ma.fix_invalid(grid.array - filled1).mean() == 0\n assert (data - filled1).mean() != 0\n assert (filled2 - filled1).mean() != 0\n\n with pytest.raises(ValueError):\n pyinterp.fill.gauss_seidel(grid, '_')\n\n x_axis = pyinterp.Axis(np.linspace(-180, 180, 10), is_circle=True)\n y_axis = pyinterp.Axis(np.linspace(-90, 90, 10), is_circle=False)\n data = np.random.rand(len(x_axis), len(y_axis))\n grid = pyinterp.Grid2D(x_axis, y_axis, data)\n _, filled0 = pyinterp.fill.gauss_seidel(grid, num_threads=0)\n assert isinstance(filled0, np.ndarray)\n\n\ndef test_loess_3d():\n grid = load_data(True)\n mask = np.isnan(grid.array)\n filled0 = pyinterp.fill.loess(grid, num_threads=0)\n filled0[mask] = np.nan\n assert np.nanmean(filled0 - grid.array) == 0\n\n with pytest.raises(ValueError):\n pyinterp.fill.loess(grid, num_threads=0, nx=0, ny=1)\n\n with pytest.raises(ValueError):\n pyinterp.fill.loess(grid, num_threads=0, nx=1, ny=0)\n\n\ndef test_gauss_seidel_3d():\n grid = load_data(True)\n _, 
filled0 = pyinterp.fill.gauss_seidel(grid, num_threads=0)\n assert (filled0[:, :, 0] - filled0[:, :, 1]).mean() == 0\n","sub_path":"tests/test_fill.py","file_name":"test_fill.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"159661436","text":"from __future__ import print_function\nfrom logging import getLogger\nfrom infi.pyutils.contexts import contextmanager\nfrom infi.traceback import traceback_decorator\nfrom .util import LOGGING_FORMATTER_KWARGS, get_timestamp, get_platform_name\n\nlogger = getLogger(__name__)\n\n@contextmanager\ndef create_logging_handler_for_collection(tempdir, prefix):\n from sys import maxsize\n from os import path\n from logging import FileHandler, DEBUG, Formatter\n from logging.handlers import MemoryHandler\n target = FileHandler(path.join(tempdir, \"{}.{}.debug.log\".format(prefix, get_timestamp())))\n target.setFormatter(Formatter(**LOGGING_FORMATTER_KWARGS))\n handler = MemoryHandler(maxsize, target=target)\n handler.setLevel(DEBUG)\n try:\n yield handler\n finally:\n handler.close()\n\n@contextmanager\ndef create_temporary_directory_for_log_collection(prefix):\n from tempfile import mkdtemp\n from shutil import rmtree\n tempdir = mkdtemp(prefix=\"{}-logs\".format(prefix))\n def onerror(function, path, exc_info):\n logger.debug(\"Failed to delete {!r}\".format(path))\n try:\n yield tempdir\n finally:\n rmtree(tempdir, onerror=onerror)\n\n\ndef get_tar_path(prefix, optional_archive_path):\n from os import close, remove, path\n from tempfile import mkstemp\n fd, archive_path = mkstemp(suffix=\".tar.gz\", prefix=\"{}-logs.{}-\".format(prefix, get_timestamp()))\n close(fd)\n remove(archive_path)\n \n if optional_archive_path is None:\n return archive_path\n\n if path.isdir(optional_archive_path):\n return path.join(optional_archive_path, path.basename(archive_path))\n \n return optional_archive_path\n \n\n@contextmanager\ndef log_collection_context(logging_memory_handler, tempdir, prefix, optional_archive_path=None):\n from logging import root, DEBUG\n path = get_tar_path(prefix, optional_archive_path)\n root.addHandler(logging_memory_handler)\n root.setLevel(DEBUG)\n try:\n yield path\n finally:\n with open_archive(path) as archive:\n logging_memory_handler.flush()\n logging_memory_handler.close()\n add_directory(archive, tempdir)\n print(\"Logs collected successfully to {!r}\".format(path))\n\n@contextmanager\ndef open_archive(path):\n from tarfile import TarFile\n archive = TarFile.open(name=path, mode=\"w:gz\", bufsize=16*1024)\n try:\n yield archive\n finally:\n archive.close()\n\ndef workaround_issue_10760(srcdir):\n # WORKAROUND for http://bugs.python.org/issue10760\n # Python's TarFile has issues with files have less data than the reported size\n # The workaround we did back in 2010 was to wrap TarFile objects with methods that work around that case,\n # But due to the structure of tar files, the workaround was a bit cumbersome\n # This time around, since we're already copying aside what we want to put in the archive, we can fix the files\n # before adding them to the archive\n from os import path, walk, stat\n for dirpath, dirnames, filenames in walk(srcdir):\n for filename in filenames:\n filepath = path.join(dirpath, filename)\n expected = stat(filepath).st_size\n actual = 0\n with open(filepath, 'rb') as fd:\n bytes_read = len(fd.read(512))\n while bytes_read == 512:\n bytes_read = len(fd.read(512))\n actual =+ bytes_read\n if actual < expected:\n with 
open(filepath, 'ab') as fd:\n fd.write('\\x00' * (expected-actual))\n\n\ndef add_directory(archive, srcdir):\n from os.path import basename\n workaround_issue_10760(srcdir)\n archive.add(srcdir, basename(srcdir))\n\n\ndef collect(item, tempdir, timestamp, delta):\n from colorama import Fore\n logger.info(\"Collecting {!r}\".format(item))\n print(\"Collecting {} ... \".format(item), end='')\n try:\n item.collect(tempdir, timestamp, delta)\n logger.info(\"Collected {!r} successfully\".format(item))\n print(Fore.GREEN + \"ok\" + Fore.RESET)\n return True\n except:\n logger.exception(\"An error ocurred while collecting {!r}\".format(item))\n print(Fore.MAGENTA + \"error\" + Fore.RESET)\n return False\n\n@traceback_decorator\ndef run(prefix, items, timestamp, delta, optional_archive_path=None):\n end_result = True\n with create_temporary_directory_for_log_collection(prefix) as tempdir:\n with create_logging_handler_for_collection(tempdir, prefix) as handler:\n with log_collection_context(handler, tempdir, prefix, optional_archive_path) as archive_path:\n logger.info(\"Starting log collection\")\n for item in items:\n result = collect(item, tempdir, timestamp, delta)\n end_result = end_result and result\n end_result = 0 if end_result else 1\n return end_result, archive_path\n\n\n# TODO A web frontend that parser log collections\n# Get by ftp\n# Web frontend:\n# additional metadata -- description, tags, resolved (t/f)\n# sortable (customer, date, resolved/not)\n# link to JIRA\n# delete/bulk-delete\n# authentication\n# automatic analysis (e.g. not most recent version of power tools)\n# View:\n# links to extracted files\n","sub_path":"src/infi/logs_collector/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"292102092","text":"import base64\nimport codecs\n\n\n# If receiving from LoRa Server use this method to decode payload from base64 to hex\ndef base64_to_hex(base64_data):\n \"\"\"Takes base64 encoded string and returns hex string\n\n :param base64_data: base64 encoded string\n :return: decoded hex string\n \"\"\"\n missing_padding = len(base64_data) % 4\n if missing_padding != 0:\n base64_data += b'=' * (4 - missing_padding)\n value = codecs.encode(base64.b64decode(base64_data), 'hex')\n return value.decode('utf-8').upper()\n\n\ndef hex_to_int(hex_string):\n \"\"\"Returns hex_string converted to int. Method can work with signed 2's complement any length.\n\n :param hex_string: hex string. Example 'DEADBEEF' or 'deadbeef'.\n :return: int representation of hex_string\n \"\"\"\n # get total number of bits to be able to extract MSB. 
If MSB=1 number is signed\n    bits = len(bytearray.fromhex(hex_string)) * 8\n    val = int('0x' + hex_string, 16)\n    # get MSB and if MSB = 1 (means number is signed) - take 2's complement\n    if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n        val = val - (1 << bits) # compute negative value\n    return val\n\n\ndef digital_input_output_presence_illuminance(data):\n    \"\"\"Digital Output/Input/Presence/Illuminance | Data Resolution per bit = 1\n    All these values are the same in decoding.\n\n    :param data: hex string of sensor value\n    :return: int decoded value\n    \"\"\"\n    return hex_to_int(data)\n\n\ndef analog_input_output(data):\n    \"\"\"Analog Input/Output | Data Resolution per bit = 0.01 Signed\n\n    :param data: hex string of sensor value\n    :return: int decoded value\n    \"\"\"\n    return hex_to_int(data) / 100\n\n\ndef temperature(data):\n    \"\"\"Temperature | Data Resolution per bit = 0.1 °C Signed MSB\n\n    :param data: hex string of sensor value\n    :return: int decoded value\n    \"\"\"\n    return hex_to_int(data) / 10\n\n\ndef humidity(data):\n    \"\"\"Humidity | Data Resolution per bit = 0.5 % Unsigned\n\n    :param data: hex string of sensor value\n    :return: int decoded value\n    \"\"\"\n    return hex_to_int(data) / 2\n\n\ndef accelerometer(data):\n    \"\"\"Accelerometer | Data Resolution per bit = 0.001 G Signed MSB per axis\n    Data Size: 6 bytes. x axis value = 2 bytes, y axis value = 2 bytes, z axis value = 2 bytes.\n    Example: 04 D2 FB 2E 00 00 --> 04D2 - x, FB2E - y, 0000 - z.\n\n    :param data: hex string of sensor value\n    :return: dictionary of x,y,z axis as keys and their values\n    \"\"\"\n    return {'x': hex_to_int(data[:4]) / 1000, 'y': hex_to_int(data[4:8]) / 1000, 'z': hex_to_int(data[8:]) / 1000}\n\n\ndef barometer(data):\n    \"\"\"Barometer | Data Resolution per bit = 0.1 hPa Unsigned MSB\n\n    :param data: hex string of sensor value\n    :return: int decoded value\n    \"\"\"\n    return hex_to_int(data) / 10\n\n\ndef gyrometer(data):\n    \"\"\"Gyrometer | Data Resolution per bit = 0.01 °/s Signed MSB per axis\n    Data Size: 6 bytes. 
x axis value = 2 bytes, y axis value = 2 bytes, z axis value = 2 bytes.\n    Example: 04 D2 FB 2E 00 00 --> 04D2 - x, FB2E - y, 0000 - z.\n\n    :param data: hex string of sensor value\n    :return: dictionary of x,y,z axis as keys and their values\n    \"\"\"\n    return {'x': hex_to_int(data[:4]) / 100, 'y': hex_to_int(data[4:8]) / 100, 'z': hex_to_int(data[8:]) / 100}\n\n\ndef gps_location(data):\n    \"\"\"GPS Location | Data Resolution per bit below\n\n    * Latitude : 0.0001 ° Signed MSB\n    * Longitude : 0.0001 ° Signed MSB\n    * Altitude : 0.01 meter Signed MSB\n\n    :param data: hex string of sensor value\n    :return: dictionary of lat,long,alt as key and their values\n    \"\"\"\n    return {'lat': hex_to_int(data[:6]) / 10000, 'long': hex_to_int(data[6:12]) / 10000, 'alt': hex_to_int(data[12:]) / 100}\n\n\nhex_library = {\n    \"00\": {\n        \"name\": \"Digital Input\",\n        \"size\": 2,\n        \"action\": digital_input_output_presence_illuminance\n    },\n    \"01\": {\n        \"name\": \"Digital Output\",\n        \"size\": 2,\n        \"action\": digital_input_output_presence_illuminance\n    },\n    \"02\": {\n        \"name\": \"Analog Input\",\n        \"size\": 4,\n        \"action\": analog_input_output\n    },\n    \"03\": {\n        \"name\": \"Analog Output\",\n        \"size\": 4,\n        \"action\": analog_input_output\n    },\n    \"65\": {\n        \"name\": \"Illuminance Sensor\",\n        \"size\": 4,\n        \"action\": digital_input_output_presence_illuminance\n    },\n    \"66\": {\n        \"name\": \"Presence Sensor\",\n        \"size\": 2,\n        \"action\": digital_input_output_presence_illuminance\n    },\n    \"67\": {\n        \"name\": \"Temperature Sensor\",\n        \"size\": 4,\n        \"action\": temperature\n    },\n    \"68\": {\n        \"name\": \"Humidity Sensor\",\n        \"size\": 2,\n        \"action\": humidity\n    },\n    \"71\": {\n        \"name\": \"Accelerometer\",\n        \"size\": 12,\n        \"action\": accelerometer\n    },\n    \"73\": {\n        \"name\": \"Barometer\",\n        \"size\": 4,\n        \"action\": barometer\n    },\n    \"86\": {\n        \"name\": \"Gyrometer\",\n        \"size\": 12,\n        \"action\": gyrometer\n    },\n    \"88\": {\n        \"name\": \"GPS Location\",\n        \"size\": 18,\n        \"action\": gps_location\n    }\n}\n","sub_path":"python_cayennelpp/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"410710485","text":"#! /usr/bin/python3\n#-#coding:UTF-8#-#\n# Author: weber\n# Date:2018-05-25\n\nadict = []\n\nfh = open('mir.collapse.fa', 'w+') \nfor line in open('mir.collapse'):\n\tlines = line.strip().split()  # split() defaults to all whitespace characters, including spaces, newlines and tabs\n\tadict.append(lines)\t\n#print(adict)\ni = 1\nfor x in adict[1:]:\n\tfh.write('>ESB_'+str(i)+'_x'+x[1]+'\\n')\n\tfh.write(x[0]+'\\n')\n\ti += 1\n\t\nfh.close()\n","sub_path":"collapsemiRNAreads_1.py","file_name":"collapsemiRNAreads_1.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"391995854","text":"# given a and b, return a ** b, without using the inbuilt operator\n\n# understand\n## one way: a single number being multiplied by itself\n## Valid and invalid inputs\n### invalid: letters\n### valid: any number whatsoever\n### maybe no negative numbers for b? 
2 ** -2 = 1/4\n#### 1/( 2 ** 2 )\n\n### Square root with a power: raise to 1/2, 0.5\n### i = sqrt(-1) == (-1)**0.5\n\n## Anything to the power of 0 = 1\n### Let's not handle decimal numbers for b\n\n## Plan\n### We have two numbers, one or both may be negative\n\n### Iterative or recursive?\n\n### 2^3\n### a = 2, b = 3\n### Iterative pseudocode\n#### Check if b == 0, if so return 1\n#### Have a total = 0\n#### While loop: while b isn't 1\n##### multiply a by itself\n##### decrement b to approach 1\n#### return total\n\ndef iter_power_v1(a, b):\n\tif b == 0:\n\t\treturn 1\n\ttotal = a\n\twhile b != 1:\n\t\ttotal *= a\n\t\tb -= 1\n\treturn total\n\n# print(iter_power_v1(2, 3) == (2 ** 3))\n# print(iter_power_v1(10, 2) == (10 ** 2))\n# print(iter_power_v1(5, 7) == (5 ** 7))\n# print(iter_power_v1(10, 0) == (10 ** 0))\n# print(iter_power_v1(2, 1) == 2)\n# print(iter_power_v1(100, 1) == 100)\n\n## Review\n### Handled values for a and b successfully\n### And values of 0 for b!\n### But not negative\n\n## negative values for b\n## decimal value for b\n\n### Iterative pseudocode\n#### Check if b is integer, otherwise return error message\n#### Check if b == 0, if so return 1\n#### check if b < 0: if so, multiply by -1 and set invert to true\n\n#### Have a total == a\n#### While loop: while b isn't 1\n##### multiply a by itself\n##### decrement b to approach 1\n#### return total\n\ndef iter_power_v2(a, b):\n\tinvert = False\n\tif type(b) is not int:\n\t\treturn \"Sorry, we don't handle decimals.\"\n\tif b == 0:\n\t\treturn 1\n\tif b < 0:\n\t\tb *= -1\n\t\tinvert = True\n\ttotal = a\n\twhile b != 1:\n\t\ttotal *= a\n\t\tb -= 1\n\n\tif invert:\n\t\treturn 1 / total\n\telse:\n\t\treturn total\n\nprint(iter_power_v2(2, 3) == (2 ** 3))\nprint(iter_power_v2(10, 2) == (10 ** 2))\nprint(iter_power_v2(5, 7) == (5 ** 7))\nprint(iter_power_v2(10, 0) == (10 ** 0))\nprint(iter_power_v2(2, -2) == (2 ** -2))\nprint(iter_power_v2(2, 1) == 2)\nprint(iter_power_v2(100, 1) == 100)\n","sub_path":"powers.py","file_name":"powers.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"320897057","text":"# coding: utf-8\n\nimport sys\nimport math\ninlist = sys.stdin.read().strip().split()\n\ndef calclis(lis):\n n=0\n if lis[1]<100:\n n=0\n else:\n\n if lis[0]==0:\n n = math.ceil(lis[1]//100)*5\n\n elif lis[0]==1:\n n = math.ceil(lis[1]//100)*3\n\n elif lis[0]==2:\n n = math.ceil(lis[1]//100)*2\n\n elif lis[0]==3:\n n = math.ceil(lis[1]//100)*1\n else:\n n=\"え?\"\n print(n)\n return n\n\nnumlist = list(map(lambda x:int(x), inlist))\nnumlist = numlist[1:]\ncooklis = [numlist[x:x+2] for x in range(0,len(numlist),2)]\nsortedlis = sorted(cooklis, key=lambda n:n[0])\nsumlis = 0\nfor s in sortedlis:\n print(\"sumlis------\")\n sumlis+=calclis(s)\n print(sumlis)\n print(\"------sumlis\")\n\nprint(sumlis)\n","sub_path":"fxxxxxckU/22330cc.py","file_name":"22330cc.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546763757","text":"# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nCommon NHWC padding ops\n\"\"\"\nimport itertools\nfrom typing import List\n\nimport jinja2\n\nfrom aitemplate import backend\nfrom aitemplate.backend import registry\nfrom aitemplate.compiler.base import Operator, Tensor\nfrom aitemplate.utils import shape_utils\n\n# pylint: disable=C0103,W0221\n\n\nSHAPE_ASSIGNMENT_TEMPLATE = jinja2.Template(\n \"\"\"\n{{indent}}{{y_dim0}} = NO;\n{{indent}}{{y_dim1}} = HO;\n{{indent}}{{y_dim2}} = WO;\n\"\"\"\n)\n\n\nclass nhwc_pad_common(Operator):\n \"\"\"\n Pad the 3-channel input data to 4/8-channel.\n \"\"\"\n\n def __init__(self, shape_func_template, padded_channels):\n super().__init__()\n self._attrs[\"op\"] = f\"nhwc3to{padded_channels}\"\n self.shape_eval_template = shape_func_template\n self.shape_save_template = SHAPE_ASSIGNMENT_TEMPLATE\n\n def _infer_shape(self, x: List[int]):\n eval_func = self.shape_eval_template.render(\n indent=\"\",\n dtype=\"\",\n x_dim0=x[0],\n x_dim1=x[1],\n x_dim2=x[2],\n x_dim3=x[3],\n )\n output = {}\n exec(eval_func, output) # noqa: P204\n return [\n int(output[\"NO\"]),\n int(output[\"HO\"]),\n int(output[\"WO\"]),\n int(output[\"CO\"]),\n ]\n\n def _infer_shapes(self, x: Tensor):\n x_shape_values = [var._attrs[\"values\"] for var in x._attrs[\"shape\"]]\n x_shapes = itertools.product(*x_shape_values)\n # run infershape for each\n y_shapes = []\n for x_shape in x_shapes:\n y_shape = self._infer_shape(x_shape)\n y_shapes.append(y_shape)\n\n def unique(vector):\n return sorted(set(vector))\n\n output_shape = [\n shape_utils.gen_int_var(unique([d[0] for d in y_shapes])),\n shape_utils.gen_int_var(unique([d[1] for d in y_shapes])),\n shape_utils.gen_int_var(unique([d[2] for d in y_shapes])),\n shape_utils.gen_int_var(unique([d[3] for d in y_shapes])),\n ]\n return output_shape\n\n def __call__(self, x: Tensor) -> List[Tensor]:\n self._attrs[\"inputs\"] = [x]\n self._set_depth()\n output_shape = self._infer_shapes(x)\n output = Tensor(output_shape, src_ops={self}, dtype=x.dtype())\n self._attrs[\"outputs\"] = [output]\n return output\n\n def _get_op_attributes(self):\n return {\n \"padded_channels\": self._attrs[\"op\"].split(\"to\")[-1],\n \"shape_func_template\": self.shape_eval_template,\n }\n\n def gen_function(self) -> str:\n target = backend.target.Target.current()\n template_path = target.template_path()\n func_key = \"{target}.{op}.gen_function\".format(\n target=target.name(), op=self._attrs[\"op\"]\n )\n func = registry.get(func_key)\n return func(\n self._attrs,\n template_path,\n self.shape_eval_template,\n self.shape_save_template,\n )\n","sub_path":"python/aitemplate/compiler/ops/padding/nhwc_pad_common.py","file_name":"nhwc_pad_common.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"618859721","text":"from matplotlib.backends.backend_pdf import PdfPages\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.misc import 
imread\r\nimport os\r\nimport numpy as np\r\n\r\nfiles = [ \"oracle.PNG\",\r\n \"SQL.jpg\" ]\r\ndef plotImage(f):\r\n folder = \"C:/temp/\"\r\n im = imread(os.path.join(folder, f)).astype(np.float32) / 255\r\n plt.imshow(im)\r\n a = plt.gca()\r\n a.get_xaxis().set_visible(False) # We don't need axis ticks\r\n a.get_yaxis().set_visible(False)\r\n\r\npp = PdfPages(\"c:/temp/page1.pdf\")\r\nplt.subplot(121)\r\nplotImage(files[0])\r\nplt.subplot(122)\r\nplotImage(files[1])\r\npp.savefig(plt.gcf()) # This generates page 1\r\npp.savefig(plt.gcf()) # This generates page 2\r\n\r\npp.close()\r\n","sub_path":"pdfcreation.py","file_name":"pdfcreation.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"77256035","text":"#!/usr/bin/python3\n\"\"\"Create a new view for Review objects\"\"\"\n\nfrom flask import jsonify, request, abort\nfrom models.state import State\nfrom models.review import Review\nfrom models import storage\nfrom api.v1.views import app_views\n\n\n@app_views.route('/places/<place_id>/reviews', methods=[\"GET\", \"POST\"],\n strict_slashes=False)\ndef list_reviews(place_id):\n \"\"\" Retrieves the list of all Review objects \"\"\"\n place = storage.get(\"Place\", place_id)\n if place is None:\n abort(404)\n if request.method == \"GET\":\n reviews = storage.all(\"Review\")\n all_review = []\n for key in reviews.values():\n if key.place_id == place_id:\n all_review.append(key.to_dict())\n return jsonify(all_review)\n if request.method == \"POST\":\n response = request.get_json()\n if response is None:\n abort(400, \"Not a JSON\")\n if \"user_id\" not in response:\n abort(400, \"Missing user_id\")\n user = storage.get(\"User\", response[\"user_id\"])\n if user is None:\n abort(404)\n if \"text\" not in response:\n abort(400, \"Missing text\")\n response[\"place_id\"] = place_id\n new_review = Review(**response)\n new_review.save()\n return jsonify(new_review.to_dict()), 201\n\n\n@app_views.route('/reviews/<review_id>', methods=[\"GET\", \"DELETE\", \"PUT\"],\n strict_slashes=False)\ndef review(review_id):\n \"\"\" Manipulate an specific Review \"\"\"\n review = storage.get(\"Review\", review_id)\n if review is None:\n abort(404)\n if request.method == \"GET\":\n return jsonify(review.to_dict())\n if request.method == \"DELETE\":\n storage.delete(review)\n storage.save()\n return jsonify({}), 200\n if request.method == \"PUT\":\n data = request.get_json()\n if data is None:\n abort(400, \"Not a JSON\")\n for key, value in data.items():\n if key not in ['id', 'created_at', 'updated_at', 'user_id',\n 'place_id']:\n setattr(review, key, value)\n storage.save()\n return jsonify(review.to_dict()), 200\n","sub_path":"api/v1/views/places_reviews.py","file_name":"places_reviews.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"642669212","text":"import time\nimport pandas as pd\nimport numpy as np\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! 
Let\\'s explore some US bikeshare data!')\n \n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = (input(\"Please enter the name of the city to analyze (options are 'chicago', 'new york city' or 'washington'): \\n\")).lower()\n \n # check user input for validity\n while city != 'chicago' and city != 'new york city' and city != 'washington': \n print(\"Invalid input.\")\n print(\"Valid input: chicaco, new york city, or washington.\")\n city = (input(\"Please enter either 'chicago', 'new york city' or 'washington': \\n\")).lower()\n \n # get user input for month (all, january, february, ... , june) \n month = (input(\"Please enter the name of the month to filter by ('january', 'february', 'march', 'april', 'may' or 'june'), or 'all' to apply no month filter: \\n\")).capitalize()\n \n # check user input for validity\n months = ('All', 'January', 'February', 'March', 'April', 'May', 'June')\n while month not in months:\n print(\"Invalid input.\")\n month = (input(\"Please enter either 'january', 'february', 'march', 'april', 'may', 'june' or 'all': \\n\")).capitalize()\n \n # get user input for day of week (all, monday, tuesday, ... sunday)\n day = (input(\"Please enter the name of the weekday to filter by (e.g. 'monday', 'tuesday', etc.) or 'all' to apply no day filter: \\n\")).capitalize()\n \n # check user input for validity\n weekdays = ('All', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday')\n while day not in weekdays:\n print(\"Invalid input.\")\n day = (input(\"Please enter the name of the weekday to filter by (e.g. 'monday', 'tuesday', etc.) or 'all' to apply no day filter: \\n\")).capitalize()\n\n print('-'*40)\n return city, month, day\n\n\ndef load_raw_data(city):\n \"\"\"\n Loads data for the specified city.\n\n Args:\n (str) city - name of the city to analyze\n Returns:\n df - Pandas DataFrame containing city data\n \"\"\"\n # Read data from CSV file into Pandas dataframe\n df = pd.read_csv(CITY_DATA[city])\n return df\n\n\ndef filter_data(raw_df,month,day):\n \"\"\"Filters the data based on user input and adds 'Hour' column to dataframe.\n \n Args:\n (dataframe) Pandas Dataframe raw_df - dataframe with data of the city that was asked for by the user\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n dataframe - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n \n # Create copy of raw dataframe\n df = raw_df.copy()\n \n # Convert start_time to datetime format\n df['start_time'] = pd.to_datetime(df['start_time'])\n \n # Add column for Start Month\n months = {1:'January', 2:'February', 3:'March', 4:'April', 5:'May', 6:'June', 7:'July', 8:'August', 9:'September', 10:'October', 11:'November', 12:'December'}\n df['Month'] = df['start_time'].dt.month\n df[\"Month\"].replace(months, inplace=True)\n \n # Add column for Start Day\n df['Day'] = df['start_time'].dt.day_name()\n \n # Add column for Start Hour\n df['Hour'] = df['start_time'].dt.hour\n \n # Filter dataframe on user input\n if month != 'All':\n df = df[df['Month'] == month]\n\n # filter by day of week if applicable\n if day != 'All':\n df = df[df['Day'] == day]\n \n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # 
display the most common month\n popular_month = df['Month'].mode()[0]\n print('Most Popular Month: ', popular_month)\n\n # display the most common day of week\n popular_day = df['Day'].mode()[0]\n print('Most Popular Day: ', popular_day)\n\n # display the most common start hour\n popular_hour = df['Hour'].mode()[0]\n print('Most Popular Start Hour: ', popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('The Most Popular Start Station: ', popular_start_station,'\\n')\n\n # display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('The Most Popular End Station: ', popular_end_station,'\\n')\n\n # display most frequent combination of start station and end station trip\n df['Station Combination'] = 'Start Station: ' + df['Start Station'] +'\\n End Station: '+ df['End Station']\n popular_station_combination = df['Station Combination'].mode()[0]\n print('The Most Popular Start And End Station Combination:\\n', popular_station_combination)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n \n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('Total Travel Time: ', total_travel_time)\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('Mean Travel Time: ', mean_travel_time)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_type_count = df['User Type'].value_counts()\n print(\"Counts of User Types:\\n\",user_type_count,'\\n')\n\n # Display counts of gender\n if 'Gender' in df.columns:\n gender_count = df['Gender'].value_counts()\n print(\"Counts of Gender:\\n\",gender_count,'\\n')\n\n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n earliest_birth_year = df['Birth Year'].min()\n most_recent_birth_year = df['Birth Year'].max()\n most_common_birth_year = df['Birth Year'].mode()[0]\n \n print(\"Earliest Birth Year of Customers: \",earliest_birth_year)\n print(\"Most Recent Birth Year of Customers: \",most_recent_birth_year)\n print(\"Most Common Birth Year of Customers: \",most_common_birth_year)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n raw_df = load_raw_data(city)\n df = filter_data(raw_df,month,day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n \n # Ask the user if they want to see 5 lines of raw data\n show_raw = input('\\nWould you like to see 5 lines of raw data? Enter yes or no.\\n')\n while show_raw == 'yes':\n print(raw_df.head())\n show_raw = input('\\nWould you like to see 5 lines of raw data? Enter yes or no.\\n')\n \n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":8049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"465074800","text":"import subprocess\nimport asyncio\nimport inspect\nimport base64\nimport sys\nimport re\nimport os\n\n\nimport discord\n\nfrom discord.ext import commands\n\nfrom cogs.util import checks\nfrom cogs.util.categories import category\n\n\nclass Misc:\n def __init__(self, bot):\n self.bot = bot\n\n @category('misc')\n @commands.command()\n async def id(self, ctx):\n '''Get your user id'''\n await ctx.send('<@{0}>, your ID is `{0}`'.format(ctx.author.id))\n\n @category('bot')\n @commands.command()\n async def joinserver(self, ctx):\n '''Invite the bot to your server'''\n await ctx.send('Sorry. This bot has been designed to only work on HTC.')\n\n @category('bot')\n @commands.command()\n async def setname(self, ctx, *, name):\n '''Change the bot's username'''\n try:\n await self.bot.user.edit(username=name)\n except discord.HTTPException:\n await ctx.send('Changing the name failed.')\n\n @category('bot')\n @commands.command()\n async def setnick(self, ctx, *, name):\n '''Change the bot's nickname'''\n try:\n await ctx.guild.get_member(self.bot.user.id).edit(nick=name)\n except discord.HTTPException:\n await ctx.send('Changing the name failed.')\n\n @category('bot')\n @commands.command()\n async def setavatar(self, ctx):\n '''Change the bot's profile picture'''\n attachment = ctx.message.attachments[0]\n await attachment.save(attachment.filename)\n try:\n with open(attachment.filename, 'rb') as avatar:\n await self.bot.user.edit(avatar=avatar.read())\n except discord.HTTPException:\n await ctx.send('Changing the avatar failed.')\n except discord.InvalidArgument:\n await ctx.send('You did not upload an image.')\n\n @category('bot')\n @commands.command(aliases=['shutdown'])\n async def die(self, ctx):\n \"\"\"Shuts down the bot\"\"\"\n ctx.bot.dying = True\n await ctx.send(':wave:')\n await ctx.bot.logout()\n\n @category('bot')\n @commands.command()\n async def restart(self, ctx):\n '''Restart the bot'''\n ctx.bot.dying = True\n await ctx.send('Shutting down the bot. If the bot is in a restart loop, it will start back up.\\nPlease use `{}die` in future as it is a more accurate command.'.format(ctx.prefix))\n await ctx.bot.logout()\n\n @category('comp')\n @commands.command(aliases=['startcomp'])\n @checks.not_dm()\n async def start_comp(self, ctx):\n '''Start a competition'''\n if self.bot.like_comp_active:\n return await ctx.send('There is already a competition going on.')\n self.bot.like_comp_active = True\n self.bot.like_comp = {}\n await ctx.send('A like competition has been started! 
Woot?')\n\n @category('comp')\n @commands.command(aliases=['cancelcomp'])\n @checks.not_dm()\n async def cancel_comp(self, ctx):\n '''Cancel any current competitions'''\n if not self.bot.like_comp_active:\n return await ctx.send('There isn\\'t a competition going on..')\n self.bot.like_comp_active = False\n self.bot.like_comp = {}\n await ctx.send('The like competition has been canceled.')\n\n @category('comp')\n @commands.command(aliases=['endcomp'])\n @checks.not_dm()\n async def end_comp(self, ctx):\n '''End the current competition'''\n if not self.bot.like_comp_active:\n return await ctx.send('There isn\\'t a competition going on..')\n self.bot.like_comp_active = False\n\n m = 'The like competition has ended.\\n**Results:**\\n'\n likes = []\n for user in self.bot.like_comp:\n for song in self.bot.like_comp[user]:\n likes.append((user, song, len(self.bot.like_comp[user][song])))\n likes.sort(key=lambda x:x[2], reverse=True)\n\n m += '\\n'.join('`{}`: **{}** with the song **{}** and **{} like{}**'.format(n + 1, i[0], i[1], i[2], 's' if i[2] != 1 else '') for n, i in enumerate(likes[:10]))\n\n self.bot.like_comp = {}\n await ctx.send(m)\n\n @category('misc')\n @commands.command(aliases=['mostliked', 'most_likes', 'mostlikes'])\n async def most_liked(self, ctx):\n '''Get the top 10 most liked songs of all time'''\n likes = {}\n for i in self.bot.likes:\n for j in self.bot.likes[i]:\n j = base64.b64decode(j.encode('ascii')).decode('utf-8')\n if j not in likes:\n likes[j] = 0\n likes[j] += 1\n likes = list(likes.items())\n likes.sort(key=lambda x:x[1], reverse=True)\n likes\n m = '**The top 10 most liked songs of all time are:**\\n'\n m += '\\n'.join('{} ({} like{})'.format(i[0], i[1], 's' if i[1] != 1 else '') for i in likes[:10])\n await ctx.send(m)\n\n @category('misc')\n @commands.command(aliases=['permissions'])\n @checks.not_dm()\n async def perms(self, ctx):\n '''View your permissions'''\n perms = await checks.permissions_for(ctx)\n whitelist = []\n vc_only = []\n perms = await checks.permissions_for(ctx)\n cats = {}\n for cmd in ctx.bot.commands:\n if not hasattr(cmd, 'category'):\n cmd.category = 'Misc'\n if cmd.category.lower() not in cats:\n cats[cmd.category.lower()] = []\n cats[cmd.category.lower()].append(cmd)\n \n print(cats)\n for cat in perms['categories']:\n if cat in cats:\n for cmd in cats[cat]:\n for check in cmd.checks:\n try:\n if not await check(ctx):\n break\n except Exception as e:\n if 'user_in_vc' in e.args:\n vc_only.append(cmd.name)\n break\n else:\n whitelist.append(cmd.name)\n m = '```yaml\\n'\n m += 'Command_Whitelist: {}\\n'.format(', '.join(whitelist))\n if len(vc_only)>0: m += 'VC_only: {}\\n'.format(', '.join(vc_only))\n m += 'Max_Song_Length: {}\\n'.format(perms['max_song_length'])\n m += 'Max_Songs: {}\\n'.format(perms['max_songs_queued'])\n m += '```'\n await ctx.author.send(m)\n\n @category('misc')\n @commands.command()\n @checks.not_dm()\n async def listids(self, ctx):\n '''Get all of the IDs for the current server'''\n data = 'Your ID: {}\\n\\n'.format(ctx.author.id)\n\n data += 'Text Channel IDs:\\n'\n for c in ctx.guild.channels:\n if isinstance(c, discord.TextChannel):\n data += '{}: {}\\n'.format(c.name, c.id)\n\n data += '\\nVoice Channel IDs:\\n'\n for c in ctx.guild.channels:\n if isinstance(c, discord.VoiceChannel):\n data += '{}: {}\\n'.format(c.name, c.id)\n\n data += '\\nRole IDs:\\n'\n for r in ctx.guild.roles:\n data += '{}: {}\\n'.format(r.name, r.id)\n\n data += '\\nUser IDs:\\n'\n if ctx.guild.large:\n await 
self.bot.request_offline_members(ctx.guild)\n for m in ctx.guild.members:\n data += '{}: {}\\n'.format(m.name, m.id)\n\n filename = '{}-ids-all.txt'.format(\"\".join([x if x.isalnum() else \"_\" for x in ctx.guild.name]))\n\n with open(filename, 'wb') as ids_file:\n ids_file.write(data.encode('utf-8'))\n\n await ctx.send(':mailbox_with_mail:')\n with open(filename, 'rb') as ids_file:\n await ctx.author.send(file=discord.File(ids_file))\n\n os.remove(filename)\n\n @category('modding')\n @commands.command()\n async def bldump(self, ctx):\n '''Gets a list of every blacklisted user.'''\n\n m = '**Blacklisted users:\\n**'\n m += '\\n'.join(str(i) for i in self.bot.blacklist)\n await ctx.author.send(m)\n await ctx.send(':mailbox_with_mail:')\n\n @category('modding')\n @commands.command()\n async def blacklist(self, ctx, mode, id):\n \"\"\"Blacklist a user from using commands\"\"\"\n mode = mode.lower()\n if mode not in ['+', '-', 'add', 'remove']:\n await ctx.send('Usage: `{}blacklist [+|-|add|remove] <user id>`'.format(ctx.prefix))\n return\n\n try:\n id = int(id)\n except ValueError:\n await ctx.send('Usage: `{}blacklist [+|-|add|remove] <user id>`'.format(ctx.prefix))\n return\n\n if mode in ['+', 'add']:\n user = ctx.guild.get_member(id)\n if user is None or not user.permissions_in(ctx.channel).manage_channels:\n if id not in self.bot.blacklist:\n self.bot.blacklist.append(id)\n self.bot.save_bl()\n await ctx.send('The user with the id `{}` has been blacklisted.'.format(id))\n else:\n await ctx.send('The user with the id `{}` has already been blacklisted.'.format(id))\n else:\n await ctx.send('You can\\'t blacklist someone with `Manage Channels`. Please ask a developer if you *must* blacklist them.')\n else:\n if id not in self.bot.blacklist:\n await ctx.send('`{}` isn\\'t in the blacklist.'.format(id))\n else:\n while id in self.bot.blacklist:\n self.bot.blacklist.remove(id)\n self.bot.save_bl()\n await ctx.send('The user with the id `{}` has been removed from the blacklist.'.format(id))\n\n @category('git')\n @commands.command(aliases=['git_pull'])\n async def update(self, ctx):\n '''Updates the bot from git'''\n\n await ctx.send(':warning: Warning! Pulling from git!')\n\n if sys.platform == 'win32':\n process = subprocess.run('git pull', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.stdout, process.stderr\n else:\n process = await asyncio.create_subprocess_exec('git', 'pull', stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = await process.communicate()\n stdout = stdout.decode().splitlines()\n stdout = '\\n'.join('+ ' + i for i in stdout)\n stderr = stderr.decode().splitlines()\n stderr = '\\n'.join('- ' + i for i in stderr)\n\n await ctx.send('`Git` response: ```diff\\n{}\\n{}```'.format(stdout, stderr))\n await ctx.send('These changes will only come into effect next time you restart the bot. Use `{0}die` or `{0}restart` now (or later) to do that.'.format(ctx.prefix))\n \n @category('git')\n @commands.command()\n async def revert(self, ctx, commit):\n '''Revert local copy to specified commit'''\n\n await ctx.send(':warning: Warning! 
Reverting!')\n\n if sys.platform == 'win32':\n process = subprocess.run('git reset --hard {}'.format(commit), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.stdout, process.stderr\n else:\n process = await asyncio.create_subprocess_exec('git', 'reset', '--hard', commit, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = await process.communicate()\n stdout = stdout.decode().splitlines()\n stdout = '\\n'.join('+ ' + i for i in stdout)\n stderr = stderr.decode().splitlines()\n stderr = '\\n'.join('- ' + i for i in stderr)\n\n await ctx.send('`Git` response: ```diff\\n{}\\n{}```'.format(stdout, stderr))\n await ctx.send('These changes will only come into effect next time you restart the bot. Use `{0}die` or `{0}restart` now (or later) to do that.'.format(ctx.prefix))\n \n @category('git')\n @commands.command(aliases=['gitlog'])\n async def git_log(self, ctx, commits:int = 20):\n '''Shows the latest commits. Defaults to 20 commits.'''\n\n if sys.platform == 'win32':\n process = subprocess.run('git log --pretty=oneline --abbrev-commit', shell=True, \n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = process.stdout, process.stderr\n else:\n process = await asyncio.create_subprocess_exec('git', 'log', '--pretty=oneline', '--abbrev-commit', \n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = await process.communicate()\n stdout = stdout.decode().splitlines()\n stdout = '\\n'.join('+ ' + i[:90] for i in stdout[:commits])\n stderr = stderr.decode().splitlines()\n stderr = '\\n'.join('- ' + i for i in stderr)\n \n if commits > 10:\n try:\n await ctx.author.send('`Git` response: ```diff\\n{}\\n{}```'.format(stdout, stderr))\n except discord.errors.HTTPException:\n import os\n with open('gitlog.txt', 'w') as log_file:\n log_file.write('{}\\n{}'.format(stdout,stderr))\n with open('gitlog.txt', 'r') as log_file:\n await ctx.author.send(file=discord.File(log_file))\n os.remove('gitlog.txt')\n else:\n await ctx.send('`Git` response: ```diff\\n{}\\n{}```'.format(stdout, stderr))\n\n \n @category('bot')\n @commands.command(aliases=['exception'])\n async def error(self, ctx, *, text: str = None):\n '''Raises an error. Testing purposes only, please don't use.'''\n raise Exception(text or 'Woo! 
Errors!')\n\n @category('misc')\n @commands.command()\n async def help(self, ctx, *args):\n '''This help message'''\n cmds = {i for i in ctx.bot.all_commands.values()}\n\n if len(args) == 0:\n d = ''#'**TWOWBot help:**'\n\n cats = {}\n for cmd in cmds:\n if not hasattr(cmd, 'category'):\n cmd.category = 'Misc'\n if cmd.category not in cats:\n cats[cmd.category] = []\n cats[cmd.category].append(cmd)\n cats = list(cats.keys())\n cats.sort()\n \n width = max([len(cat) for cat in cats]) + 2\n d += '**Categories:**\\n'\n for cat in zip(cats[0::2], cats[1::2]):\n d += '**`{}`**{}**`{}`**\\n'.format(cat[0],' ' * int(2.3 * (width-len(cat[0]))), cat[1])\n if len(cats)%2 == 1:\n d += '**`{}`**\\n'.format(cats[-1])\n \n d += '\\nUse `{0}help <category>` to list commands in a category.\\n'.format(ctx.prefix)\n d += 'Use `{0}help <command>` to get in depth help for a command.\\n'.format(ctx.prefix)\n\n elif len(args) == 1:\n cats = {}\n for cmd in cmds:\n if not hasattr(cmd, 'category'):\n cmd.category = 'Misc'\n if cmd.category not in cats:\n cats[cmd.category] = []\n cats[cmd.category].append(cmd)\n if args[0].title() in cats:\n d = 'Commands in category **`{}`**:\\n'.format(args[0])\n for cmd in sorted(cats[args[0].title()], key=lambda x:x.name):\n d += '\\n `{}{}`'.format(ctx.prefix, cmd.name)\n\n brief = cmd.brief\n if brief is None and cmd.help is not None:\n brief = cmd.help.split('\\n')[0]\n\n if brief is not None:\n d += ' - {}'.format(brief)\n d += '\\n'\n else:\n if args[0] not in ctx.bot.all_commands:\n d = 'Command not found.'\n else:\n cmd = ctx.bot.all_commands[args[0]]\n d = 'Help for command `{}`:\\n'.format(cmd.name)\n d += '\\n**Usage:**\\n'\n\n if type(cmd) != commands.core.Group:\n params = list(cmd.clean_params.items())\n p_str = ''\n for p in params:\n if p[1].default == p[1].empty:\n p_str += ' [{}]'.format(p[0])\n else:\n p_str += ' <{}>'.format(p[0])\n d += '`{}{}{}`\\n'.format(ctx.prefix, cmd.name, p_str)\n else:\n d += '`{}{} '.format(ctx.prefix, cmd.name)\n if cmd.invoke_without_command:\n d += '['\n else:\n d += '<'\n d += '|'.join(cmd.all_commands.keys())\n if cmd.invoke_without_command:\n d += ']`\\n'\n else:\n d += '>`\\n'\n\n d += '\\n**Description:**\\n'\n d += '{}\\n'.format('None' if cmd.help is None else cmd.help.strip())\n\n if cmd.checks:\n d += '\\n**Checks:**'\n for check in cmd.checks:\n d += '\\n{}'.format(check.__qualname__.split('.')[0])\n d += '\\n'\n\n if cmd.aliases:\n d += '\\n**Aliases:**'\n for alias in cmd.aliases:\n d += '\\n`{}{}`'.format(ctx.prefix, alias)\n\n d += '\\n'\n else:\n d = ''\n cmd = ctx.bot\n cmd_name = ''\n for i in args:\n i = i.replace('@', '@\\u200b')\n if hasattr(cmd, 'all_commands') and i in cmd.all_commands:\n cmd = cmd.all_commands[i]\n cmd_name += cmd.name + ' '\n else:\n if cmd == ctx.bot:\n d += 'Command not found.'\n else:\n d += '`{}` has no sub-command `{}`.'.format(cmd.name, i)\n break\n if cmd != ctx.bot:\n d = 'Help for command `{}`:\\n'.format(cmd_name)\n d += '\\n**Usage:**\\n'\n\n if type(cmd) != commands.core.Group:\n params = list(cmd.clean_params.items())\n p_str = ''\n for p in params:\n if p[1].default == p[1].empty:\n p_str += ' [{}]'.format(p[0])\n else:\n p_str += ' <{}>'.format(p[0])\n d += '`{}{}{}`\\n'.format(ctx.prefix, cmd_name, p_str)\n else:\n d += '`{}{} '.format(ctx.prefix, cmd.name)\n if cmd.invoke_without_command:\n d += '['\n else:\n d += '<'\n d += '|'.join(cmd.all_commands.keys())\n if cmd.invoke_without_command:\n d += ']`\\n'\n else:\n d += '>`\\n'\n\n d += '\\n**Description:**\\n'\n d += 
'{}\\n'.format('None' if cmd.help is None else cmd.help.strip())\n\n if cmd.checks:\n d += '\\n**Checks:**'\n for check in cmd.checks:\n d += '\\n{}'.format(check.__qualname__.split('.')[0])\n d += '\\n'\n\n if cmd.aliases:\n d += '\\n**Aliases:**'\n for alias in cmd.aliases:\n d += '\\n`{}{}`'.format(ctx.prefix, alias)\n\n d += '\\n'\n\n d += '\\n*Made by Bottersnike#3605 and hanss314#0128*'\n return await ctx.send(d)\n \ndef setup(bot):\n bot.add_cog(Misc(bot))\n","sub_path":"musicbot/cogs/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":19741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"495297834","text":"from otree.api import Currency as c, currency_range\nfrom . import models\nfrom ._builtin import Page, WaitPage\nfrom .models import Constants, parse_config\nimport math\n\n\nclass Instructions(Page):\n\n def is_displayed(self):\n return self.round_number == 1\n \n def vars_for_template(self):\n return {\n 'instructions_link': self.session.config['instructions_link'],\n }\n\n\nclass Decision(Page):\n\n def is_displayed(self):\n return self.round_number <= self.group.num_rounds()\n\n def vars_for_template(self):\n return {\n \"payoff_matrix\": parse_config(self.session.config['config_file'])[self.round_number-1]['payoff_matrix'],\n \"probability_matrix\": parse_config(self.session.config['config_file'])[self.round_number-1]['probability_matrix'],\n }\n\n\nclass Results(Page):\n\n def is_displayed(self):\n return self.round_number <= self.group.num_rounds()\n\n\ndef get_config_columns(group):\n num_signals = group.num_signals()\n subperiod_length = group.subperiod_length()\n num_subperiods = math.ceil(num_signals / subperiod_length)\n\n seconds_per_tick = group.seconds_per_tick()\n rest_length = group.rest_length_seconds()\n\n config = parse_config(group.session.config['config_file'])\n payoff_matrix = config[group.round_number - 1]['payoff_matrix']\n probability_matrix = config[group.round_number - 1]['probability_matrix']\n\n return [num_subperiods, seconds_per_tick, rest_length, payoff_matrix, probability_matrix]\n\ndef get_output_table_header(groups):\n return [\n 'timestamp_of_start',\n 'session_ID',\n 'period_id',\n 'pair_id', \n 'p1_code',\n 'p2_code',\n 'p1_action',\n 'p2_action',\n 'p1_countGood',\n 'p2_countGood',\n 'p1_periodResult',\n 'p2_periodResult',\n 'p1_avg_payoffs',\n 'p2_avg_payoffs',\n 'subperiod_length',\n 'num_subperiods',\n 'seconds_per_tick',\n 'rest_length_seconds',\n 'payoff_matrix(AGood, ABad, BGood, BBad)',\n 'probability_matrix(AA, AB, BA, BB)'\n ]\n\ndef get_output_table(events):\n if not events:\n return []\n rows = []\n p1, p2 = events[0].group.get_players()\n p1_code = p1.participant.code\n p2_code = p2.participant.code\n group = events[0].group\n config_columns = get_config_columns(group)\n subperiod_num = 0\n for event in events:\n if event.channel == 'subperiod-start':\n p1_result = group.subperiod_results[str(subperiod_num)][p1_code]\n p2_result = group.subperiod_results[str(subperiod_num)][p2_code]\n p1_payoffs = event.value[p1_code]['payoffs']\n p2_payoffs = event.value[p2_code]['payoffs']\n rows.append([\n event.timestamp,\n group.session.code,\n group.subsession_id,\n group.id_in_subsession,\n p1_code,\n p2_code,\n event.value[p1_code]['fixed_decision'],\n event.value[p2_code]['fixed_decision'],\n p1_result.count('G'),\n p2_result.count('G'),\n p1_result,\n p2_result,\n sum(p1_payoffs) / len(p1_payoffs),\n sum(p2_payoffs) / len(p2_payoffs),\n len(p1_payoffs)\n ] + 
config_columns)\n subperiod_num += 1\n\n rows.append(\"\")\n \n return rows\n\n\npage_sequence = [\n Instructions,\n Decision,\n Results,\n]\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"260244401","text":"from transformers import (AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,\n GPT2DoubleHeadsModel, GPT2Tokenizer, WEIGHTS_NAME, CONFIG_NAME)\n\nfrom utils import get_empd_dataset, get_dataset, make_logdir\nimport logging\n\n# logging.basicConfig(level = logging.INFO)\nlogger = logging.getLogger(__file__)\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n\n tokenizer_class = OpenAIGPTTokenizer \n tokenizer = tokenizer_class.from_pretrained(\"openai-gpt\")\n logger.info(\"Start persona dataset\") \n # pers_dataset = get_dataset(tokenizer, \"\", \"./dataset_cache\")\n\n # logger.info(\"Start emp dataset\")\n emp_dataset = get_empd_dataset(tokenizer, \"\", \"./test\")\n # print(emp_dataset.keys())\n i = 0\n # for u in pers_dataset[\"train\"]:\n # if i == 10:\n # break\n # else:\n # i += 1\n # print(u)\n","sub_path":"test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"648960143","text":"import os, wget\nfrom flask_backend import app\nimport shutil\n\n#create media subfolders\ndef init_media(id,loc):\n\n path= os.path.join(app.config['UPLOAD_FOLDER'] ,str(id))\n #create id subfolders\n try:\n os.mkdir(path)\n except OSError:\n print(\"Creation of the directory %s failed\" % path)\n else:\n print(\"Successfully created the directory %s \" % path)\n try:\n os.mkdir(path + \"/video\")\n except OSError:\n print(\"Creation of the directory %s failed\" % path + \"/video\")\n else:\n print(\"Successfully created the directory %s \" % path + \"/video\")\n try:\n os.mkdir(path + \"/photos\")\n except OSError:\n print(\"Creation of the directory %s failed\" % path + \"/photos\")\n else:\n print(\"Successfully created the directory %s \" % path + \"/photos\")\n #create photos\n print(\"getting photos of event \"+str(id))\n size=\"640x640\"\n location=str(loc[0])+\",\"+str(loc[1])\n fov=\"90\"\n pitch=\"0\"\n key=\"AIzaSyDcLG_2KgktdQJXLaeyQZHJzmvcSjNwoPM\"\n for i in range(0,4):\n heading = str(i*90)\n url = \"https://maps.googleapis.com/maps/api/streetview?size=\"+size+\"&location=\"+location+\"&fov=\"+fov+\"&heading=\"+heading+\"&pitch=\"+pitch+\\\n \"&key=\"+key\n print(url)\n wget.download(url, path + \"/photos/\"+str(i)+\".jpeg\")\n\ndef convert_avi_to_mp4(avi_file_path):\n print(avi_file_path)\n os.popen(\"ffmpeg -i '{input}'.avi -ac 2 -b:v 2000k -c:a aac -c:v libx264 -b:a 160k -vprofile high -bf 0 -strict experimental -f mp4 '{input}.mp4' && rm {input}.avi && ffmpeg -ss 00:00:00 -i {input}.mp4 -vframes 1 -q:v 2 {input}T.jpg\".format(input = avi_file_path))\n return True\n\ndef getThumbnail(mp4_file_path):\n print(mp4_file_path)\n os.popen(\n \"ffmpeg -ss 00:00:00 -i {input}.mp4 -vframes 1 -q:v 2 {input}T.jpg\".format(input=mp4_file_path))\n return True\n\ndef rmMedia(id):\n path = os.path.join(app.config['UPLOAD_FOLDER'], str(id))\n # removing id subfolders\n try:\n shutil.rmtree(path)\n except OSError as e:\n print(\"Error: %s : %s\" % (path, 
e.strerror))","sub_path":"flask_backend/media_processing.py","file_name":"media_processing.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"574375978","text":"import pymysql \nfrom socket import *\nimport datetime\nfrom AES_encrypt import* #Python file name for encryption\nfrom AES_Decrypt import* #Python file name for decryption\n\n#function for unsuccessful cases\ndef unsuccessful():\n    conn.commit()\n    conn.close()\n    sharekey=connectionSocket.recv(2048) \n    Plaintext=\"Transaction Unsuccessful\"\n    encrypteddata=str(AES_encrypt(sharekey,Plaintext))\n    connectionSocket.send(encrypteddata.encode())\n    b=datetime.datetime.now()\n    print(\" Elapsed time : \")\n    print(b-a)\n    connectionSocket.close()\n\n\na=datetime.datetime.now()\nserverPort = 12000\nserverSocket = socket(AF_INET, SOCK_STREAM)\nserverSocket.bind(('', serverPort))\nserverSocket.listen(1)\nprint (\"The server is ready to receive\")\nwhile 1:\n    connectionSocket, addr = serverSocket.accept()\n    print(\"Connection Accepted\")\n\n    shky=share_key()\n    connectionSocket.send(shky.encode())\n    sentence=connectionSocket.recv(2048)\n    print(\"Sentence Received\")\n    sentence=sentence.decode()\n    sentence=eval(sentence)\t\n    sentence = AES_Decrypt(sentence[0],sentence[1])\n    sentence=sentence.split()\n\n    account=sentence[0]\n    tt=sentence[1]\n    print(\"Transaction Type:\")\n    print(tt)\n    amount=float(sentence[2])\n    account2=sentence[3]\n    conn = pymysql.connect( host='localhost', user='root', password = \"\", db='test',) \n    cur = conn.cursor()\n    cur.execute(\"select balance from bank2 where AccountNumber = \" + account) \n    balance = str(cur.fetchall()).replace('(','').replace(')','').replace(',','')\n    balance=float(balance)\n    if(tt=='debit'):\n        if(balance>amount):\n            balance=balance - amount\n            print(\"Updated Balance :\")\n            print(balance)\n            \n        else:\n            print(\" Low Balance\")\n            unsuccessful()\n            continue\n    elif(tt=='credit'):\n        balance=balance + amount\n        print(\"Updated Balance :\")\n        print(balance)\n    else:\n        print(\"illegal operation\")\n        unsuccessful()\n        continue\n    update=\"update bank2 set balance = \"+str(balance)+\" where AccountNumber = \"+account\n    cur.execute(update)\n    update=\"update bank2 set lastt = now() where AccountNumber = \"+account\n    cur.execute(update)\n\n    sharekey=connectionSocket.recv(2048) \n    Plaintext=\"Transaction completed successfully\"\n    encrypteddata=str(AES_encrypt(sharekey,Plaintext))\n    connectionSocket.send(encrypteddata.encode())\n\n\n    cur.execute(\"select CIF from bank2 where AccountNumber = \" + account) \n    CIF = str(cur.fetchall()).replace('(','').replace(')','').replace(',','').replace(\"'\",\"\")\n    cur.execute(\"select Name from bank where AccountNumber = \" + account2) \n    Party = str(cur.fetchall()).replace('(','').replace(')','').replace(',','')\n    name=CIF + \".txt\"\n    f = open(CIF + \".txt\", \"a\")\n    f.write(\"\\n--------------------\\n\")\n    f.write(\"Account Number: \"+account+\"\\n\")\n    f.write(\"Amount : \"+str(amount)+\"\\n\")\n    f.write(\"Type of Transaction: \"+tt+\"\\n\")\n    f.write(\"Time of Transaction: \"+str(datetime.datetime.now())+\"\\n\")\n    f.write(\"Party: \"+Party+\"(\"+account2+\")\"+\"\\n\")\n    f.write(\"--------------------\\n\")\n    f.close()\n    conn.commit()\n    conn.close()\n    \n    b=datetime.datetime.now()\n    print(\" Elapsed time : \")\n    print(b-a)\n    connectionSocket.close()\n\n    
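# note: share_key(), AES_encrypt() and AES_Decrypt() are assumed to come from the local AES_encrypt/AES_Decrypt helper modules star-imported above; their signatures here are inferred from how this script calls them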
\n\n","sub_path":"bank2v2.py","file_name":"bank2v2.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"631255703","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 15 04:18:20 2018\n\n@author: sadievrenseker\n\nLesson 39: SVR application with Python\n\n\"\"\"\n\n#1. libraries\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# data loading\nveriler = pd.read_csv('maaslar.csv')\n\nx = veriler.iloc[:,1:2]\ny = veriler.iloc[:,2:]\nX = x.values\nY = y.values\n\n#scaling the data\nfrom sklearn.preprocessing import StandardScaler\n\nsc1 = StandardScaler()\nx_olcekli = sc1.fit_transform(X)\nsc2 = StandardScaler()\ny_olcekli = sc2.fit_transform(Y)\n#print(\"here is x\",X)\n#print(\"here is the scaled x \",x_olcekli)\n\nfrom sklearn.svm import SVR\n\nsvr_reg = SVR(kernel = 'rbf')\nsvr_reg.fit(x_olcekli,y_olcekli.ravel())\n\nplt.scatter(x_olcekli,y_olcekli,color='red')\nplt.plot(x_olcekli,svr_reg.predict(x_olcekli),color='blue')\n\nprint(svr_reg.predict([[11]]))\nprint(svr_reg.predict([[6]]))\nplt.show()\n\n\n\n\n\n\n\n\n    \n\n","sub_path":"Prediction (Chapter 3)/Support Vector Regression (SVR) (Chapter 3.4)/39-svr.py","file_name":"39-svr.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"514329495","text":"\n# -*-coding:utf8 -*-\nimport datetime\nimport os\n\nfrom common.dir_config import log_path\n\n\ndef clean_log(path):\n    if os.path.exists(path) and os.path.isdir(path):\n        today = datetime.date.today().strftime('%Y-%m-%d')\n        yesterday = (datetime.date.today() + datetime.timedelta(-1)).strftime('%Y-%m-%d')\n        before_yesterday = (datetime.date.today() + datetime.timedelta(-2)).strftime('%Y-%m-%d')\n        file_name_list = [today, yesterday, before_yesterday]\n        print(file_name_list)\n        for file in os.listdir(path):\n            file_name_sp = file.split('.')\n            if len(file_name_sp) > 2:\n                file_date = file_name_sp[1]  # take the date from the file name\n                # print type(file_date)\n                # print type(file_name_list[0])\n                if file_date not in file_name_list:\n                    abs_path = os.path.join(path, file)\n                    print('Deleting file %s' % abs_path)\n                    os.remove(abs_path)\n                else:\n                    print('Keeping file %s' % file)\n    else:\n        print('Path does not exist or is not a directory')\n\nif __name__ == '__main__':\n    clean_log(log_path)\n","sub_path":"web_pytest/common/delete_log2.py","file_name":"delete_log2.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"217720742","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom networkx.drawing.nx_agraph import graphviz_layout\nimport networkx as nx\n\ndef display_graph(path):\n    G = nx.Graph()\n    G.add_edges_from(path)\n    pos = graphviz_layout(G, prog=\"dot\")\n    nx.draw(G, pos, with_labels=True)\n    plt.show()\n\ndef reversal_node(row, current_node, current_weight):\n    # helper referenced by uniform_cost_search but missing from the original file;\n    # reconstructed from usage: orient each undirected edge away from current_node\n    # and accumulate the path cost so the priority queue sorts by total cost\n    if row['to'] == current_node:\n        row['from'], row['to'] = row['to'], row['from']\n    row['weights'] += current_weight\n    return row\n\ndef uniform_cost_search(edges, initial_node, goal_node):\n    priority_queue = pd.DataFrame([['R',initial_node,0]], columns=['from','to','weights'])\n    traversed = pd.DataFrame(columns=['from','to','weights'])\n    closed = []\n    parent = { initial_node:'R' }\n    success = False\n    while not priority_queue.empty:\n        from_node = priority_queue.iloc[0]['from']\n        current_node = priority_queue.iloc[0]['to']\n        current_weight = priority_queue.iloc[0]['weights']\n        priority_queue = priority_queue.iloc[1:]\n        if current_node not in closed:\n            parent[current_node] = from_node\n            traversed = traversed.append(pd.DataFrame([[from_node, current_node, 
current_weight]], columns=['from','to','weights']))\n            if current_node == goal_node:\n                success = True\n                break\n            closed.append(current_node)\n            next_rows = edges.loc[(edges['from']==current_node) |(edges['to']==current_node)]\n            next_rows = next_rows.apply(lambda row: reversal_node(row, current_node, current_weight), axis=1)\n            next_rows = next_rows[~next_rows.to.isin(closed)]\n            priority_queue = priority_queue.append(next_rows)\n            priority_queue = priority_queue.sort_values(by=['weights']).reset_index(drop=True)\n    shortest_path = [current_node]\n    while current_node != 'R':\n        parent_node = traversed.loc[(traversed['to']==current_node)].iloc[0]['from']\n        shortest_path.insert(0,parent_node)\n        current_node = parent_node\n    search_space = []\n    for row in traversed.iterrows():\n        search_space.append((row[1]['from'],row[1]['to']))\n    display_graph(search_space)\n    total_weight = traversed.loc[(traversed['to']==goal_node)].iloc[0]['weights']\n    return { \"success\":success, \"path\": shortest_path[1:], \"weights\": total_weight}\n\n\nif __name__ == '__main__':\n    edges = pd.read_csv('Graph.csv')\n    initial_node = 'A'\n    goal_node = 'M'\n    print(uniform_cost_search(edges, initial_node, goal_node))\n","sub_path":"UniformCostSearch.py","file_name":"UniformCostSearch.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"211912678","text":"nums=input().split(\" \")\nnOfHouse=int(nums[0])\nnOfiInstruction=int(nums[1])\ninstruct=[]\nfor i in range(nOfiInstruction):\n    instruct.append(input().split(\" \"))\n    if len(instruct[i])>1:\n        instruct[i][1]=int(instruct[i][1])-1\nresult=[]\ndestroyed=[]\nfor i in range(nOfiInstruction):\n    if instruct[i][0]=='D':\n        destroyed.append(instruct[i][1])\n    elif instruct[i][0]=='R' and len(destroyed)>0:\n        del destroyed[len(destroyed)-1]\n    elif instruct[i][0]=='Q':\n        this=0\n        left=0\n        right=0\n        # count the houses reachable to the left:\n        j=instruct[i][1]\n        while j>0:\n            if not(destroyed.__contains__(j-1)):\n                left=left+1\n            else:break\n            j=j-1\n        # count the houses reachable to the right:\n        j=instruct[i][1]\n        while j<nOfHouse-1:\n            if not destroyed.__contains__(j+1):\n                right=right+1\n            else:\n                break\n            j=j+1\n        if not destroyed.__contains__(instruct[i][1]):\n            result.append(left+right+1)\n        else:\n            result.append(0)\n    #print(destroyed)\n\nfor i in range(len(result)):\n    print(result[i])\n","sub_path":"Code/CodeRecords/2632/60796/268595.py","file_name":"268595.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"253420184","text":"import numpy as np\nimport torch\nfrom torchvision.datasets import mnist\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom common import *\nfrom NetTest import *\n\ndef data_tf(x):\n    x = np.array(x, dtype='float32')/255\n    x = (x - 0.5) / 0.5\n    x = x.reshape((1,28,28))\n    x = torch.from_numpy(x)\n    return x\n\ndef getDataLoader():\n    train_set = mnist.MNIST('../data/mnist', train=True, transform=data_tf, download=False)\n    test_set = mnist.MNIST('../data/mnist', train=False, transform=data_tf, download=False)\n    train_data = DataLoader(train_set, batch_size=64, shuffle=True)\n    test_data = DataLoader(test_set, batch_size=128, shuffle=False)\n    return train_data, test_data\n\ndef train(net, config):\n    # get the data\n    train_data, test_data = getDataLoader()\n    # loss function and the SGD\n    criterion = nn.CrossEntropyLoss()\n    optimizer = torch.optim.SGD(net.parameters(), config.lr)\n    
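# assumption: config (and its use_cuda flag) is produced by getAlexNet_Config() from NetTest;\n    # when use_cuda is true, the model and the loss are moved to the GPU so they match the CUDA batches below\n    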
if(config.use_cuda):\n net = net.cuda()\n criterion = criterion.cuda()\n\n print(\"Start training now ...\")\n losses = []\n acces = []\n eval_losses = []\n eval_acces = []\n\n for e in range(config.epoch):\n train_loss = 0\n train_acc = 0\n net.train()\n # training phase\n for im, label in train_data:\n # print(im.shape)\n im = Variable(im)\n label = Variable(label)\n if config.use_cuda:\n im = im.cuda()\n label = label.cuda()\n # forward\n out = net(im)\n loss = criterion(out, label)\n # backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_loss += loss.data[0]\n _, pred = out.max(1)\n num_correct = (pred == label).sum().data[0]\n acc = float(num_correct / float(im.shape[0]))\n train_acc += acc\n\n losses.append(train_loss / len(train_data))\n acces.append(train_acc / len(train_data))\n # evaluate in th test set\n eval_loss = 0\n eval_acc = 0\n net.eval()\n # test phase\n for im, label in test_data:\n im = Variable(im)\n label = Variable(label)\n if config.use_cuda:\n im = im.cuda()\n label = label.cuda()\n out = net(im)\n loss = criterion(out, label)\n eval_loss += loss.data[0]\n _, pred = out.max(1)\n num_correct = (pred == label).sum().data[0]\n acc = num_correct / float(im.shape[0])\n eval_acc += acc\n\n eval_losses.append(eval_loss / len(test_data))\n eval_acces.append(eval_acc / len(test_data))\n\n print(\"train acc: {}, train data len: {}\".format(train_acc, len(train_data)))\n print('epoch: {}, Train Loss: {:.6f}, Train Acc: {:.6f}, Eval Loss: {:.6f}, Eval Acc: {:6f}'\n .format(e, train_loss / len(train_data), train_acc / len(train_data),\n eval_loss / len(test_data), eval_acc / len(test_data)))\n\nif __name__ == \"__main__\":\n net, config = getAlexNet_Config()\n train(net, config)","sub_path":"mnist/MnistRun.py","file_name":"MnistRun.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"548663193","text":"import time\nimport math\nstart = time.clock()\n\ndef primes_less_than(N):\n primes = [2]\n for i in range(2, N):\n temp = 1\n if(i % 2 == 0):\n continue\n else:\n maxVal = i**.5\n for j in primes:\n if(j > maxVal):\n break\n if(i % j == 0):\n temp = 0\n break\n if(temp == 1):\n primes.append(i)\n return primes\n\nprimesMil = primes_less_than(1000000)\n\ndef prime_test(n, primes):\n maxTest = math.ceil(n**.5)\n for i in primes:\n if i < maxTest:\n if(n % i == 0):\n return 0\n if(primes[-1] < maxTest):\n for i in range(primes[-1], maxTest):\n if(n % i == 0):\n return 0\n return 1\n\nvalues = []\nsideLength = 3\nprimeCount = 0\nprimePct = [[0,1]]\n\nwhile primePct[-1][1] > .1:\n if(sideLength % 1000 < 2):\n print(sideLength)\n values.extend([sideLength**2 - (sideLength - 1), sideLength**2 - 2*(sideLength - 1), sideLength**2 - 3*(sideLength - 1)])\n if prime_test(values[-1], primesMil):\n primeCount += 1\n if prime_test(values[-2], primesMil):\n primeCount += 1\n if prime_test(values[-3], primesMil):\n primeCount += 1\n primePct.append([sideLength, float(primeCount)/float(1+len(values)*4/3)])\n sideLength += 2\n \nstop = time.clock()\nprint(stop - start)\nprint(sideLength-2)\n\n\n\n\n\n\n\n\n\n","sub_path":"58_3.py","file_name":"58_3.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"277413589","text":"\nimport mount.mounting as mnt\nimport mount.unmounting as umnt\nimport disk_usage.usage as dsk\nimport argparse\n\nap = 
argparse.ArgumentParser()\nap.add_argument('-m', action='store_true')\nap.add_argument('-u', action='store_true')\nap.add_argument('-d', action='store_true')\nap.add_argument('-i', default=None)\nargs = ap.parse_args()\n\nif args.m:\n    mnt.main()\nelif args.u:\n    umnt.main()\nelif args.d:\n    dsk.main(args.i)\nelse:\n    print(\"\\n\\tAsher Mancinelli's CLI Utility:\\n\\n\\t-m\\tMounting options\\n\\t-u\\tUnmounting options\\n\\t-d\\tDisk usage information\\n\")\n    \n","sub_path":"utils/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"60291910","text":"import math\nimport matplotlib.pyplot as plt\nimport sympy\nimport control as ctr\n# import slycot\nfrom control import *\n\nTimeLine = []\nfor i in range(0, 10000):\n    TimeLine.append(i)\n# block 1\n# transfer function\nf4 = tf(21, [5, 1])  # Amplifier-actuator unit\nf3 = tf(1, [5, 1])  # Steam turbine\nf2 = tf(1, [8, 1])  # Generator\nf1 = tf([2, 0.00001], [0.00001])  # Aperiodic flexible feedback\n\nf5 = f4 * f3 * f2\nf6 = f5 / (1 + f5 * f1)\n\nprint(f6)\ny, x = ctr.step_response(f6, TimeLine)\nplt.plot(y, x, 'r-')\nplt.grid()\nplt.title('Переходная характеристика')\nplt.show()\n\npzmap(f6)\nplt.grid(True)\nplt.plot()\nplt.show()\n\n\nf7 = f5 * f1\nnyquist(f7)\nplt.title('Nyquist Diagram ')\nplt.ylabel('Imaginary Axis')\nplt.xlabel('Real Axis')\nplt.grid(True)\nplt.plot()\nplt.show()\n\nmag2, phase2, omega2 = bode(f7)\nplt.plot()\nplt.grid(True)\nplt.show()\n\n","sub_path":"lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"475550528","text":"from openpyxl import load_workbook\r\n\r\nclass Compiler():\r\n    def __init__(self):\r\n        self.r = []\r\n        self.Main()\r\n\r\n    def Main(self):\r\n        \r\n        #Defined variables\r\n        robots = []\r\n        ro = []\r\n\r\n        #Opens the excel sheet\r\n        wb = load_workbook('Book1.xlsx')\r\n        sheet = wb.get_sheet_by_name('Book1')\r\n\r\n        #Appends each robot's team number to the list\r\n        for item in sheet['B']:\r\n            if item.value not in robots and item.value != 'Team Number' and item.value != None:\r\n                robots.append(item.value)\r\n\r\n        #Creates each team's dictionary\r\n        for r in robots:\r\n            file = {'Team':r, 'aG':[], 'gR':[], 'pR':[]}\r\n            ro.append(file)\r\n        #print(ro)\r\n        \r\n\r\n        #Fills each team's dictionary\r\n        for rnd in sheet:\r\n            for r in robots:\r\n                if rnd[1].value == r:\r\n                    ro[robots.index(r)]['aG'].append(rnd[3].value)\r\n                    ro[robots.index(r)]['gR'].append(rnd[4].value)\r\n                    ro[robots.index(r)]['pR'].append(rnd[6].value)\r\n        #print(ro)\r\n\r\n        for r in robots:\r\n            file = {'Team':r,\r\n                    'aG':self.aGHist(ro[robots.index(r)]['aG']),\r\n                    'gR':self.gRHist(ro[robots.index(r)]['gR']),\r\n                    'pR':self.pRHist(ro[robots.index(r)]['pR'])}\r\n            self.r.append(file)\r\n        \r\n\r\n        '''#Prints each team's dictionary\r\n        for robot in ro:\r\n            print(robot)'''\r\n    #(0,2,2,4,2,7)\r\n    #Creates values for the Gear histogram\r\n    def gRHist(self, scores):\r\n        values = [0,0,0,0,0,0,0,0,0,0,0,0,0]\r\n        for s in scores:\r\n            values[s] += 1\r\n        return(values)\r\n\r\n    def aGHist(self, aGRound):\r\n        values = [0,0]\r\n        buckets = [0,1]\r\n        for g in aGRound:\r\n            values[g] += 1\r\n        return(values, buckets)\r\n\r\n    def pRHist(self, pRound):\r\n        values = [0,0,0,0,0,0,0,0,0,0,0]\r\n        buckets = ['0-\\n4','5-\\n9','10-14','15-19','20-24','25-29','30-34','35-39','40+']\r\n        for p in pRound:\r\n            if p < 
5:\r\n                values[0] += 1\r\n            elif p < 10:\r\n                values[1] += 1\r\n            elif p < 15:\r\n                values[2] += 1\r\n            elif p < 20:\r\n                values[3] += 1\r\n            elif p < 25:\r\n                values[4] += 1\r\n            elif p < 30:\r\n                values[5] += 1\r\n            elif p < 35:\r\n                values[6] += 1\r\n            elif p < 40:\r\n                values[7] += 1\r\n            else:\r\n                values[8] += 1\r\n        return values, buckets\r\n    \r\n    \r\n    \r\n    #Percent aG\r\n    '''gSum = 0\r\n    rDone = []\r\n    gAvg = {}\r\n    for r in ro:\r\n        if r['Team'] not in rDone:\r\n            for aG in r['aG']:\r\n                gSum += int(aG)\r\n            rDone.append(r['Team'])\r\n    print(rDone)\r\n    print(gSum)\r\n    self.r[ro.index(r)]['aG'] = gSum'''\r\n    #print(self.r)\r\n\r\n\r\ncomp = Compiler()\r\nprint(comp.r)\r\n","sub_path":"Compiler 4.0.py","file_name":"Compiler 4.0.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"312082489","text":"from typing import Callable, Union, Tuple\nimport numpy as np \nimport pickle\n\n\nclass ModelHandler():\n\t\"\"\"\n\tWraps the model in a convenient API.\n\n\t\"\"\"\n\n\tdef __init__(self, model_path:str, vectorizer_path:str, ratinger_path:str) -> None:\n\t\t\"\"\"\n\t\t:model_path: (str) Path to the model, must be a .pkl file \n\t\t:vectorizer_path: (str) Path to the vectorizer, must be a .pkl file\n\t\t:ratinger_path: (str) Path to the probability-to-rating function, as a .pkl file\n\t\t\"\"\"\n\n\t\tself.model = self.load_pickle(model_path)\n\t\tself.vectorizer = self.load_pickle(vectorizer_path)\n\t\tself.ratinger = self.load_pickle(ratinger_path)\n\n\tdef load_pickle(self, path:str) -> Callable:\n\t\t\"\"\"\n\t\tLoads a .pkl file and returns it.\n\t\t:path: (str) -> Path to the file\n\t\t\"\"\"\n\t\twith open(path, 'rb') as pickled_file:\n\t\t\treturn pickle.load(pickled_file)\n\n\tdef predict(self, data:str) -> Tuple[np.array]:\n\t\t\"\"\"\n\t\tProduces predictions using the vectorizer and the model.\n\t\t:data: (str) -> Data to predict on.\n\t\t\tFormat: \"This is a good film!\"\n\n\t\tReturns: tuple(class:np.array, rating:np.array)\n\t\t\"\"\"\n\n\t\t#Convert to the required format\n\t\tdata = np.array([data])\n\n\t\t#Vectorize\n\t\tfeatures = self.vectorizer.transform(data)\n\n\t\t#Get the model prediction\n\t\tpositive_proba = self.model.predict_proba(features)[:, 1]\n\n\t\t#Get a rating from 1 to 10:\n\t\trating = self.ratinger(positive_proba) * 10\n\n\t\t#Convert the rating to a class\n\t\tif rating <= 4:\n\t\t\treview_class = np.array([-1])\n\t\telif (rating > 4) and (rating < 7):\n\t\t\treview_class = np.array([0])\n\t\telse:\n\t\t\treview_class = np.array([1])\n\n\t\treturn review_class, rating\n\n\tdef __call__(self, data:Union[str, list]) -> list:\n\t\t\"\"\"\n\t\tLets the class be used as a Callable.\n\t\tServes as a wrapper around self.predict.\n\t\t:data: (str) -> Data to predict on.\n\t\t\tFormat: \"This is a good film!\"\n\n\t\tReturns: predictions:list\n\t\t\"\"\"\n\t\treview_class, rating = self.predict(data)\n\n\t\tpredictions = np.stack([review_class, rating], axis=1).tolist()\n\n\t\treturn predictions\n\n\n\n","sub_path":"api/modules/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"560201753","text":"import ccxt\nfrom datetime import datetime\nfrom datetime import timedelta\nimport pandas as pd\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nimport time\n\nhuobi_exchange = 
ccxt.huobipro({'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/27766569-15aa7b9a-5edd-11e7-9e7f-44791f4ee49c.jpg',\n 'api': {\n 'market': 'https://api.huobi.io',\n 'public': 'https://api.huobi.io',\n 'private': 'https://api.huobi.io',\n 'zendesk': 'https://huobiglobal.zendesk.com/hc/en-us/articles',\n },\n 'www': 'https://www.huobi.pro',\n 'referral': 'https://www.huobi.br.com/en-us/topic/invited/?invite_code=rwrd3',\n 'doc': 'https://github.com/huobiapi/API_Docs/wiki/REST_api_reference',\n 'fees': 'https://www.huobi.pro/about/fee/',\n }})\nhuobi_exchange.load_markets()\n\nwhile True:\n time_now = time.strftime(\"%H\", time.localtime()) # 刷新\n if time_now == \"20\": #此处设置每天定时的时间\n\n now = datetime.now()\n if int(now.strftime(\"%H%M%S\")) <= 200000:\n now = now + timedelta(days=-1)\n else:\n pass\n\n aDay = timedelta(days=-7)\n lastweek = now + aDay\n print(now.strftime('%Y-%m-%d'))\n print(lastweek.strftime('%Y-%m-%d'))\n today_time = now.strftime('%Y-%m-%d') + ' 19:00:00+00:00' # 20:00的收盘价 是19:00的k线柱\n lastweek_time = lastweek.strftime('%Y-%m-%d') + ' 19:00:00+00:00'\n\n symbol = ['BTC/USDT', 'BSV/USDT', 'HT/USDT']\n dongliang_list = []\n thisweekclose_list = []\n\n for symbol in symbol:\n if huobi_exchange.has['fetchOHLCV']:\n kline_data = pd.DataFrame(huobi_exchange.fetch_ohlcv(symbol, timeframe='1h'))\n kline_data.columns = ['Datetime', 'open', 'High', 'Low', 'close', 'vol']\n kline_data = kline_data[['Datetime', 'close']]\n kline_data['Datetime'] = kline_data['Datetime'].apply(huobi_exchange.iso8601)\n kline_data['Datetime'] = pd.to_datetime(kline_data['Datetime']) + pd.Timedelta(hours=8)\n thisweekclose = kline_data[kline_data['Datetime'] == today_time]['close'].values\n lastweekclose = kline_data[kline_data['Datetime'] == lastweek_time]['close'].values\n dongliang = thisweekclose / lastweekclose\n dongliang_list.append(dongliang)\n thisweekclose_list.append(thisweekclose)\n\n print(now.strftime('%Y-%m-%d'), symbol, thisweekclose)\n print(lastweek.strftime('%Y-%m-%d'), symbol, lastweekclose)\n print(symbol, '动量', dongliang)\n\n dongliang_btc = dongliang_list[0]\n dongliang_bsv = dongliang_list[1]\n dongliang_ht = dongliang_list[2]\n BTC_price = thisweekclose_list[0]\n BSV_price = thisweekclose_list[1]\n HT_price = thisweekclose_list[2]\n\n # print(dongliang_btc)\n # print(dongliang_bsv)\n # print(dongliang_ht)\n\n print('BTC-BSV 轮动组')\n if dongliang_btc > dongliang_bsv:\n print('操作BTC')\n caozuo_btcbsv = '操作BTC'\n price_set_btcbsv = BTC_price\n else:\n print('操作BSV')\n caozuo_btcbsv = '操作BSV'\n price_set_btcbsv = BSV_price\n\n if dongliang_btc < 0.99 and dongliang_bsv < 0.99:\n price_set_btcbsv = price_set_btcbsv * 1.15\n caozuo_btcbsv = caozuo_btcbsv + '空仓追涨' + str(price_set_btcbsv)\n print('空仓追涨',str(price_set_btcbsv))\n else:\n price_set_btcbsv = price_set_btcbsv * 0.85\n caozuo_btcbsv = caozuo_btcbsv + '持仓止损' + str(price_set_btcbsv)\n print('持仓止损',str(price_set_btcbsv))\n\n print('BTC-HT 轮动组')\n if dongliang_btc > dongliang_ht:\n print('操作BTC')\n caozuo_btcht = '操作BTC'\n price_set_btcht = BTC_price\n else:\n print('操作HT')\n caozuo_btcht = '操作HT'\n price_set_btcht = HT_price\n\n if dongliang_btc < 0.99 and dongliang_ht < 0.99:\n price_set_btcht = price_set_btcht * 1.15\n caozuo_btcht = caozuo_btcht + '空仓追涨' + str(price_set_btcht)\n print('空仓追涨',str(price_set_btcht))\n else:\n price_set_btcht = price_set_btcht * 0.85\n caozuo_btcht = caozuo_btcht + '持仓止损' + str(price_set_btcht)\n print('持仓止损',str(price_set_btcht))\n\n mailserver = 'smtp.qq.com'\n 
userName_Sendmail = '1234567@qq.com' #发送邮箱地址\n userName_AuthCode = 'qhpzsesjwtzxcbcg' #发送邮箱的pop3授权码 在邮箱设置中设置\n received_mail = ['345678@qq.com'] #接受邮箱地址\n\n content = '\\n'.join([now.strftime('%Y-%m-%d'), 'BTC价格', str(BTC_price), 'BTC动量', str(dongliang_btc),\n 'BSV价格', str(BSV_price), 'BSV动量', str(dongliang_bsv), 'HT价格', str(HT_price) ,'HT动量',\n str(dongliang_ht), 'BTC-BSV 轮动组', caozuo_btcbsv, 'BTC-HT 轮动组', caozuo_btcht])\n email = MIMEText(content, 'plain', 'utf-8')\n email['Subject'] = Header(content, 'utf-8')\n email['From'] = Header(\"Python机器人\", 'utf-8') #发送人抬头\n email['To'] = Header(\"helloworld\", 'utf-8') #接受人抬头\n\n smtp = smtplib.SMTP_SSL(mailserver, port=465)\n smtp.login(userName_Sendmail, userName_AuthCode)\n smtp.sendmail(userName_Sendmail, '1234567@qq.com', email.as_string()) #邮箱可以自己发自己 ,所以收发邮箱可以一样\n\n smtp.quit()\n\n\n time.sleep(60*60)\n","sub_path":"getdongliang.py","file_name":"getdongliang.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"342935108","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import loadtxt\nfrom sklearn.metrics import mean_squared_error\nfrom scipy.stats import norm\n#==========================Problem 3.1 Linear Regression==========================#\n\n\n#================= a) Polynomial Features [10 Points]=============#\n\n\nlinRegData_path = \"./dataSets/linRegData.txt\"\ndef load_file(filepath):\n data = loadtxt(filepath, comments=\"#\", unpack=False)\n return data\n\nlinRegData = load_file(linRegData_path)\n#linRegData = linRegData[np.lexsort(np.fliplr(linRegData).T)]\ntrain_data = linRegData[:20]\ntest_data = linRegData[20:]\ntest_data = test_data[np.lexsort(np.fliplr(test_data).T)]\n# x = linRegData[:,0]\n# y = linRegData[:,1]\ntrain_x = train_data[:,0].reshape((20,1))\ntrain_y = train_data[:,1].reshape((20,1))\ntest_x = test_data[:,0].reshape((130,1))\ntest_y = test_data[:,1].reshape((130,1))\n\n\n\n# total have 20 feature, each is a gaussian function g1`````g20,\n# so at each data point, data model is [input, output], input:[g1(x)```g20(x)]\ndef create_data_model_gaussian_feature(num_of_basis_functions):\n trainData_feature_sets = []\n testData_feature_sets = []\n\n # for train data sets\n for x_train_value in train_x:\n feature_train_sets = calculate_gaussian_feature_for_each_data(float(x_train_value),num_of_basis_functions)\n trainData_feature_sets.append(feature_train_sets)\n train_x_model = np.transpose(np.array(trainData_feature_sets))\n print(train_x_model.shape)\n\n # for test data sets\n for x_value in test_x:\n feature_sets = calculate_gaussian_feature_for_each_data(float(x_value),num_of_basis_functions)\n testData_feature_sets.append(feature_sets)\n test_x_model = np.transpose(np.array(testData_feature_sets))\n print(test_x_model.shape)\n return train_x_model, test_x_model\n\n\n\ndef calculate_gaussian_feature_for_each_data(x, num_of_basis_functions):\n gaussian_features_list = []\n miu_sets = np.linspace(0, 2, num_of_basis_functions)\n total_for_normal = 0\n for miu in miu_sets:\n gaussian_m = norm(loc=miu, scale=np.sqrt(0.02)).pdf(x)\n total_for_normal += gaussian_m\n for miu in miu_sets:\n gaussian_k = norm(loc=miu,scale=np.sqrt(0.02)).pdf(x)/total_for_normal\n gaussian_features_list.append(gaussian_k)\n gaussian_features_list.append(1)\n return gaussian_features_list\n\n\n\ndef calculate_w(num_of_basis_functions):\n train_x_model, test_x_model = create_data_model_gaussian_feature(num_of_basis_functions)\n 
# part1 = np.linalg.inv(np.matmul(train_x_model, np.transpose(train_x_model)) )\n part1 = np.matmul(train_x_model, np.transpose(train_x_model))\n shape_part1 = part1.shape\n regular_unit_matrix = np.identity(shape_part1[0])\n part1 = part1 + 0.000006*regular_unit_matrix\n part1 = np.linalg.inv(part1)\n part2 = np.matmul(part1, train_x_model)\n W = np.matmul(part2, train_y)\n\n #W = np.matmul( np.matmul( np.linalg.inv( np.matmul(train_x_model, np.transpose(train_x_model) )),train_x_model), train_y)\n return W , train_x_model, test_x_model\n\ndef run_model():\n\n rmse_test_list = []\n rmse_train_list = []\n\n for i in range(2,8):\n # train_x_model, test_x_model = create_data_model_gauusian_feature(i)\n W,train_x_model,test_x_model = calculate_w(i)\n print(\"=======w shape\", W.shape)\n # for test\n y_test_predict = np.matmul(np.transpose(test_x_model), W)\n rmse_test = np.sqrt(mean_squared_error(test_y ,y_test_predict))\n rmse_test_list.append(rmse_test)\n print('===rmse test : ', rmse_test)\n\n # for train\n y_train_predict = np.matmul(np.transpose(train_x_model), W)\n rmse_train = np.sqrt(mean_squared_error(train_y, y_train_predict))\n rmse_train_list.append(rmse_train)\n print('===rmse train : ', rmse_train)\n #\n # if(i == 29):\n # # plt.scatter(train_x, train_y)\n # plt.scatter(test_x,test_y)\n # plt.scatter(test_x,y_test_predict)\n # plt.legend([' test true', 'test predict'])\n # plt.show()\n\n plt.plot(list(range(2,8)),rmse_test_list,'r')\n plt.plot(list(range(2,8)),rmse_train_list,'b')\n plt.legend(['test','train'])\n plt.show()\n\nrun_model()","sub_path":"hw3/Gaussian_features.py","file_name":"Gaussian_features.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"7993881","text":"import os\nimport random\nimport tensorflow as tf\nimport time\nimport subprocess\nimport numpy as np\nimport cv2\n\n\nclass DeconvNet:\n def __init__(self, checkpoint_dir='./checkpoints/'):\n self.maybe_download_and_extract()\n self.build()\n\n self.saver = tf.train.Saver(max_to_keep=5, keep_checkpoint_every_n_hours=1)\n\n self.session = tf.Session()\n self.session.run(tf.initialize_all_variables())\n self.checkpoint_dir = checkpoint_dir\n\n def maybe_download_and_extract(self):\n if not os.path.isdir('data/VOC2012'):\n subprocess.call(\"./download.sh\", shell=True)\n\n def predict(self, image):\n if not os.path.exists(self.checkpoint_dir):\n raise IOError(self.checkpoint_dir + ' does not exist.')\n else:\n path = tf.train.get_checkpoint_state(self.checkpoint_dir)\n if path is None:\n raise IOError('No checkpoint to restore in ' + self.checkpoint_dir)\n else:\n self.saver.restore(self.session, path.model_checkpoint_path)\n\n return self.prediction.eval(session=self.session, feed_dict={image: [image]})[0]\n\n def train(self, training_steps=1000, restore_session=False):\n # TODO: change train method\n for i in range(0, training_steps):\n start = time.time()\n index = random.randint(1, 10)\n image = np.float32(cv2.imread('data/{}.png'.format(str(index).zfill(3)), 0))\n expected = np.float32(cv2.imread('data/{}.png'.format(str(index).zfill(3)), 0))\n self.train_step.run(session=self.session, feed_dict={image: [image], ground_truth: [expected]})\n\n error = self.accuracy.eval(session=self.session, feed_dict={image: [image], ground_truth: [expected]})\n\n print('step {} with trainset {} finished in {:.2f}s with error of {:.2%} ({} total) and loss {:.6f}'.format(\n i, index, time.time() - start, 
(error/(expected.shape[0]*expected.shape[1])), \n int(error), loss.eval(session=session, feed_dict={x: [image], y: [expected]})))\n\n if i % 10 == 0:\n image = np.float32(cv2.imread('data/001.png', 0))\n output = self.session.run(self.prediction, feed_dict={x: [image]})\n cv2.imwrite('cache/output{}.png'.format(str(i).zfill(5)), np.uint8(output[0] * 255))\n\n if i % 100 == 0:\n self.saver.save(self.session, self.checkpoint_dir+'model', global_step=i)\n print('Model {} saved'.format(i))\n\n def build(self):\n image = tf.placeholder(tf.float32, shape=[224, 224, 3])\n ground_truth = tf.placeholder(tf.int64, shape=[224, 224, 3])\n\n rgb = tf.reshape(image, [-1, 224, 224, 3])\n\n conv_1_1 = self.conv_layer(rgb, [3, 3, 1, 64], 64, 'conv_1_1')\n conv_1_2 = self.conv_layer(conv_1_1, [3, 3, 64, 64], 64, 'conv_1_2')\n\n pool_1, pool_1_argmax = self.pool_layer(conv_1_2)\n\n conv_2_1 = self.conv_layer(pool_1, [3, 3, 64, 128], 128, 'conv_2_1')\n conv_2_2 = self.conv_layer(conv_2_1, [3, 3, 128, 128], 128, 'conv_2_2')\n\n pool_2, pool_2_argmax = self.pool_layer(conv_2_2)\n\n conv_3_1 = self.conv_layer(pool_2, [3, 3, 128, 256], 256, 'conv_3_1')\n conv_3_2 = self.conv_layer(conv_3_1, [3, 3, 256, 256], 256, 'conv_3_2')\n conv_3_3 = self.conv_layer(conv_3_2, [3, 3, 256, 256], 256, 'conv_3_3')\n\n pool_3, pool_3_argmax = self.pool_layer(conv_3_3)\n\n conv_4_1 = self.conv_layer(pool_3, [3, 3, 256, 512], 512, 'conv_4_1')\n conv_4_2 = self.conv_layer(conv_4_1, [3, 3, 512, 512], 512, 'conv_4_2')\n conv_4_3 = self.conv_layer(conv_4_2, [3, 3, 512, 512], 512, 'conv_4_3')\n\n pool_4, pool_4_argmax = self.pool_layer(conv_4_3)\n\n conv_5_1 = self.conv_layer(pool_4, [3, 3, 256, 512], 512, 'conv_5_1')\n conv_5_2 = self.conv_layer(conv_5_1, [3, 3, 256, 512], 512, 'conv_5_2')\n conv_5_3 = self.conv_layer(conv_5_2, [3, 3, 256, 512], 512, 'conv_5_3')\n\n pool_5, pool_5_argmax = self.pool_layer(conv_5_3)\n\n fc_6 = self.conv_layer(pool_5, [7, 7, 512, 4096], 4096, 'fc_6', padding='SAME')\n fc_7 = self.conv_layer(fc_6, [1, 1, 4096, 4096], 4096, 'fc_7', padding='SAME')\n\n deconv_fc_6 = self.deconv_layer(fc_7, [7, 7, 512, 4096], 4096, 'fc6_deconv', padding='SAME')\n\n unpool_5 = self.unpool_layer2x2(deconv_fc_6, pool_5_argmax)\n\n deconv_5_3 = self.deconv_layer(unpool_5, [3, 3, 512, 512], 512, 'deconv_5_3')\n deconv_5_2 = self.deconv_layer(deconv_5_3, [3, 3, 512, 512], 512, 'deconv_5_2')\n deconv_5_1 = self.deconv_layer(deconv_5_2, [3, 3, 512, 512], 512, 'deconv_5_1')\n\n unpool_4 = self.unpool_layer2x2(deconv_5_1, pool_4_argmax)\n\n deconv_4_3 = self.deconv_layer(unpool_4, [3, 3, 512, 512], 512, 'deconv_4_3')\n deconv_4_2 = self.deconv_layer(deconv_4_3, [3, 3, 512, 512], 512, 'deconv_4_2')\n deconv_4_1 = self.deconv_layer(deconv_4_2, [3, 3, 256, 512], 256, 'deconv_4_1')\n\n unpool_3 = self.unpool_layer2x2(deconv_4_1, pool_3_argmax)\n\n deconv_3_3 = self.deconv_layer(unpool_3, [3, 3, 256, 256], 256, 'deconv_3_3')\n deconv_3_2 = self.deconv_layer(deconv_3_3, [3, 3, 256, 256], 256, 'deconv_3_2')\n deconv_3_1 = self.deconv_layer(deconv_3_2, [3, 3, 128, 256], 128, 'deconv_3_1')\n\n unpool_2 = self.unpool_layer2x2(deconv_3_1, pool_2_argmax)\n\n deconv_2_2 = self.deconv_layer(unpool_2, [3, 3, 128, 128], 128, 'deconv_2_2')\n deconv_2_1 = self.deconv_layer(deconv_2_2, [3, 3, 64, 128], 64, 'deconv_2_1')\n\n unpool_1 = self.unpool_layer2x2(deconv_2_1, pool_1_argmax)\n\n deconv_1_2 = self.deconv_layer(unpool_1, [3, 3, 64, 64], 64, 'deconv_1_2')\n deconv_1_1 = self.deconv_layer(deconv_1_2, [3, 3, 32, 64], 32, 'deconv_1_1')\n\n score_1 = 
self.deconv_layer(deconv_1_1, [1, 1, 21, 32], 21, 'score_1')\n\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(score_1, ground_truth, name='Cross_Entropy')\n cross_entropy_mean = tf.reduce_mean(cross_entropy, name='x_entropy_mean')\n tf.add_to_collection('losses', cross_entropy_mean)\n\n loss = tf.add_n(tf.get_collection('losses'), name='total_loss')\n self.train_step = tf.train.AdamOptimizer(1e-6).minimize(loss)\n\n self.prediction = tf.argmax(score_1, dimension=3)\n self.accuracy = tf.reduce_sum(tf.pow(tf.to_float(self.prediction) - ground_truth, 2))\n\n def weight_variable(self, shape):\n initial = tf.truncated_normal(shape, stddev=0.1)\n return tf.Variable(initial)\n\n def bias_variable(self, shape):\n initial = tf.constant(0.1, shape=shape)\n return tf.Variable(initial)\n\n def conv_layer(self, x, W_shape, b_shape, name, padding='VALID'):\n W = self.weight_variable(W_shape)\n b = self.bias_variable([b_shape])\n return tf.nn.relu(tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding=padding) + b)\n\n def pool_layer(self, x):\n return tf.nn.max_pool_with_argmax(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n def deconv_layer(self, x, W_shape, b_shape, name, padding='VALID'):\n W = self.weight_variable(W_shape)\n b = self.bias_variable([b_shape])\n\n x_shape = tf.shape(x)\n out_shape = tf.pack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, W_shape[2]])\n\n return tf.nn.conv2d_transpose(x, W, out_shape, [1, 1, 1, 1], padding=padding) + b\n\n # waiting for better performance with fulture version of tf.unravel_index\n # https://github.com/tensorflow/tensorflow/issues/2075\n def unravel_index(self, indices, shape):\n indices = tf.expand_dims(indices, 0)\n shape = tf.expand_dims(shape, 1)\n strides = tf.cumprod(shape, reverse=True)\n strides_shifted = tf.cumprod(shape, exclusive=True, reverse=True)\n return (indices // strides_shifted) % strides\n\n # ! 
Not tested because currently I'm only on CPU !\n # and there is no tf.nn.max_pool_with_argmax function for CPU only\n # (This comment will be removed in the next 3 days)\n # But also waiting for a nicer (C++ GPU) implementation\n # https://github.com/tensorflow/tensorflow/issues/2169\n def unpool_layer2x2(self, x, argmax_from_pool_layer):\n x_shape = x.get_shape().as_list()\n output_shape = [x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3]]\n argmax_shape = argmax_from_pool_layer.get_shape().as_list()\n\n unraveled_pool_map = tf.zeros(output_shape, dtype=tf.float32)\n\n pool_map = self.unravel_index(argmax_from_pool_layer, output_shape)\n\n # Build unraveled pool map\n # Zeros initialized tensor same size as 2*width x 2*height of x and contains\n # ones at positions from argmax_from_pool_layer\n for feature_map in range(argmax_shape[3]):\n for h in range(argmax_shape[1]):\n for w in range(argmax_shape[2]):\n unraveled_pool_map[pool_map[0, 0, h, w, feature_map], pool_map[1, 0, h, w, feature_map]]\n\n # Multiply 2x2 field of unraveled pool map with index of x\n # => Sets all positions of the unraveled pool map to corresponding x value\n for feature_map in range(x_shape[3]):\n for h in range(x_shape[1]):\n for w in range(x_shape[2]):\n unraveled_pool_map[0, (h * 2):2, (w * 2):2, feature_map] *= x[0, h, w, feature_map]\n\n return argmax_from_pool_layer\n","sub_path":"DeconvNet.py","file_name":"DeconvNet.py","file_ext":"py","file_size_in_byte":9434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"230208163","text":"with open('C:\\\\Users\\\\User\\\\Desktop\\\\Rosalind\\\\rosalind_revc.txt', 'rt') as f:\n s = f.read().upper()\n sc = ''\n for n in s[::-1]:\n if n == 'A':\n sc += 'T'\n elif n == 'T':\n sc += 'A'\n elif n == 'C':\n sc += 'G'\n elif n == 'G':\n sc += 'C'\nwith open('stronghold03.txt', 'wt') as g:\n print(sc, file = g)","sub_path":"Rosalind/stronghold03.py","file_name":"stronghold03.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"592260513","text":"'''\nWrapper for tldextract to make managing data easier\n'''\n\nimport os\nimport time\n\nfrom xdg.BaseDirectory import save_cache_path\nimport tldextract\n\ndef extract(url, *, include_psl_private_domains=False):\n cache_dir = save_cache_path('tldextract')\n last_updated = os.path.join(cache_dir, 'last_updated')\n extractor = tldextract.TLDExtract(\n cache_dir = cache_dir,\n include_psl_private_domains = include_psl_private_domains,\n )\n\n update = False\n try:\n t = os.path.getmtime(last_updated)\n if time.time() - t > 86400 * 7:\n update = True\n except FileNotFoundError:\n update = True\n\n if update:\n extractor.update()\n with open(last_updated, 'w'): pass\n\n return extractor(url)\n\n","sub_path":"pylib/tldextractutils.py","file_name":"tldextractutils.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"7687834","text":"#!/usr/bin/env python3\nfrom train_cnn import parse_args, DataSource\nfrom utils import display\n\n\nclass FakeModel:\n def __init__(self, args):\n self.input_size = (args.tile_size, args.tile_size)\n\n def input_img_to_cnn(self, tile, tile_size):\n tile = tile.astype('float32')\n tile = tile.reshape((tile_size, tile_size, 1))\n tile /= 255\n return tile\n\n\ndef main():\n args = parse_args()\n data = DataSource(args, FakeModel(args))\n dg = 
data.data_generator()\n    vdg = data.validation_generator()\n\n    print(data.ideal_steps)\n\n    while True:\n        display(*next(dg))\n        # display(*next(vdg))\n\nif __name__ == '__main__':\n    main()\n","sub_path":"scripts/debug_tiles.py","file_name":"debug_tiles.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"239944001","text":"import sys\n\nInputFile = open (sys.argv[1],'r')\nOutputFile = open (sys.argv[2],'w')\n\nno_of_lines = InputFile.readlines()\nno_of_mix = len (no_of_lines)\nfl = (len(no_of_lines[0].strip().split()) - 1) / 2\n\nfor i in range(0,no_of_mix):\n    l = no_of_lines[i].strip().split()\n    OutputFile.write (\"%s\\n\" % l[0])\n    for j in range(0,fl):\n        OutputFile.write (\" %s %s\" % (l[j+1], l[j+1+fl]))\n    OutputFile.write (\"\\n\")\n\nInputFile.close()\nOutputFile.close()\n","sub_path":"scripts/ConvGMM.py","file_name":"ConvGMM.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"78879525","text":"def check_palindrome(string):\n    \"\"\" Function to check if the {value} is a palindrome by keeping all letters in lowercase \"\"\"\n    return True if string.lower() == string.lower()[::-1] else False\n\n\ndef check_type(value):\n    \"\"\" Function to check if the {value} is a string \"\"\"\n    return (\n        check_palindrome(value) if type(value) == str else check_palindrome(str(value))\n    )\n\n\nif __name__ == \"__main__\":\n    print(check_type(\"Madam\"))  # True\n    print(check_type(\"Radiator\"))  # False\n    print(check_type(12421))  # True\n    print(check_type(1234))  # False\n","sub_path":"easy/Q2/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"308112991","text":"#encoding: UTF-8\n#Author: Diego Perez AKA DiegoCodes\n#Car fuel efficiency\n\n#Computes efficiency in km/L and in miles per gallon\ndef calculatePerformance(km,l):\n    kmlPerformance = km/l\n    mgalPerformance = (km*1.609344)/(l*0.264172051)\n    return kmlPerformance,mgalPerformance\n    \ndef main():\n    km = int(input(\"Kilometros Recorridos:\"))\n    l = int(input(\"Litros de Gasolina Utilizados:\"))\n    (kmlPerformance,mgalPerformance) = calculatePerformance(km,l)\n    print(\"Rendimiento en Km por Litro es de: %.2f \" % kmlPerformance)\n    print(\"Rendimiento en Milla por Galon es de: %.2f \" % mgalPerformance)\n    kmToTravel = int(input(\"Cuantos km recorrera?\"))\n    kmToTravelPerformance = kmToTravel/kmlPerformance\n    print(\"Para recorrer\",kmToTravel,\"kms ,requiere de %.2f\" % kmToTravelPerformance,\"litros\")\nmain()\n","sub_path":"RendimientoGasolina.py","file_name":"RendimientoGasolina.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"495029947","text":"from functools import partial\n\nimport vapoursynth as vs\nfrom vapoursynth import core\nfrom .util import fallback, parse_planes, append_params, vs_to_mv\n\ndef Super(clip, hpad=16, vpad=None, pel=2, levels=0, chroma=True, sharp=2, rfilter=2, pelclip=None, opt=True):\n    vpad = fallback(vpad, hpad)\n    if clip.format.sample_type:\n        sup = core.mvsf.Super(clip, hpad, vpad, pel, levels, chroma, sharp, rfilter, pelclip)\n    else:\n        sup = core.mv.Super(clip, hpad, vpad, pel, levels, chroma, sharp, rfilter, pelclip, opt)\n    return [sup, hpad, vpad]\n\n\n\ndef Analyse(super, radius=1, blksize=None, blksizev=None, levels=0, search=4, searchparam=2, pelsearch=0, 
_lambda=None, chroma=True, truemotion=True, lsad=None, plevel=None, _global=None, pnew=None, pzero=None, pglobal=0, overlap=None, overlapv=None, divide=0, badsad=10000., badrange=None, meander=True, trymany=False, fields=False, tff=None, search_coarse=3, dct=0, opt=True):\n \n radius = append_params(radius, 2)\n \n blksizev = fallback(blksizev, blksize)\n blksize = fallback(blksize, super[1])\n blksizev = fallback(blksizev, super[2])\n \n super = super[0]\n ssw, ssh = super.format.subsampling_w, super.format.subsampling_h\n \n overlapv = fallback(overlapv, overlap)\n overlapv = fallback(overlapv, blksizev >> 1)\n overlap = fallback(overlap, blksize >> 1)\n \n overlap = min(overlap, blksize >> 1 >> ssw << ssw)\n overlapv = min(overlapv, blksizev >> 1 >> ssh << ssh)\n \n if super.format.sample_type == vs.FLOAT:\n MAnalyse = core.mvsf.Analyze\n elif blksize == 2:\n raise ValueError('zzfunc.mv.Analyse: blksize 2 is only compatible with float input')\n else:\n MAnalyse = partial(core.mv.Analyse, opt=opt)\n badsad = round(badsad)\n \n if badrange is None:\n badrange = -24 if search in [3,6,7] else 24\n \n def getvecs(isb, delta): return MAnalyse(super, isb=isb, blksize=blksize, blksizev=blksizev, levels=levels, search=search, searchparam=searchparam, pelsearch=pelsearch, _lambda=_lambda, chroma=chroma, delta=delta, truemotion=truemotion, lsad=lsad, plevel=plevel, _global=_global, pnew=pnew, pzero=pzero, pglobal=pglobal, overlap=overlap, overlapv=overlapv, divide=divide, badsad=badsad, badrange=badrange, meander=meander, trymany=trymany, fields=fields, tff=tff, search_coarse=search_coarse, dct=dct)\n \n bv = [getvecs(1, i) for i in range(1, radius+1)]\n fv = [getvecs(0, i) for i in range(1, radius+1)]\n \n blksize >>= 1\n blksizev >>= 1\n if (blksize, blksizev) == (8, 1):\n blksize, blksizev = 2, 2\n overlapv >>= 3\n elif (blksize, blksizev) == (4, 2):\n blksize, blksizev = 4, 4\n overlap >>= 1\n else:\n overlap >>= 1\n overlapv >>= 1\n \n args = dict(blksize=blksize, blksizev=blksizev, search=search, searchparam=searchparam, _lambda=_lambda, chroma=chroma, truemotion=truemotion, _pnew=pnew, overlap=overlap, overlapv=overlapv, divide=divide, meander=meander, fields=fields, tff=tff, dct=dct)\n \n return [bv, fv, args]\n\nAnalyze = Analyse\n\n\n\ndef Recalculate(super, vectors, radius=None, thsad=None, smooth=None, blksize=None, blksizev=None, search=None, searchparam=None, _lambda=None, chroma=None, truemotion=None, pnew=None, overlap=None, overlapv=None, divide=None, meander=None, fields=None, tff=None, dct=None, opt=True):\n \n radius = fallback(radius, [len(x) for x in vectors[:2]])\n radius = append_params(radius, 2)\n thsad = fallback(thsad, vectors[2].get(thsad, 200))\n smooth = fallback(smooth, vectors[2].get(smooth, 1))\n blksizev = fallback(blksizev, blksize)\n blksize = fallback(blksize, vectors[2]['blksize'])\n blksizev = fallback(blksizev, vectors[2]['blksizev'])\n search = fallback(search, vectors[2]['search'])\n searchparam = fallback(searchparam, vectors[2]['searchparam'])\n _lambda = fallback(_lambda, vectors[2]['_lambda'])\n chroma = fallback(chroma, vectors[2]['chroma'])\n truemotion = fallback(truemotion, vectors[2]['truemotion'])\n pnew = fallback(pnew, vectors[2]['pnew'])\n overlapv = fallback(overlapv, overlap)\n overlap = fallback(overlap, vectors[2]['overlap'])\n overlapv = fallback(overlapv, vectors[2]['overlapv'])\n divide = fallback(divide, vectors[2]['divide'])\n meander = fallback(meander, vectors[2]['meander'])\n fields = fallback(fields, vectors[2]['fields'])\n tff = 
fallback(tff, vectors[2]['tff'])\n dct = fallback(dct, vectors[2]['dct'])\n \n overlap = min(overlap, blksize >> 1 >> ssw << ssw)\n overlapv = min(overlapv, blksizev >> 1 >> ssh << ssh)\n \n if super.format.sample_type==vs.FLOAT:\n MRecalculate = core.mvsf.Recalculate\n elif blksize == 2:\n return vectors\n else:\n MRecalculate = partial(core.mv.Recalculate, opt=opt)\n thsad = round(thsad)\n \n def refine(vec): return MRecalculate(super, vec, thsad=thsad, smooth=smooth, blksize=blksize, blksizev=blksizev, search=search, searchparam=searchparam, _lambda=_lambda, chroma=chroma, truemotion=truemotion, pnew=pnew, overlap=overlap, overlapv=overlapv, divide=divide, meander=meander, fields=fields, tff=tff, dct=dct)\n \n bv = [refine(x) for x in vectors[0][:radius[0]]]\n fv = [refine(x) for x in vectors[1][:radius[1]]]\n \n blksize >>= 1\n blksizev >>= 1\n if (blksize, blksizev) == (8, 1):\n blksize, blksizev = 2, 2\n overlapv >>= 3\n elif (blksize, blksizev) == (4, 2):\n blksize, blksizev = 4, 4\n overlap >>= 1\n else:\n overlap >>= 1\n overlapv >>= 1\n \n args = dict(thsad=thsad/2, smooth=smooth, blksize=blksize, blksizev=blksizev, search=search, searchparam=searchparam, _lambda=_lambda, chroma=chroma, truemotion=truemotion, _pnew=pnew, overlap=overlap, overlapv=overlapv, divide=divide, meander=meander, fields=fields, tff=tff, dct=dct)\n \n return [bv, fv, args]\n\n\n\ndef Compensate(clip, super, vectors, radius=None, cclip=None, scbehavior=1, thsad=10000.0, thsad2=None, fields=False, time=100.0, thscd1=400.0, thscd2=130.0, tff=None, interleaved=True, opt=True):\n \n radius = fallback(radius, min(len(x) for x in vectors[:2]))\n tff = fallback(tff, vectors[2]['tff'])\n \n vectors = Interleave(vectors, radius)\n \n if super.format.sample_type==vs.FLOAT:\n if thsad2 is not None or len(set(radius)) == 1:\n comp = core.mvsf.Compensate(clip, super[0], vectors, cclip, scbehavior, thsad, thsad2, fields, time, thscd1, thscd2, tff)\n if interleaved:\n return comp\n return Disperse(comp, radius)\n MCompensate = core.mvsf.Compensate\n else:\n MCompensate = partial(core.mv.Compensate, opt=opt)\n thsad = round(thsad[0])\n thscd1 = round(thscd1)\n thscd2 = round(thscd2)\n \n cclip = fallback(cclip, clip)\n \n def comp(isb, delta): return MCompensate(clip, super[0], vectors[1 - isb][abs(delta)], scbehavior=scbehavior, thsad=thsad, fields=fields, time=time, thscd1=thscd1, thscd2=thscd2, tff=tff)\n \n bcomp = [comp(1, i) for i in range(radius)]\n fcomp = [comp(0, i) for i in range(radius)]\n comp = bcomp + [cclip] + fcomp\n \n if interleaved:\n return core.std.Interleave(comp)\n return comp\n\n\n\ndef Degrain(clip, super, vectors, radius=None, thsad=400., thsad2=None, planes=None, limit=None, thscd1=400., thscd2=130., opt=True):\n \n numplanes = clip.format.num_planes\n radius = fallback(radius, min(len(x) for x in vectors[:2]))\n thsad = append_params(thsad, numplanes)\n if thsad2 is not None:\n thsad2 = append_params(thsad2, numplanes)\n planes = parse_planes(planes, numplanes, 'mv.Degrain')\n planes = vs_to_mv(planes)\n limit = append_params(limit, numplanes)\n \n if clip.format.sample_type == vs.INTEGER:\n thsadc = round(thsad[-1])\n thsad = round(thsad[0])\n limitc = round(limit[-1])\n limit = round(limit[0])\n thscd1 = round(thscd1)\n thscd2 = round(thscd2)\n args = dict(clip=clip, super=super[0], mvbw=vectors[0][0], mvfw=vectors[1][0], thsad=thsad, thsadc=thsadc, plane=planes, limit=limit, limitc=limitc, thscd1=thscd1, thscd2=thscd2, opt=opt)\n if radius == 1:\n return core.mv.Degrain1(**args)\n if radius 
== 2:\n return core.mv.Degrain2(mvbw2=vectors[0][1], mvfw2=vectors[1][1], **args)\n return core.mv.Degrain3(mvbw2=vectors[0][1], mvfw2=vectors[1][1], mvbw3=vectors[0][2], mvfw3=vectors[1][2], **args)\n \n mvmulti = Interleave(vectors, radius)\n \n return core.mvsf.Degrain(clip, super[0], mvmulti, thsad, thsad2, plane, limit, thscd1, thscd2)\n\n\n\ndef Interleave(vectors, radius):\n bv = vectors[0][:radius]\n fv = vectors[1][:radius]\n bv.reverse()\n return core.std.Interleave(bv+fv)\n\n\n\ndef Disperse(vectors, radius):\n bv = vectors[0][:radius]\n fv = vectors[1][:radius]\n vectors = bv+fv\n return [vectors[x::radius * 2 + 1] for x in range(radius * 2 + 1)]\n \n","sub_path":"zzfunc/mv.py","file_name":"mv.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"13000208","text":"#!/usr/bin/env python3\n# Imports\nfrom collections import defaultdict\n\nimport bpy\nimport numpy as np\nimport xarray as xr\n\n# TODO Fix this import\nfrom blendernc.cython_build import lic_internal\nfrom blendernc.decorators import NodesDecorators\nfrom blendernc.get_utils import get_geo_coord_names\nfrom blendernc.python_functions import refresh_cache\n\n\nclass BlenderNC_NT_lic(bpy.types.Node):\n # === Basics ===\n # Description string\n \"\"\"Select axis\"\"\"\n # Optional identifier string. If not explicitly defined,\n # the python class name is used.\n bl_idname = \"netCDFlic\"\n # Label for nice name display\n bl_label = \"LIC\"\n # Icon identifier\n bl_icon = \"DECORATE_DRIVER\"\n blb_type = \"NETCDF\"\n\n # Dataset requirements\n blendernc_dataset_identifier: bpy.props.StringProperty()\n \"\"\"An instance of the original StringProperty.\"\"\"\n blendernc_dict = defaultdict(None)\n\n # === Optional Functions ===\n # Initialization function, called when a new node is created.\n # This is the most common place to create the sockets for a node,\n # as shown below.\n def init(self, context):\n self.inputs.new(\"bNCnetcdfSocket\", \"Dataset (u)\")\n self.inputs.new(\"bNCnetcdfSocket\", \"Dataset (v)\")\n self.outputs.new(\"bNCnetcdfSocket\", \"Dataset\")\n\n # Copy function to initialize a copied node from an existing one.\n def copy(self, node):\n print(\"Copying from node \", node)\n\n # Free function to clean up on removal.\n def free(self):\n print(\"Removing node \", self, \", Goodbye!\")\n\n # Additional buttons displayed on the node.\n def draw_buttons(self, context, layout):\n layout.label(text=\"Line Integral Convolution \")\n\n # Detail buttons in the sidebar.\n # If this function is not defined,\n # the draw_buttons function is used instead\n def draw_buttons_ext(self, context, layout):\n pass\n\n # Optional: custom label\n # Explicit user label overrides this,\n # but here we can define a label dynamically\n def draw_label(self):\n return \"LIC\"\n\n @NodesDecorators.node_connections\n def update(self):\n frame = bpy.context.scene.frame_current\n unique_identifier = self.blendernc_dataset_identifier\n unique_data_dict_node = self.blendernc_dict[unique_identifier]\n parent_node = self.inputs[0].links[0].from_node\n dataset = parent_node.blendernc_dict[unique_identifier][\"Dataset\"].copy()\n # TODO: Move this condition to the decorator.\n if self.inputs[-1].links and self.inputs[0].links:\n input_from_node = self.inputs[-1].links[0].from_node\n sel_var = unique_data_dict_node[\"selected_var\"]\n var_name = sel_var[\"selected_var_name\"]\n\n dataset_other = (\n self.inputs[-1]\n .links[0]\n 
.from_node.blendernc_dict[input_from_node.blendernc_dataset_identifier]\n )\n varname_other = dataset_other[\"selected_var\"][\"selected_var_name\"]\n\n dataarray_link_1 = unique_data_dict_node[\"Dataset\"][var_name]\n dataarray_link_2 = dataset_other[\"Dataset\"][varname_other]\n\n if dataarray_link_1.shape != dataarray_link_2.shape:\n raise ValueError(\n \"\"\"Both velocity fields should be the same dimensions.\"\"\"\n )\n\n texture = np.random.rand(*dataarray_link_2.shape).astype(np.float32)\n\n coords_name = get_geo_coord_names(dataset)\n\n x_coord = dataarray_link_1[coords_name[\"lon_name\"][0]]\n y_coord = dataarray_link_1[coords_name[\"lat_name\"][0]]\n\n vector_list = [dataarray_link_1.fillna(0), dataarray_link_2.fillna(0)]\n\n vectors = xr.concat(vector_list, dim=\"vel\").T.values.astype(np.float32)\n\n kernellen = int(0.2 * len(x_coord))\n if (kernellen % 2) == 0:\n kernellen += 1\n\n kernel_shift = np.sin(\n (0.5 * np.arange(kernellen) / float(kernellen) + frame)\n )\n\n kernel = (\n np.sin(np.arange(kernellen) * np.pi / float(kernellen)) * kernel_shift\n )\n\n kernel = kernel.astype(np.float32)\n # TODO: Move this function to the image production\n lic_data = lic_internal.line_integral_convolution(\n vectors,\n texture,\n kernel,\n )\n\n lic_dataset = xr.Dataset(\n {\n var_name: ([\"lat\", \"lon\"], lic_data.T),\n },\n coords={\n \"lon\": ([\"lon\"], x_coord),\n \"lat\": ([\"lat\"], y_coord),\n },\n )\n\n unique_data_dict_node[\"Dataset\"] = lic_dataset\n NodeTree = self.rna_type.id_data.name\n identifier = self.blendernc_dataset_identifier\n refresh_cache(NodeTree, identifier, frame)\n","sub_path":"blendernc/nodes/vectors/BlenderNC_NT_line_int_conv.py","file_name":"BlenderNC_NT_line_int_conv.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"285715097","text":"#coding:utf-8\n\nfrom urllib import request\nimport re\nfrom urllib import error\n\ndef find_paper():\n file='E:\\ppp.txt'\n ppp=open(file,'w',encoding='utf-8')\n page=1\n while True:\n try:\n url='http://www.rzdonggang.gov.cn/eportal/ui?pageId=2271¤tPage='+str(page)+'&moduleId=a08a4ae6cfa24b968d67c0170cd68697&staticRequest=yes'\n sub_req=request.Request(url)\n sub_req.add_header('User-Agent','Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3100.0 Safari/537.36')\n sub_data=request.urlopen(sub_req).read().decode('utf-8')\n except error.URLError as e:\n print(e.reason)\n\n data_cut_t=sub_data.find('<table cellspacing=\"0\" cellpadding=\"0\" border=\"0\" class=\"\" style=\"width:100%;border-collapse:collapse;\">')\n data_cut_b = sub_data.find('<table border=\"0\" cellpadding=\"0\" cellspacing=\"0\" width=\"100%\"> ')\n sub_data_cut=sub_data[data_cut_t:data_cut_b]\n\n sub_data_url = re.findall(r'<tbody>.*</tbody>', sub_data_cut, re.S)\n if sub_data_url!=0:\n page+=1\n else:\n break\n ppp.write(\"\\n\".join(str(i) for i in sub_data_url))\n print(sub_data_url)\n ppp.close()\n\nif __name__ == '__main__':\n find_paper(find_paper())\n\n","sub_path":"spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"340238979","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 16 12:38:42 2019\r\n\r\n@author: danielmaxwell\r\n\"\"\"\r\n\r\n# ----------------------------------------------------------------------------\r\n# Atlantic Cod Analysis\r\n# 
----------------------------------------------------------------------------\r\n\r\n# Setup environment with appropriate libraries.\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\n# Clean and stage the date. The .csv file was downloaded from the NOAA website\r\n# @ https://foss.nmfs.noaa.gov. The selection criteria was 'Cod, Atlantic' for\r\n# seven New England states, from 1988 to 2017. Using Excel, the original file\r\n# was then modified as follows: a) the Conf column was removed, b) all of the \r\n# column (variable) names were set to lower-case, and c) the pounds and dollars\r\n# columns were formatted as numbers. The raw data was formatted as currency, \r\n# using commas as separators. This last step is mandatory. Otherwise, the\r\n# load_csv function will import these columns as strings rather than numbers.\r\n# The code below adds a new tons column to the dataframe and then saves it as\r\n# a .csv file. This, then, becomes our new base dataset, the one we work with\r\n# from this point forward. N.B. This only needs to be done once!\r\n\r\nbase_df = pd.read_csv('c:/informatics/cod_landings_raw.csv')\r\n\r\nbase_df['tons'] = base_df['pounds'] / 2000\r\n\r\nbase_df.to_csv('c:/informatics/cod_landings.csv', index = False)\r\n\r\n# Pivot the data set from long to wide format to view tons caught for each \r\n# state by year. Compare numbers from late 1980's to present. The grouping\r\n# and summing operation is necessary because the NOAA supplied data set often\r\n# has multiple observations for a given state in a given year. The pounds and\r\n# dollars columns are removed as they are not needed.\r\n\r\nbase_df = pd.read_csv('c:/informatics/cod_landings.csv')\r\nbase_df = base_df.drop(['pounds','dollars'], axis = 1)\r\n\r\npbase_df = base_df.groupby(['year','state'], as_index = False).sum()\r\n\r\npivot_df = pbase_df.pivot(index = 'state', columns = 'year', values = 'tons')\r\npivot_df.head(n = 7)\r\n\r\n# Viewing data in a table gives us an initial sense of catch sizes over time.\r\n# An inspection of the numbers suggests that there have been some dramatic\r\n# declines in catches from the late 1980's. Let's plot the data for one state\r\n# (Massachussetts) to get a sense of what's happening. \r\ncod_df = base_df.groupby(['year','state','species'], as_index = False)['tons'].sum()\r\n\r\n# Assign column names and set state as the index.\r\ncod_df.columns = ['year','state','species','tons']\r\ncod_df = cod_df.set_index('state')\r\n\r\n# Subset the MA data.\r\nma = cod_df.loc['Massachusetts']\r\n\r\n# Generate the MA plot.\r\nplt.plot(ma.year, ma.tons)\r\nplt.title('Atlantic Cod (MA Landings)')\r\nplt.ylabel('Tons')\r\nplt.xlabel('Year')\r\n\r\n# Plot and compare the seven states in our data set. Do catch numbers follow\r\n# a similar pattern across the states? Subset the data into dataframes, one\r\n# for each state.\r\nme = cod_df.loc['Maine']\r\nct = cod_df.loc['Connecticut']\r\nri = cod_df.loc['Rhode Island']\r\nnh = cod_df.loc['New Hampshire']\r\nnj = cod_df.loc['New Jersey']\r\nmd = cod_df.loc['Maryland']\r\n\r\n# Generate the plot. 
Do all of the states follow a similar pattern?\r\nplt.plot(ma.year, ma.tons)\r\nplt.plot(me.year, me.tons)\r\nplt.plot(ct.year, ct.tons)\r\nplt.plot(ri.year, ri.tons)\r\nplt.plot(nh.year, nh.tons)\r\nplt.plot(nj.year, nj.tons)\r\nplt.plot(md.year, md.tons)\r\nplt.title('Atlantic Cod (Landings)')\r\nplt.ylabel('Tons')\r\nplt.xlabel('Year')\r\nplt.legend(['MA','ME','CT','RI','NH','NJ','MD'])\r\n","sub_path":"archive/cod_analysis_code.py","file_name":"cod_analysis_code.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"331461450","text":"\"\"\"\nMy Calendar II\nhttps://leetcode.com/problems/my-calendar-ii/\n\nbook\nTime O(nlogn)\nSpace O(n)\n\"\"\"\nclass MyCalendarTwo:\n\n def __init__(self):\n self.starts = []\n self.ends = []\n\n def book(self, start: int, end: int) -> bool:\n starts = list(self.starts)\n ends = list(self.ends)\n starts.append(start)\n starts.sort()\n ends.append(end)\n ends.sort()\n \n i, j = 0, 0\n count = 0\n \n while i < len(starts):\n if starts[i] < ends[j]:\n count += 1\n if count == 3:\n return False\n i += 1\n else:\n count -= 1\n j += 1\n \n self.starts = starts\n self.ends = ends\n \n return True\n \n\n\n# Your MyCalendarTwo object will be instantiated and called as such:\n# obj = MyCalendarTwo()\n# param_1 = obj.book(start,end)\n","sub_path":"python/my_calendar_ii.py","file_name":"my_calendar_ii.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"547100518","text":"#for reduce method we required functools module\r\nfrom functools import *;\r\n\r\ndef Accept_Data():\r\n size=int(input(\"Enter the no of element\"))\r\n arr=list()\r\n print(\"enter the no of elements\")\r\n for i in range(0,size,1):\r\n print(\"enter the \",i+1)\r\n no=int(input())\r\n arr.append(no)\r\n\r\n return arr\r\n\r\ndef ChkEven(no):\r\n if (no%2)==0:\r\n return True\r\n else:\r\n return False\r\n\r\ndef Modify(no):\r\n return no+2\r\n\r\ndef Add(no1,no2):\r\n return no1+no2\r\n\r\ndef main():\r\n #creation of list\r\n Raw_Data=Accept_Data()\r\n\r\n #print accepted data\r\n print(\"accepted data is {}\".format(Raw_Data))\r\n\r\n #filter data\r\n FilterData=list(filter(ChkEven,Raw_Data))\r\n print(\"filterd Data is {}\".format(FilterData))\r\n\r\n #modify data\r\n Modified_Data=list(map(Modify,FilterData))\r\n print(\"modified or mapped data is {}\".format(Modified_Data))\r\n\r\n #reduce data or compute the data\r\n ReducedData=reduce(Add,Modified_Data)\r\n print(\"the final output is after reduce method {}\".format(ReducedData))\r\n\r\n\r\n\r\n\r\n\r\nif __name__==\"__main__\":\r\n main()","sub_path":"Core/Filter_map_reduce.py","file_name":"Filter_map_reduce.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"59063651","text":"bobs = []\ngrounds = []\nnb_bobs = 70\nMouseMag = 0.1\nRepMag = 250\nWallMag = 6*50\nObjMag = 10\nfrot = 0.5\nbob_radius = 18\nmaxmasse = 95\nR = 100\ngrounds = []\nfriction = 0.8\nbob_damp = 0.2\n\nnx = 100//(bob_radius+2)\nny = 320//(bob_radius+2)\n\nx0 = (bob_radius+2)\ny0 = (bob_radius+2)\n\nsavedlen = 500\n\nj = 0\n\n\nlistPos = []\nfor i in range(nx):\n y0 = (bob_radius+2)\n x0 += (bob_radius+2)\n for j in range(ny):\n t = (x0,y0)\n listPos.append(t)\n y0 += (bob_radius+2)\n\n\ndef setup():\n size(1400, 400, P2D)\n global grounds\n \n up1 = Ground(0,30,100,30)\n up2 = 
Ground(100,30,520,150)\n up3 = Ground(520, 150,620,150)\n up4 = Ground(620,150,1010,30)\n up5 = Ground(1010,30,1110,30)\n \n dn5 = Ground(1110,370,1010,370)\n dn4 = Ground(1010,370,620,280)\n dn3 = Ground(620,280,520,280)\n dn2 = Ground(520,280,100,370)\n dn1 = Ground(100,370,0,370)\n \n depart = Ground(0,370,0,30)\n \n objectif = Ground(1200,370,1200,30)\n \n grounds.append(up1)\n grounds.append(up2)\n grounds.append(up3)\n grounds.append(up4)\n grounds.append(up5)\n \n grounds.append(dn1)\n grounds.append(dn2)\n grounds.append(dn3)\n grounds.append(dn4)\n grounds.append(dn5)\n \n grounds.append(depart)\n grounds.append(objectif)\n \n for i in range(nb_bobs):\n bobs.append(Bob(random(10,380), random(110,310), bob_radius,i))\n \n bake()\n \ndef draw():\n global objectif\n global j\n objectif = Ground(1200,370,1200,30)\n \n\n clear()\n noStroke()\n fill(0, 15)\n wall1 = grounds[0]\n\n # for bob in bobs:\n # if bob.position.x<objectif.x:\n # bob.move()\n # bob.display()\n # else:\n # bob.position.x = 2*width\n \n for bob in bobs:\n \n pos = bob.savedpos[j]\n bob.display(pos.x, pos.y)\n\n j = (j + 1) % savedlen\n \n fill(127)\n for ground in grounds:\n stroke(255)\n line(ground.a.x, ground.a.y, ground.b.x, ground.b.y)\n \n if j < 10:\n t = \"000\" + str(i)\n elif j < 100:\n t = \"00\" + str(i)\n elif j < 1000: \n t = \"0\" + str(i) \n else:\n t = str(j)\n saveFrame(\"Frame/bobStory(\" + t + \").png\")\n \n \n \ndef bake():\n global objectif\n for i in range(savedlen):\n print(i*100/savedlen, \" %\")\n for bob in bobs:\n if bob.position.x<1200:\n bob.move(i)\n else:\n bob.position.x = 2*width\n \n bob.savepos()\n \n\n\n\nclass Bob(object):\n\n def __init__(self, x, y, radius, index):\n self.position = PVector(x, y)\n self.velocity = PVector(0, 0)\n self.acceleration = PVector(0,0)\n \n self.index = index\n self.obj = PVector(1200-3*radius,200)\n self.masse = random(45,95)\n self.radius = radius* self.masse/maxmasse\n self.savedpos = []\n\n def savepos(self):\n self.savedpos.append(self.position.copy())\n \n def move(self,i):\n self.velocity.add(self.acceleration)\n self.velocity.y*=frot\n self.position.add(self.velocity)\n self.acceleration.mult(0)\n self.CheckOtherCollision()\n self.acceleration.add(self.FollowObjective(i))\n self.acceleration.add(self.checkGroundCollision(grounds))\n self.acceleration.add(self.FrictionForce())\n self.acceleration.mult(1/self.masse)\n\n # def display(self):\n # noStroke()\n # fill(200)\n # circle(self.position.x, self.position.y, self.radius * 2)\n\n def display(self, x, y):\n # Draw orb.\n noStroke()\n fill(200)\n circle(x, y, self.radius * 2)\n \n\n def checkGroundCollision(self, grounds):\n F = PVector(0,0)\n for ground in grounds:\n if self.intersection(ground):\n #self.velocity.y = 0\n #self.acceleration.y = 0\n f = PVector(ground.b.x - ground.a.x, ground.b.y - ground.a.y,).rotate(HALF_PI)\n f.setMag(WallMag)\n F.add(f)\n return F\n \n def FrictionForce(self):\n f = self.velocity.copy()\n f*= -1\n f.setMag(friction)\n return f\n\n def CheckOtherCollision(self):\n for other in bobs[self.index:]:\n dP = PVector(other.position.x - self.position.x, other.position.y - self.position.y)\n d = other.radius + self.radius\n \n if self.position.dist(other.position) < d:\n angle = atan2(dP.y, dP.x) # pi/2 - angle de dP\n #angle = PVector.angleBetween(PVector(1,0),dP)\n\n T = PVector.fromAngle(angle)\n T.setMag(d)\n \n A = T - dP\n A *= bob_damp\n\n \n self.velocity.sub(other.masse/(other.masse+self.masse)*A)\n 
other.velocity.add(self.masse/(other.masse+self.masse)*A)\n \n \n \n def FollowMouse(self):\n if mousePressed:\n ax = -(mouseX-self.position.x)\n ay = -(mouseY-self.position.y)\n return PVector(0,0)\n else:\n ax = mouseX-self.position.x\n ay = mouseY-self.position.y\n return PVector(ax,ay).setMag(MouseMag)\n \n def FollowObjective(self,i):\n # if mousePressed:\n # ax = self.obj.x - self.position.x\n # ay = self.obj.y - self.position.y\n # return PVector(ax,ay).setMag(ObjMag)\n # else:\n # return PVector(0,0)\n if i > 0.2*savedlen:\n ax = self.obj.x - self.position.x\n ay = self.obj.y - self.position.y\n return PVector(ax,ay).setMag(ObjMag)\n else:\n return PVector(0,0)\n \n \n def intersection(self, ground):\n L = ground.a \n E = ground.b \n C = self.position \n d = E.copy().sub(L) \n r = self.radius \n f = C.copy().sub(E)\n a = d.dot(d) \n b = 2*f.dot(d) \n c = f.dot(f) - r*r \n delta = b*b -4*a*c \n t1 = (-b - sqrt(delta))/(2*a) \n t2 = (-b + sqrt(delta))/(2*a) \n T1 = E.copy().add(d.copy().mult(-t1)) \n T2 = E.copy().add(d.copy().mult(-t2)) \n \n return (delta > 0 and t1 >= 0 and t1 <= 1 or (t2 >= 0 and t2 <= 1))\n\n \nclass Ground(object):\n\n def __init__(self, x1, y1, x2, y2):\n self.a = PVector(x1, y1)\n self.b = PVector(x2, y2)\n self.x = (self.a.x + self.b.x) / 2\n self.y = (self.a.y + self.b.y) / 2\n self.lon = dist(self.a.x, self.a.y, self.b.x, self.b.y)\n self.rot = atan2((self.b.y - self.a.y), (self.b.x - self.a.x))\n","sub_path":"archives/new_baked_bob.pyde","file_name":"new_baked_bob.pyde","file_ext":"pyde","file_size_in_byte":6672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"128802525","text":"from django import forms\n\nfrom .models import FeatureRequest\n\n\nclass FeatureRequestForm(forms.ModelForm):\n\n class Meta:\n model = FeatureRequest\n fields = [\n 'title',\n 'description',\n 'client',\n 'priority',\n 'target_date',\n 'ticket_url',\n 'product_area'\n ]\n\n widgets = {\n 'title': forms.TextInput(attrs={\n 'placeholder': 'Enter Title',\n 'data-bind': \"value: title\"\n }),\n 'description': forms.Textarea(attrs={\n 'placeholder': 'Enter Description',\n 'data-bind': 'value: description'\n }),\n 'client': forms.Select(attrs={\n 'data-bind': \"\"\"\n options: clients,\n optionsText: 'name',\n optionsValue: 'id',\n value: selectedClient,\n optionsAfterRender: setOptionAsDisabled\n \"\"\",\n 'class': 'left'\n }),\n 'priority': forms.NumberInput(attrs={\n 'min': 1,\n 'placeholder': 'Enter Priority',\n 'data-bind': 'attr: {max: maxPriority}, value: priority',\n 'class': 'right'\n }),\n 'target_date': forms.DateTimeInput(attrs={\n 'type': 'date',\n 'data-bind': 'value: date'\n }),\n 'ticket_url': forms.TextInput(attrs={\n 'placeholder': 'Enter Ticket URL',\n 'data-bind': 'value: url'\n }),\n 'product_area': forms.Select(attrs={\n 'data-bind': \"\"\"\n options: productArea,\n optionsText: 'name',\n optionsValue: 'id',\n value: selectedArea,\n optionsAfterRender: setOptionAsDisabled\n \"\"\"\n }),\n }\n","sub_path":"feature_request_app/create/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"601909349","text":"from django.conf.urls import url\nfrom views import demo_template, couleur_template , post_upload,\\\n get_static_img, get_couleur_img, find_js\nfrom advertise import process_clicked_ad, retrieve_ad, find_ad_img\n\nurlpatterns = [\n url(r'^getAd/(.*)$', process_clicked_ad, 
name='get_ads'),\n url(r'^retrieve_ad$', retrieve_ad, name='retrieve_ad'),\n url(r'^ads/(.*)', get_static_img, name='get_static_img'),\n url(r'^demo/upload$', post_upload, name='post_upload'),\n url(r'^demo$', demo_template, name='demo_template'),\n url(r'^demo/img/(.*)', get_couleur_img, name='get_couleur_img'),\n url(r'^demo/(.*)$', couleur_template, name='couleur_template'),\n url(r'(.js$)', find_js, name='find_js'),\n url(r'^media/ads/(.*)$', find_ad_img, name='find_ad_img'),\n]\n","sub_path":"advertisement_service/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"422441891","text":"# Use the Azure Machine Learning data collector to log accuracy\nfrom azureml.logging import get_azureml_logger\nlogger = get_azureml_logger()\n\nfrom sklearn import datasets\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom pyspark import SparkContext\n\n# from sklearn.grid_search import GridSearchCV\n# Use spark_sklearn’s grid search instead:\nfrom spark_sklearn import GridSearchCV\nimport pandas as pd\n\nclass timeit():\n from datetime import datetime\n def __enter__(self):\n self.tic = self.datetime.now()\n def __exit__(self, *args, **kwargs):\n print('runtime: {}'.format(self.datetime.now() - self.tic))\n\nsc = SparkContext.getOrCreate()\n\ndigits = datasets.load_digits()\nx, y = digits.data, digits.target\n\n# Create hold-out test dataset\nx_train,x_test,y_train,y_test = train_test_split(x,y, test_size=0.25)\n\nparam_grid = {\"max_depth\": [3, None],\n \"max_features\": [1, 3, 10],\n \"min_samples_leaf\": [1, 3, 10],\n \"bootstrap\": [True, False],\n \"criterion\": [\"gini\", \"entropy\"],\n \"n_estimators\": [10, 20, 40, 80]}\n\ngs = GridSearchCV(sc=sc, estimator=RandomForestClassifier(), cv=4,param_grid=param_grid,refit=True)\n\nwith timeit():\n gs.fit(x_train, y_train)\n\nresults = pd.DataFrame(gs.cv_results_)\nprint(results.sort_values(['mean_test_score'],ascending=False)[0:10])\n\n# Validate accuracy of best model against hold-out data\nbest_model = gs.best_estimator_\ntest_accuracy = best_model.score(x_test,y_test)\nprint(test_accuracy)\n\nlogger.log('Best model accuracy',test_accuracy)\n","sub_path":"sweep_spark.py","file_name":"sweep_spark.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"343698650","text":"# Copyright 2015 Red Hat, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport inspect\nimport sys\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\n\nfrom grafana_dashboards.builder import Builder\nfrom grafana_dashboards import config\n\nCONF = cfg.CONF\nLOG = logging.getLogger(__name__)\n\n\nclass Commands(object):\n\n def __init__(self):\n self.builder = Builder()\n\n def execute(self):\n exec_method = getattr(self, CONF.action.name)\n args = inspect.getargspec(exec_method)\n args.args.remove('self')\n kwargs = {}\n for arg in args.args:\n kwargs[arg] = getattr(CONF.action, arg)\n exec_method(**kwargs)\n\n def update(self, path):\n self.builder.update_dashboard(path)\n\n\ndef main():\n\n def add_command_parsers(subparsers):\n parser_update = subparsers.add_parser('update')\n parser_update.add_argument(\n 'path', help='colon-separated list of paths to YAML files or'\n ' directories')\n\n CONF.register_cli_opt(\n cfg.SubCommandOpt('action', handler=add_command_parsers))\n logging.register_options(CONF)\n logging.setup(CONF, 'grafana-dashboard')\n config.prepare_args(sys.argv)\n\n Commands().execute()\n sys.exit(0)\n","sub_path":"grafana_dashboards/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"563107264","text":"\"\"\"\r\nCreates a GUI window to calibrate energy for an Mca.\r\n\r\nAuthor: Mark Rivers\r\nCreated: Sept. 18, 2002\r\nModifications:\r\n\"\"\"\r\nfrom Tkinter import *\r\nimport copy\r\nimport tkMessageBox\r\nimport Pmw\r\nimport Mca\r\nimport Numeric\r\nimport math\r\nimport Xrf\r\nimport CARSMath\r\nimport BltPlot\r\n\r\n############################################################\r\nclass mcaCalibrateEnergy_widgets:\r\n    \"\"\" Private class \"\"\"\r\n    def __init__(self, nrois):\r\n        self.use_flag = range(nrois)\r\n        self.centroid = range(nrois)\r\n        self.fwhm = range(nrois)\r\n        self.energy = range(nrois)\r\n        self.energy_diff = range(nrois)\r\n        self.line = range(nrois)\r\n\r\nclass mcaCalibrateEnergy:\r\n    def __init__(self, mca, command=None):\r\n        \"\"\"\r\n        Creates a new GUI window for calibrating energy for an Mca object.\r\n\r\n        Inputs:\r\n            mca:\r\n                An Mca instance to be calibrated. The Mca must have at least 2\r\n                Regions of Interest (ROIs) defined for a linear calibration and\r\n                3 ROIs defined for a quadratic calibration.\r\n\r\n        Keywords:\r\n            command:\r\n                A callback command that will be executed if the OK button on\r\n                the GUI window is pressed. 
The callback will be invoked as:\r\n                    command(exit_status)\r\n                where exit_status is 1 if OK was pressed, and 0 if Cancel was\r\n                pressed or the window was closed with the window manager.\r\n\r\n        Procedure:\r\n            The calibration is done by determining the centroid position and\r\n            energy of each ROI.\r\n\r\n            The centroid positions are computed by fitting the\r\n            ROI counts to a Gaussian, using CARSMath.fit_gaussian.\r\n\r\n            The energy of the ROI can be entered manually in the GUI window, or it\r\n            can be determined automatically if the label of the ROI can be\r\n            successfully used in Xrf.lookup_xrf_line() or Xrf.lookup_gamma_line().\r\n\r\n            Each ROI can be selectively used or omitted when doing the calibration.\r\n\r\n            The errors in the energy calibration and the FWHM of each ROI as a\r\n            function of energy, can be plotted using BltPlot.\r\n        \"\"\"\r\n        self.input_mca = mca\r\n        self.mca = copy.copy(mca)\r\n        self.exit_command = command\r\n        self.roi = self.mca.get_rois()\r\n        self.nrois = len(self.roi)\r\n        if (self.nrois < 2):\r\n            tkMessageBox.showerror(title='mcaCalibrateEnergy Error', \r\n                message='Must have at least two ROIs to perform calibration')\r\n            return\r\n        self.calibration = self.mca.get_calibration()\r\n        self.fwhm_chan = Numeric.zeros(self.nrois, Numeric.Float)\r\n        self.widgets = mcaCalibrateEnergy_widgets(self.nrois)\r\n        self.data = self.mca.get_data()\r\n\r\n        # Compute the centroid and FWHM of each ROI\r\n        for i in range(self.nrois):\r\n            left = self.roi[i].left\r\n            right = self.roi[i].right+1\r\n            total_counts = self.data[left:right]\r\n            n_sel        = right - left\r\n            sel_chans    = left + Numeric.arange(n_sel)\r\n            left_counts  = self.data[left]\r\n            right_counts = self.data[right]\r\n            bgd_counts   = (left_counts + Numeric.arange(float(n_sel))/(n_sel-1) *\r\n                                (right_counts - left_counts))\r\n            net_counts   = total_counts - bgd_counts\r\n            net          = Numeric.sum(net_counts)\r\n \r\n            if ((net > 0.) and (n_sel >= 3)):\r\n                amplitude, centroid, fwhm = CARSMath.fit_gaussian(sel_chans, net_counts)\r\n                self.roi[i].centroid = centroid\r\n                self.fwhm_chan[i] = fwhm\r\n            else:\r\n                self.roi[i].centroid = (left + right)/2.\r\n                self.fwhm_chan[i] = right-left\r\n            self.roi[i].fwhm = (self.mca.channel_to_energy(self.roi[i].centroid + \r\n                                        self.fwhm_chan[i]/2.) - \r\n                                self.mca.channel_to_energy(self.roi[i].centroid - \r\n                                        self.fwhm_chan[i]/2.))\r\n\r\n        self.widgets.top = t = Pmw.Dialog(command=self.menu_ok_cancel,\r\n                     buttons=('OK', 'Apply', 'Cancel'),\r\n                     title='mcaCalibrateEnergy')\r\n        top = t.component('dialogchildsite')\r\n        box = Frame(top, borderwidth=1, relief=SOLID); box.pack(fill=X, pady=3)\r\n        t = Label(box, text='ROI'); t.grid(row=0, column=0)\r\n        t = Label(box, text='Use?'); t.grid(row=0, column=1)\r\n        t = Label(box, text='Centroid'); t.grid(row=0, column=2)\r\n        t = Label(box, text='FWHM'); t.grid(row=0, column=3)\r\n        t = Label(box, text='Energy'); t.grid(row=0, column=4)\r\n        t = Label(box, text='Fluor. 
line'); t.grid(row=0, column=5)\r\n        t = Label(box, text='Energy diff.'); t.grid(row=0, column=6)\r\n        text_width=10\r\n        for i in range(self.nrois):\r\n            row=i+1\r\n            t = Label(box, text=str(i)); \r\n            t.grid(row=row, column=0)\r\n            self.widgets.use_flag[i] = t = Pmw.OptionMenu(box,\r\n                        items=('No','Yes'),\r\n                        initialitem = self.roi[i].use,\r\n                        command=lambda e, s=self, r=i: s.menu_use(e,r))\r\n            t.grid(row=row, column=1)\r\n            self.widgets.centroid[i] = t = Pmw.EntryField(box, \r\n                        value=('%.3f' % self.roi[i].centroid),\r\n                        entry_width=text_width, entry_justify=CENTER, \r\n                        command=lambda s=self, r=i: s.menu_centroid(r))\r\n            t.grid(row=row, column=2)\r\n            self.widgets.fwhm[i] = t = Label(box, \r\n                        text=('%.3f' % self.roi[i].fwhm), width=text_width,\r\n                        justify=CENTER, borderwidth=1, relief=SOLID)\r\n            t.grid(row=row, column=3)\r\n            # If the ROI energy is zero, then try to use the label to lookup an\r\n            # XRF line energy\r\n            if (self.roi[i].energy == 0.0):\r\n                self.roi[i].energy = Xrf.lookup_xrf_line(self.roi[i].label)\r\n                if (self.roi[i].energy == None):\r\n                    self.roi[i].energy = Xrf.lookup_gamma_line(self.roi[i].label)\r\n                if (self.roi[i].energy == None): self.roi[i].energy=0.0\r\n            self.widgets.energy[i] = t = Pmw.EntryField(box, \r\n                        value=('%.3f' % self.roi[i].energy),\r\n                        entry_width=text_width, entry_justify=CENTER, \r\n                        command=lambda s=self, r=i: s.menu_energy(r))\r\n            t.grid(row=row, column=4)\r\n            self.widgets.line[i] = t = Pmw.EntryField(box, \r\n                        value=str(self.roi[i].label),\r\n                        entry_width=text_width, entry_justify=CENTER, \r\n                        command=lambda s=self, r=i: s.menu_line(r))\r\n            t.grid(row=row, column=5)\r\n\r\n            self.widgets.energy_diff[i] = t = Label(box, \r\n                        text=('%.3f' % 0.0), width=text_width,\r\n                        justify=CENTER, borderwidth=1, relief=SOLID)\r\n            t.grid(row=row, column=6)\r\n\r\n        row = Frame(top, borderwidth=1, relief=SOLID); row.pack(fill=X, pady=3)\r\n        self.widgets.fit_type = t = Pmw.OptionMenu(row, labelpos=N,\r\n                                    label_text='Calibration type:',\r\n                                    items=('Linear','Quadratic'))\r\n        t.pack(side=LEFT, anchor=S)\r\n        self.widgets.do_fit = t = Button(row, text='Compute calibration', \r\n                                    command=self.menu_do_fit)\r\n        t.pack(side=LEFT, anchor=S)\r\n        self.widgets.plot_cal = t = Button(row, text='Plot calibration error',\r\n                                    command=self.menu_plot_calibration)\r\n        t.pack(side=LEFT, anchor=S)\r\n        self.widgets.plot_fwhm = t = Button(row, text='Plot FWHM',\r\n                                    command=self.menu_plot_fwhm)\r\n        t.pack(side=LEFT, anchor=S)\r\n\r\n        row = Frame(top, borderwidth=1, relief=SOLID); row.pack(fill=X, pady=3)\r\n        text_width=10\r\n        t = Label(row, text='Calibration coefficients'); t.pack()\r\n        self.widgets.cal_units = t = Pmw.EntryField(row, \r\n                            label_text='Units:', labelpos=W,\r\n                            value=self.calibration.units,\r\n                            entry_width=text_width, entry_justify=CENTER)\r\n        t.pack(side=LEFT)\r\n        self.widgets.cal_offset = t = Pmw.EntryField(row, \r\n                            label_text='Offset:', labelpos=W,\r\n                            value=self.calibration.offset,\r\n                            entry_width=text_width, entry_justify=CENTER)\r\n        t.pack(side=LEFT)\r\n        self.widgets.cal_slope = t = Pmw.EntryField(row, \r\n                            label_text='Slope:', labelpos=W,\r\n                            value=self.calibration.slope,\r\n                            entry_width=text_width, entry_justify=CENTER)\r\n        t.pack(side=LEFT)\r\n        self.widgets.cal_quad = t = Pmw.EntryField(row, \r\n                            label_text='Quadratic:', labelpos=W,\r\n                            value=self.calibration.quad,\r\n                            entry_width=text_width, entry_justify=CENTER)\r\n        t.pack(side=LEFT)\r\n\r\n    def menu_plot_calibration(self):\r\n        \"\"\" Private method \"\"\"\r\n        energy = []\r\n        energy_diff = []\r\n        energy_use = []\r\n        
energy_diff_use = []\r\n        for i in range(self.nrois):\r\n            energy.append(self.roi[i].energy)\r\n            energy_diff.append(self.roi[i].energy -\r\n                               self.mca.channel_to_energy(self.roi[i].centroid))\r\n            if (self.roi[i].use):\r\n                energy_use.append(energy[i])\r\n                energy_diff_use.append(energy_diff[i])\r\n        p = BltPlot.BltPlot(energy, energy_diff, \r\n                   title='MCA Calibration', legend=1,\r\n                   symbol='circle', linewidth=0,\r\n                   xtitle='Energy', ytitle='Calibration error', \r\n                   label='All points')\r\n        p.oplot(energy_use, energy_diff_use, \r\n                   symbol=\"square\", linewidth=1,\r\n                   label='Points used')\r\n\r\n    def menu_plot_fwhm(self):\r\n        \"\"\" Private method \"\"\"\r\n        energy = []\r\n        fwhm = []\r\n        for i in range(self.nrois):\r\n            energy.append(self.roi[i].energy)\r\n            fwhm.append(self.roi[i].fwhm)\r\n        p = BltPlot.BltPlot(energy, fwhm, title='MCA FWHM',\r\n                   symbol='circle', linewidth=1, \r\n                   xtitle='Energy', ytitle='FWHM')\r\n\r\n    def menu_energy(self, roi):\r\n        \"\"\" Private method \"\"\"\r\n        energy = float(self.widgets.energy[roi].get())\r\n        self.roi[roi].energy = energy\r\n        self.widgets.energy[roi].setentry('%.3f' % energy)\r\n\r\n    def menu_centroid(self, roi):\r\n        \"\"\" Private method \"\"\"\r\n        centroid = float(self.widgets.centroid[roi].get())\r\n        self.roi[roi].centroid = centroid\r\n        self.widgets.centroid[roi].setentry('%.3f' % centroid)\r\n\r\n    def menu_use(self, value, roi):\r\n        \"\"\" Private method \"\"\"\r\n        self.roi[roi].use = (value == 'Yes')\r\n\r\n    def menu_line(self, roi):\r\n        \"\"\" Private method \"\"\"\r\n        line = self.widgets.line[roi].get()\r\n        energy = Xrf.lookup_xrf_line(line)\r\n        if (energy == None):\r\n            energy = Xrf.lookup_gamma_line(line)\r\n        if (energy != None): \r\n            self.roi[roi].energy = energy\r\n            self.widgets.energy[roi].setentry('%.3f' % energy)\r\n\r\n    def menu_do_fit(self):\r\n        \"\"\" Private method \"\"\"\r\n        degree = self.widgets.fit_type.index(\r\n                        self.widgets.fit_type.getcurselection()) + 1\r\n        use = []\r\n        for i in range(self.nrois):\r\n            if (self.roi[i].use): use.append(i)\r\n        nuse = len(use)\r\n        if ((degree == 1) and (nuse < 2)):\r\n            tkMessageBox.showerror(title='mcaCalibrateEnergy Error', \r\n                message='Must have at least two valid points for linear calibration')\r\n            return\r\n        elif ((degree == 2) and (nuse < 3)):\r\n            tkMessageBox.showerror(title='mcaCalibrateEnergy Error', \r\n                message='Must have at least three valid points for quadratic calibration')\r\n            return\r\n        chan=Numeric.zeros(nuse, Numeric.Float)\r\n        energy=Numeric.zeros(nuse, Numeric.Float)\r\n        weights=Numeric.ones(nuse, Numeric.Float)\r\n        for i in range(nuse):\r\n            chan[i] = self.roi[use[i]].centroid\r\n            energy[i] = self.roi[use[i]].energy\r\n        coeffs = CARSMath.polyfitw(chan, energy, weights, degree)\r\n        self.calibration.offset = coeffs[0]\r\n        self.widgets.cal_offset.setentry(str(self.calibration.offset))\r\n        self.calibration.slope = coeffs[1]\r\n        self.widgets.cal_slope.setentry(str(self.calibration.slope))\r\n        if (degree == 2):\r\n            self.calibration.quad = coeffs[2]\r\n        else:\r\n            self.calibration.quad = 0.0\r\n        self.widgets.cal_quad.setentry(str(self.calibration.quad))\r\n        self.mca.set_calibration(self.calibration)\r\n        for i in range(self.nrois):\r\n            energy_diff = (self.roi[i].energy -\r\n                           self.mca.channel_to_energy(self.roi[i].centroid))\r\n            self.widgets.energy_diff[i].configure(text=('%.4f' % energy_diff))\r\n            # Recompute FWHM\r\n            self.roi[i].fwhm = (self.mca.channel_to_energy(self.roi[i].centroid + \r\n                                            self.fwhm_chan[i]/2.) 
- \r\n                                self.mca.channel_to_energy(self.roi[i].centroid -\r\n                                            self.fwhm_chan[i]/2.))\r\n            self.widgets.fwhm[i].configure(text=('%.3f' % self.roi[i].fwhm))\r\n\r\n    def menu_ok_cancel(self, button):\r\n        \"\"\" Private method \"\"\"\r\n        if (button == 'OK') or (button == 'Apply'):\r\n            # Copy calibration and rois to input mca object\r\n            self.input_mca.set_calibration(self.calibration)\r\n            self.input_mca.set_rois(self.roi)\r\n        if (button == 'OK'):\r\n            exit_status=1\r\n        elif (button == 'Apply'):\r\n            return\r\n        else:\r\n            exit_status = 0\r\n        if (self.exit_command): self.exit_command(exit_status)\r\n        self.widgets.top.destroy()\r\n","sub_path":"gsecars/mcaCalibrateEnergy.py","file_name":"mcaCalibrateEnergy.py","file_ext":"py","file_size_in_byte":14011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"546092889","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/bpiwowar/development/datasets/datasets/__main__.py\n# Compiled at: 2016-11-18 05:48:09\n# Size of source mod 2**32: 3293 bytes\nimport argparse, sys, logging\ntry:\n    from argcomplete import autocomplete\nexcept:\n    autocomplete = lambda x: None\n\nshare_dir = '/Users/bpiwowar/development/datasets/share'\nparser = argparse.ArgumentParser(description='datasets manager')\nparser.add_argument('--verbose', action='store_true', help='Be verbose')\nparser.add_argument('--debug', action='store_true', help='Be even more verbose')\nparser.add_argument('--configuration', help='Directory containing the configuration files', default=share_dir)\nsubparsers = parser.add_subparsers(help='sub-command help', dest='command')\nsubparsers.add_parser('info', help='Information about ircollections')\nsubparsers.add_parser('search', help='Search all the registered datasets')\nprepare_parser = subparsers.add_parser('prepare', help='Prepare a dataset')\nget_parser = subparsers.add_parser('get', help='Prepare a dataset')\nfor p in [prepare_parser, get_parser]:\n    p.add_argument('dataset', nargs=1, help='The dataset ID')\n    p.add_argument('args', nargs='*', help='Arguments for the preparation')\n\nautocomplete(parser)\nargs = parser.parse_args()\nif args.command is None:\n    parser.print_help()\n    sys.exit()\nif args.verbose:\n    logging.getLogger().setLevel(logging.INFO)\nif args.debug:\n    logging.getLogger().setLevel(logging.DEBUG)\nimport os\nfrom os.path import join\nimport yaml\n\ndef readyaml(path):\n    with open(path) as (f):\n        return yaml.load(f)\n\n\ndef configpath(args):\n    return join(args.configuration, 'config')\n\n\ndef datapath(args):\n    return join(args.configuration, 'data')\n\n\ndef command_search(args):\n    cpath = configpath(args)\n    for root, dirs, files in os.walk(cpath, topdown=False):\n        index = join(root, 'index.yaml')\n        prefix = os.path.relpath(root, cpath)\n        if os.path.exists(index):\n            index = readyaml(index)\n            if 'files' in index:\n                for relpath in index['files']:\n                    path = join(root, '%s.yaml' % relpath)\n                    data = readyaml(path)\n                    if data is not None and 'data' in data:\n                        for d in data['data']:\n                            if type(d['id']) == list:\n                                for _id in d['id']:\n                                    print('%s.%s' % (prefix, _id))\n\n                            else:\n                                print('%s.%s' % (prefix, d['id']))\n\n                    else:\n                        logging.warn('No data defined in %s' % path)\n\n\ntry:\n    fname = 'command_%s' % args.command.replace('-', '_')\n    f = globals()[fname]\n    f(args)\nexcept Exception as e:\n    sys.stderr.write('Error while running command %s:\\n' % 
args.command)\n    sys.stderr.write(str(e))\n    if args.debug:\n        import traceback\n        sys.stderr.write(traceback.format_exc())","sub_path":"pycfiles/datamaestro-0.6.13-py3-none-any/__main__.cpython-35.py","file_name":"__main__.cpython-35.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"26805411","text":"import json\nimport boto3\nfrom site_renderer import SiteRenderer\n\ns3_client = boto3.client('s3')\n\nS3_BUCKET = 'whatsatabes.com'\nFILENAME = 'index.html'\n\ndef lambda_handler(event, context):\n    renderer = SiteRenderer()\n    rendered_output = renderer.render()\n\n    s3_client.put_object(\n        ACL='public-read',\n        ContentType='text/html',\n        Body=rendered_output,\n        Bucket=S3_BUCKET,\n        Key=FILENAME)\n","sub_path":"src/whats-at-abes/page_renderer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"148740471","text":"# http://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM, Activation\nfrom sklearn.preprocessing import MinMaxScaler\nimport os\n\n# brew install graphviz\n# pip3 install graphviz\n# pip3 install pydot\nfrom keras.utils.visualize_util import plot\n\nimport matplotlib.pyplot as plt\n\ntimesteps = seq_length = 7\ndata_dim = 5\n\n# Open,High,Low,Close,Volume\nxy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')\nxy = xy[::-1]  # reverse order (chronologically ordered)\n\n# very important. It does not work without it.\nscaler = MinMaxScaler(feature_range=(0, 1))\nxy = scaler.fit_transform(xy)\n\nx = xy\ny = xy[:, [-1]]  # Close as label\n\ndataX = []\ndataY = []\nfor i in range(0, len(y) - seq_length):\n    _x = x[i:i + seq_length]\n    _y = y[i + 1]  # Next close price\n    print(_x, \"->\", _y)\n    dataX.append(_x)\n    dataY.append(_y)\n\n# split to train and testing\ntrain_size = int(len(dataY) * 0.7)\ntest_size = len(dataY) - train_size\ntrainX, testX = np.array(dataX[0:train_size]), np.array(\n    dataX[train_size:len(dataX)])\ntrainY, testY = np.array(dataY[0:train_size]), np.array(\n    dataY[train_size:len(dataY)])\n\nmodel = Sequential()\nmodel.add(LSTM(1, input_shape=(seq_length, data_dim), return_sequences=False))\n# model.add(Dense(1))\nmodel.add(Activation(\"linear\"))\nmodel.compile(loss='mean_squared_error', optimizer='adam')\n\nmodel.summary()\n\n# Store model graph in png\n# plot(model, to_file=os.path.basename(__file__) + '.png', show_shapes=True)\n\nprint(trainX.shape, trainY.shape)\nmodel.fit(trainX, trainY, nb_epoch=200)\n\n# make predictions\ntestPredict = model.predict(testX)\n\n# inverse values\n# testPredict = scaler.transform(testPredict)\n# testY = scaler.transform(testY)\n\n# print(testPredict)\nplt.plot(testY)\nplt.plot(testPredict)\nplt.show()\n","sub_path":"klab-12-3-rnn_stock_prediction.py","file_name":"klab-12-3-rnn_stock_prediction.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"55222164","text":"# File: pamfun.py\r\n# Functions for pulse amplitude modulation (PAM)\r\nfrom pylab import *\r\ndef pam10(an, FB, Fs, ptype, pparms=[]):\r\n\t\"\"\"\r\n\tPulse amplitude modulation: a_n -> s(t), -TB/2<=t<(N-1/2)*TB,\r\n\tV1.0 for ’rect’, ’sinc’, and ’tri’ pulse types.\r\n\t>>>>> tt, st = pam10(an, FB, Fs, ptype, pparms) <<<<<\r\n\twhere 
an:\n\tN-symbol DT input sequence a_n, 0 <= n < N\n\tFB:\n\tBaud rate of a_n, TB=1/FB\n\tFs:\n\tsampling rate of s(t)\n\tptype: pulse type (’rect’,’sinc’,’tri’)\n\tpparms not used for ’rect’,’tri’\n\tpparms = [k, beta] for ’sinc’\n\tk:\n\t\"tail\" truncation parameter for ’sinc’\n\t(truncates p(t) to -k*TB <= t < k*TB)\n\tbeta: Kaiser window parameter for ’sinc’\n\ttt:\n\ttime axis for s(t), starts at -TB/2\n\tst:\n\tCT output signal s(t), -TB/2<=t<(N-1/2)*TB,\n\twith sampling rate Fs\n\t\"\"\"\n\tN = len(an)\t\t\t# Number of data symbols\n\tTB = 1/float(FB)\t\t# Time per symbol\n\tixL = ceil(-Fs*0.5*TB)\t\t# Left index for time axis\n\tixR = ceil(Fs*(N-0.5)*TB) \t# Right index for time axis\n\ttt = arange(ixL,ixR)/float(Fs) \t# Time axis for s(t)\n\t\n\t# ***** Conversion from DT a_n to CT a_s(t) *****\n\tast = zeros(len(tt))\t\t# Initialize a_s(t)\n\tix = array(around(Fs*arange(0,N)*TB),int)\t# Symbol center indexes\n\tast[ix-int(ixL)] = Fs*an\t# delta_n -> delta(t) conversion\n\t\n\t# ***** Set up PAM pulse p(t) *****\n\tptype = ptype.lower()\t# Convert ptype to lowercase\n\t\n\t# Set left/right limits for p(t)\n\tif (ptype=='rect'):\n\t\tkL = -0.5; kR = -kL\n\telse:\n\t\tkL = -1.0; kR = -kL\n\t\n\t# Default left/right limits\n\tixpL = ceil(Fs*kL*TB)\t\t# Left index for p(t) time axis\n\tixpR = ceil(Fs*kR*TB)\t\t# Right index for p(t) time axis\n\tttp = arange(ixpL,ixpR)/float(Fs)\t # Time axis for p(t)\n\tpt = zeros(len(ttp))\t\t# Initialize pulse p(t)\n\tif (ptype=='rect'):\t\t# Rectangular p(t)\n\t\tix = where(logical_and(ttp>=kL*TB, ttp<kR*TB))[0]\n\t\tpt[ix] = ones(len(ix))\n\telif (ptype == 'tri'):\n\t\tpt = array([(1+i*1/TB) if i*1/TB+1 < 1.0 else (1-i*1/TB) for i in list(ttp)])\n\telif (ptype=='sinc'):\n\t\tk=pparms[0]\n\t\tkL = -1.0*k; kR = -kL\t# truncate symmetrically to -k*TB <= t < k*TB\n\t\tixpL = ceil(Fs*kL*TB)\t\t# Left index for p(t) time axis\n\t\tixpR = ceil(Fs*kR*TB)\t\t# Right index for p(t) time axis\n\t\tttp = arange(ixpL,ixpR)/float(Fs)\t # Time axis for p(t)\n\t\tpt = zeros(len(ttp))\n\t\tbeta=pparms[1]\n\t\tpt = array([sin(pi*t/TB)/(pi*t/TB) if t!= 0 else 1.0 for t in list(ttp)])\n\t\tpt=pt*kaiser(len(pt), beta)\n\telse:\n\t\tprint(\"ptype ’%s’ is not recognized\" % ptype)\n\t\n\t# ***** Filter with h(t) = p(t) *****\n\tst = convolve(ast,pt)/float(Fs) \t# s(t) = a_s(t)*p(t)\n\tst = st[-ixpL:ixR-ixL-ixpL] \t\t# Trim after convolution\n\treturn tt, st\n","sub_path":"pamfun.py","file_name":"pamfun.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"245997211","text":"import re\nimport typing as tp\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef extract_news(parser):\n    \"\"\" Extract news from a given web page \"\"\"\n    news_list = []\n\n    authors = [i.text for i in parser.body.find_all(\"a\", {\"class\": \"hnuser\"})]\n    comments = [\n        i for i in parser.body.find_all(\"a\") if \"item?id=\" in i.attrs[\"href\"]\n    ]\n    comments = [\n        i.text for i in comments if (re.match(r\"\\d+\\scomment\", i.text) or i.text == \"discuss\")\n    ]\n    comment_counts = [\n        int(i[: i.find(\"\\xa0\")]) if not \"discuss\" in i else 0 for i in comments\n    ]\n    points = [\n        int(i.text[: i.text.find(\" \")]) for i in parser.body.find_all(\"span\", {\"class\": \"score\"})\n    ]\n    titles = [i.text for i in parser.body.find_all(\"a\", {\"class\": \"storylink\"})]\n    urls = [\n        i.attrs[\"href\"] for i in parser.body.find_all(\"a\", {\"class\": \"storylink\"})\n    ]\n\n    for i, _ in enumerate(authors):\n        extract = {\n            \"author\": authors[i],\n            
\"comments\": comment_counts[i],\n            \"points\": points[i],\n            \"title\": titles[i],\n            \"url\": urls[i],\n        }\n        news_list.append(extract)\n\n    return news_list\n\n\ndef extract_next_page(parser):\n    \"\"\" Extract next page URL \"\"\"\n    morelink = parser.body.find(\"a\", {\"class\": \"morelink\"}).attrs[\"href\"]\n    return morelink\n\n\ndef get_news(url, n_pages = 1):\n    \"\"\" Collect news from a given web page \"\"\"\n    news = []\n    while n_pages:\n        print(\"Collecting data from page: {}\".format(url))\n        response = requests.get(url)\n        soup = BeautifulSoup(response.text, \"html.parser\")\n        news_list = extract_news(soup)\n        next_page = extract_next_page(soup)\n        url = \"https://news.ycombinator.com/\" + next_page\n        news.extend(news_list)\n        n_pages -= 1\n    return news\n","sub_path":"homework06/naive_bayes/scraputils.py","file_name":"scraputils.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"410173854","text":"import unittest\n# from pudb import set_trace; set_trace()\n\nfrom tosparql.sparql2lambdaDCS import sparQLquery2LambdaDCS\nfrom utils.tree_tools import tree_or_string\n\nclass Sparql2LambdaDCSTestCase(unittest.TestCase):\n    def test_UnarySimple(self):\n        inputSparQLquery = \"\"\"SELECT ?xB WHERE {\n?xB <name> \"oregon\" .\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(<name> 'oregon')\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_UnaryWrongVariable(self):\n        # It would be useful to recognize that a SparQL query might be\n        # malformed and raise a ValueError exception.\n        inputSparQLquery = \"\"\"SELECT ?xA WHERE {\n?xB <name> \"oregon\" .\n}\"\"\"\n        self.assertRaises(ValueError, sparQLquery2LambdaDCS, inputSparQLquery)\n\n    def test_BinarySimple(self):\n        inputSparQLquery = \"\"\"SELECT ?xB WHERE {\n?xB <name> ?xA .\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(<name> ?xA)\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_UnaryChain1(self):\n        inputSparQLquery = \"\"\"SELECT ?xA WHERE {\n?xA <name> ?xB .\n?xB <city> \"Madrid\" .\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(<name> (<city> 'Madrid'))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_UnaryChain1TwoCalls(self):\n        inputSparQLquery = \"\"\"SELECT ?xA WHERE {\n?xA <name> ?xB .\n?xB <city> \"Madrid\" .\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(<name> (<city> 'Madrid'))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n        # Check that residual data from a previous call does not interfere with a new call\n        inputSparQLquery = \"\"\"SELECT ?xD WHERE {\n?xD <name> ?xC .\n?xC <city> \"Madrid\" .\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(<name> (<city> 'Madrid'))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_UnaryChain1Reversed(self):\n        inputSparQLquery = \"\"\"SELECT ?xA WHERE {\n?xA <name> ?xB .\n\"Madrid\" <city> ?xB .\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(<name> (!<city> 'Madrid'))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_UnaryChainSwappedStatements(self):\n        inputSparQLquery = \"\"\"SELECT ?xA WHERE {\n?xB <city> \"Madrid\" .\n?xA <name> ?xB .\n}\"\"\"\n        lambda_dcs_str = 
sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(<name> (<city> 'Madrid'))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_UnaryChain2(self):\n        inputSparQLquery = \"\"\"SELECT ?xA WHERE {\n?xA <name> ?xB .\n?xB <city> ?xC .\n?xC <district> \"Vallecas\" .\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(<name> (<city> (<district> 'Vallecas')))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_AndUnary(self):\n        inputSparQLquery = \"\"\"SELECT ?xA WHERE {\n?xA <name> \"Spain\" .\n?xA <city> \"Madrid\" .\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(and (<name> 'Spain') (<city> 'Madrid'))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_ArgmaxUnaryLargestLength(self):\n        inputSparQLquery = \"\"\"SELECT ?xE WHERE {\n\"Colorado\" <river> ?xD .\n?xD <length> ?xE .\n}\nORDER BY DESC(?xE) LIMIT 1\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(!<length> (argmax 1 1 (!<river> 'Colorado') <length>))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_ArgmaxUnaryTwoLargestLength(self):\n        inputSparQLquery = \"\"\"SELECT ?xE WHERE {\n?xD <type> \"river\" .\n?xD <flowsthru> \"colorado\" .\n?xD <length> ?xE .\n}\nORDER BY DESC(?xE) LIMIT 2\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(!<length> (argmax 1 2 (and (<type> 'river') (<flowsthru> 'colorado')) <length>))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_ArgmaxUnaryRiverWithLargestLength(self):\n        inputSparQLquery = \"\"\"SELECT ?xD WHERE {\n?xD <type> \"river\" .\n?xD <flowsthru> \"colorado\" .\n?xD <length> ?xE .\n}\nORDER BY DESC(?xE) LIMIT 1\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(argmax 1 1 (and (<type> 'river') (<flowsthru> 'colorado')) <length>)\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_FilterWithConstant(self):\n        # All rivers with length > 2000\n        inputSparQLquery = \"\"\"SELECT ?xD WHERE {\n?xD <type> \"river\" .\n?xD <length> ?xE .\nFILTER ( ?xE > 2000 )\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(and (<type> 'river') (<length> (> 2000)))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    ## TODO: get a query with a variable, not a constant\n    def test_FilterWithVariable(self):\n        # All rivers with length > 2000\n        inputSparQLquery = \"\"\"SELECT ?xD WHERE {\n?xD <type> \"river\" .\n?xD <length> ?xE .\nFILTER ( ?xE > 2000 )\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(and (<type> 'river') (<length> (> 2000)))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_simpleUnion(self):\n        # All geographical entities with name \"atlanta\" or \"arkansas\"\n        inputSparQLquery = \"\"\"SELECT ?xD WHERE {\n{ ?xD <name> \"atlanta\" .} UNION { ?xD <name> \"arkansas\" . 
}\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        expected_lambda_dcs_str = \"(or (<name> 'atlanta') (<name> 'arkansas'))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\n    def test_simpleUnionSelVarOutside(self):\n        # All rivers that flow through either montana or idaho\n        inputSparQLquery = \"\"\"SELECT ?N WHERE {\n   ?r <type> 'river' .\n   ?r <name> ?N .\n{ ?r <flowsthru> \"montana\" . } UNION { ?r <flowsthru> \"idaho\" . }\n}\"\"\"\n        lambda_dcs_str = sparQLquery2LambdaDCS(inputSparQLquery)\n        ## TODO: The following is the optimized LambdaDCS query, but the algorithm returns the second one.\n        ## TODO: Both are correct, but the first one is optimal\n        # expected_lambda_dcs_str = \"(!<name> (and (<type> 'river') (or (<flowsthru> 'montana') (<flowsthru> 'idaho')))\"\n        expected_lambda_dcs_str = \"(or (!<name> (and (<flowsthru> 'montana') (<type> 'river'))) (!<name> (and (<flowsthru> 'idaho') (<type> 'river'))))\"\n        self.assertEqual(expected_lambda_dcs_str, lambda_dcs_str)\n\nif __name__ == '__main__':\n    suite1 = unittest.TestLoader().loadTestsFromTestCase(Sparql2LambdaDCSTestCase)\n    suites = unittest.TestSuite([suite1])\n    unittest.TextTestRunner(verbosity=2).run(suites)\n\n","sub_path":"tosparql/sparql2lambdaDCS_test.py","file_name":"sparql2lambdaDCS_test.py","file_ext":"py","file_size_in_byte":7139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"512727744","text":"import csv\nfrom io import StringIO\nimport sys\nfrom typing import List\nfrom datetime import datetime\n\n\nfrom app.db import with_session\nfrom models.result_store import KeyValueStore\n\n# HACK: https://stackoverflow.com/questions/15063936/csv-error-field-larger-than-field-limit-131072\ncsv.field_size_limit(sys.maxsize)\n\n\n@with_session\ndef create_key_value_store(key, value, commit=True, session=None):\n    return KeyValueStore.create(\n        {\"key\": key, \"value\": value}, commit=commit, session=session\n    )\n\n\n@with_session\ndef update_key_value_store(key, value, commit=True, session=None):\n    kvs = get_key_value_store(key, session=session)\n\n    kvs.value = value\n    kvs.updated_at = datetime.utcnow()\n\n    if commit:\n        session.commit()\n\n    else:\n        session.flush()\n    kvs.id\n    return kvs\n\n\n@with_session\ndef upsert_key_value_store(key, value, commit=True, session=None):\n    kvp = get_key_value_store(key, session=session)\n    if kvp:\n        return update_key_value_store(key, value, commit, session=session)\n    else:\n        return create_key_value_store(key, value, commit, session=session)\n\n\n@with_session\ndef get_key_value_store(key, session=None):\n    return KeyValueStore.get(session=session, key=key)\n\n\n@with_session\ndef delete_key_value_store(key, commit=True, session=None):\n    item = get_key_value_store(key=key, session=session)\n    if item:\n        session.delete(item)\n        if commit:\n            session.commit()\n\n\ndef string_to_csv(raw_csv_str: str) -> List[List[str]]:\n    # Remove NULL byte to make sure csv conversion works\n    raw_csv_str = raw_csv_str.replace(\"\\x00\", \"\")\n    result = []\n\n    if len(raw_csv_str) > 0:\n        raw_results = StringIO(raw_csv_str)\n        csv_reader = csv.reader(raw_results, delimiter=\",\")\n        result = [row for row in csv_reader]\n    return result\n","sub_path":"querybook/server/logic/result_store.py","file_name":"result_store.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"147539167","text":"from cognac import actor, swig, 
log\n\n\n\"\"\"rapunzel.\"\"\"\n\n\n@actor\nclass rapunzel:\n    \"\"\"rapunzel actor.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        \"\"\"initializes rapunzel class.\"\"\"\n        self.dst = \"prince::prince\"\n        self.src = \"rapunzel\"\n        self.tpl = \"|: got : {}\"\n\n    async def handler(self, msg):\n        \"\"\"handles messages.\"\"\"\n        log(self.tpl.format(msg), logger=\"both\")\n        return await swig(self.src, self.dst, {})\n","sub_path":"examples/ranpunzel/rapunzel.py","file_name":"rapunzel.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"330771655","text":"import os\nhandler_path=os.path.dirname(os.path.abspath(__file__))+\"\\\\..\\\\Handler\"\n\nimport sys\nsys.path.append(handler_path)\nfrom ShiftWrapReportHandler import *\nfrom flask import render_template, url_for, redirect,request\nfrom flaskapp import flask_app\nfrom script.Form.forms import QueryForm,TlaForm\nfrom script.Log.log import *\nimport datetime\n\nimport json\nimport os\n\n\n@flask_app.route(\"/shiftwrap_report_view\",methods=[\"GET\",\"POST\"])\ndef shiftwrap_report_view():\n    if request.method == 'GET':\n\n        html_content = createhtmlbody()\n        report_name = \"./templates/temp/shiftwrap_report_\" + str(datetime.datetime.now())[0:19].replace(':', '-').replace(\" \", \"-\") + \".html\"\n        # report_name=\"static/shiftwrap_report.html\"\n        html_file_standerby = open(report_name, \"w+\")\n        print(html_content, file=html_file_standerby, flush=True)\n        html_file_standerby.close()\n\n        return render_template(\"shiftwrap_report_view.html\")\n    else:\n        form = request.form\n        if \"send-inner\" in form.keys():\n            sendmail()\n\n        elif \"send-outer\" in form.keys():\n            sendmail(\"outer\")\n\n        return render_template(\"home.html\")\n\n\n\n\n@flask_app.route(\"/shiftwrap_report_create\",methods=[\"GET\",\"POST\"])\ndef shiftwrap_report_create():\n    all_reports=[report for report in os.listdir(\"./templates/temp/\") if report.find(\"shiftwrap_report\") >=0 and os.path.isdir(report) ==False]\n    if len(all_reports)>0:\n        newest_report=all_reports[0]\n\n        for report in all_reports[1:]:\n            if report > newest_report:\n                os.remove(\"./templates/temp/\" + newest_report)\n                newest_report = report\n            else:\n                os.remove(\"./templates/temp/\" + report)\n        return render_template(\"./temp/\" + newest_report)\n\n    else:\n        newest_report=\"No Data\"\n        return newest_report\n","sub_path":"script/Controler/ShiftWrapReportControler.py","file_name":"ShiftWrapReportControler.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"233832414","text":"import json\nimport requests\n\n\nfrom zhihu_walker.common.config import zhihu_config\nfrom zhihu_walker.common.tools import flatten_dict\n\n\ndef parse_data():\n    limit = 5\n    offset = 0\n    url = zhihu_config['url'] + f'&limit={limit}&offset={offset}'\n    headers = json.loads(zhihu_config['headers'])\n    response = requests.get(url, headers=headers)\n\n    datas = response.json()['data']\n\n    result = dict()\n    for data in datas:\n        flatten_dict(data, result)\n        break\n    return result\n\n\ndef generate_orm_code(result):\n\n    for key, value in result.items():\n        if isinstance(value, str):\n            print(f'{key} = Column(String(255), nullable=False, default=None)')\n        elif isinstance(value, int):\n            print(f'{key} = Column(Integer, nullable=False, default=None)')\n        elif isinstance(value, float):\n            print(f'{key} = Column(Float(20, 5), nullable=False, default=None)')\n\n\ndef 
generate_sql(result):\n    sql = 'create table zhihu_live ('\n\n    res = list()\n    for key, value in result.items():\n        if isinstance(value, str):\n            res.append(f'{key} varchar(255)')\n        elif isinstance(value, int):\n            res.append(f'{key} int(10)')\n        elif isinstance(value, float):\n            res.append(f'{key} decimal(15, 5)')\n    sql += ', '.join(res)\n    sql += ');'\n    print(sql)\n\n\nif __name__ == '__main__':\n    result = parse_data()\n    # generate_orm_code(result)\n    generate_sql(result)\n","sub_path":"scripts/generate_orm_code.py","file_name":"generate_orm_code.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"474891267","text":"import tkinter as tk\r\nfrom Page import *\r\nfrom MovieSearcher import *\r\nfrom DatabaseHandler import *\r\nimport urllib3\r\nfrom PIL import ImageTk, Image\r\nimport io\r\n\r\nclass Page2(Page):\r\n    def __init__(self, *args, **kwargs):\r\n        Page.__init__(self, *args, **kwargs)\r\n\r\n        self.moviesPosters = []\r\n\r\n        frameList = tk.Frame(self)\r\n\r\n        self.listbox = tk.Listbox(frameList, width=60)\r\n        self.listbox.grid(row=0, column=0)\r\n        self.listbox.bind('<<ListboxSelect>>', self.onselect)\r\n\r\n        frameList.grid(row = 0, column = 0, pady = (20,0), padx = (25,0), sticky=tk.N)\r\n\r\n    def loadAllToWatchMoviesAndLift(self):\r\n        databaseHandler = DatabaseHandler()\r\n        conn = databaseHandler.establishConnection()\r\n        c = databaseHandler.getCursor(conn)\r\n        c.execute(\"SELECT * FROM users_movies WHERE user_id=? AND status='to_watch'\", (Page.loginedUser[0],))\r\n        rows = c.fetchall()\r\n        print(rows)\r\n        conn.close()\r\n        self.listbox.delete(0, tk.END)\r\n        self.moviesPosters = []\r\n        for r in rows:\r\n            self.listbox.insert(tk.END, r[1])\r\n            self.moviesPosters.append(r[5])\r\n        self.show()\r\n\r\n    def onselect(self, evt):\r\n        w = evt.widget\r\n        index = int(w.curselection()[0])\r\n\r\n        http = urllib3.PoolManager()\r\n\r\n        url = f'http://image.tmdb.org//t//p//w300/{self.moviesPosters[index]}'\r\n        response = http.request('GET', url)\r\n\r\n        im = Image.open(io.BytesIO(response.data))\r\n        image = ImageTk.PhotoImage(im)\r\n        label1 = tk.Label(self, image=image)\r\n        label1.photo = image\r\n        label1.grid(row=0, column=1, pady=(20,0), padx=(20,0))\r\n","sub_path":"page2.py","file_name":"page2.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"243430032","text":"# Pemavor.com Autocomplete Scraper\n# Author: Stefan Neefischer (stefan.neefischer@gmail.com)\n\nimport concurrent.futures\nimport pandas as pd\nimport itertools\nimport requests\nimport string\nimport json\nimport time\n\nstartTime = time.time()\n# If you use more than 50 seed keywords you should slow down your requests - otherwise google is blocking the script\n# If you have thousands of seed keywords use e.g. 
WAIT_TIME = 1 and MAX_WORKERS = 10\n\nWAIT_TIME = 0.1\nMAX_WORKERS = 20\n# set the autocomplete language\nlang = \"en\"\n\ncharList = \" \" + string.ascii_lowercase + string.digits\n\ndef makeGoogleRequest(query):\n    # If you make requests too quickly, you may be blocked by google \n    time.sleep(WAIT_TIME)\n    URL=\"http://suggestqueries.google.com/complete/search\"\n    PARAMS = {\"client\":\"firefox\",\n            \"hl\":lang,\n            \"q\":query}\n    headers = {'User-agent':'Mozilla/5.0'}\n    response = requests.get(URL, params=PARAMS, headers=headers)\n    if response.status_code == 200:\n        suggestedSearches = json.loads(response.content.decode('utf-8'))[1]\n        return suggestedSearches\n    else:\n        return \"ERR\"\n\ndef getGoogleSuggests(keyword):\n    # err_count1 = 0\n    queryList = [keyword + \" \" + char for char in charList]\n    suggestions = []\n    for query in queryList:\n        suggestion = makeGoogleRequest(query)\n        if suggestion != 'ERR':\n            suggestions.append(suggestion)\n\n    # Remove empty suggestions\n    suggestions = set(itertools.chain(*suggestions))\n    if \"\" in suggestions:\n        suggestions.remove(\"\")\n\n    return suggestions\n\n#read your csv file that contain keywords that you want to send to google autocomplete\ndf = pd.read_csv(\"keyword_seeds.csv\")\n# Take values of first column as keywords\nkeywords = df.iloc[:,0].tolist()\n\nresultList = []\n\nwith concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:\n    futuresGoogle = {executor.submit(getGoogleSuggests, keyword): keyword for keyword in keywords}\n\n    for future in concurrent.futures.as_completed(futuresGoogle):\n        key = futuresGoogle[future]\n        for suggestion in future.result():\n            resultList.append([key, suggestion])\n\n# Convert the results to a dataframe\noutputDf = pd.DataFrame(resultList, columns=['Keyword','Suggestion'])\n\n# Save dataframe as a CSV file\noutputDf.to_csv('keyword_suggestions.csv', index=False)\nprint('keyword_suggestions.csv File Saved')\n\nprint(f\"Execution time: { ( time.time() - startTime ) :.2f} sec\")\n","sub_path":"suggest.py","file_name":"suggest.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"153789979","text":"import pickle\nimport pandas as pd\nimport numpy as np\nimport os\nfrom io import StringIO\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_selection import chi2\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.naive_bayes import MultinomialNB\n\ndef train_sentiment():\n    print(\"training start\")\n    df = pd.read_csv(os.path.join(os.path.dirname( __file__ ), 'sentiments.csv'))\n    df.head()\n\n    col = ['Emotion', 'Review']\n    df = df[col]\n    df = df[pd.notnull(df['Review'])]\n    df.columns = ['Emotion', 'Review']\n    df['category_id'] = df['Emotion'].factorize()[0]\n    category_id_df = df[['Emotion', 'category_id']].drop_duplicates().sort_values('category_id')\n    category_to_id = dict(category_id_df.values)\n    id_to_category = dict(category_id_df[['category_id', 'Emotion']].values)\n    df.head()\n\n    tfidf = TfidfVectorizer(sublinear_tf=True, min_df=1, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english')\n    features = tfidf.fit_transform(df.Review).toarray()\n    labels = df.category_id\n    features.shape\n\n    N = 2\n    for Product, category_id in sorted(category_to_id.items()):\n        features_chi2 = chi2(features, labels == category_id)\n        indices = 
np.argsort(features_chi2[0])\n        feature_names = np.array(tfidf.get_feature_names())[indices]\n        unigrams = [v for v in feature_names if len(v.split(' ')) == 1]\n        bigrams = [v for v in feature_names if len(v.split(' ')) == 2]\n        print(\"# '{}':\".format(Product))\n        print(\"  . Most correlated unigrams:\\n. {}\".format('\\n. '.join(unigrams[-N:])))\n        print(\"  . Most correlated bigrams:\\n. {}\".format('\\n. '.join(bigrams[-N:])))\n\n    X_train, X_test, y_train, y_test = train_test_split(df['Review'], df['Emotion'], random_state = 0)\n\n    #vector initialise\n    count_vect = CountVectorizer()\n\n    X_train_counts = count_vect.fit_transform(X_train)\n    tfidf_transformer = TfidfTransformer()\n    X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)\n    clf = MultinomialNB().fit(X_train_tfidf, y_train)\n\n    # Save the vectorizer\n    vec_file = './vectorizer.pickle'\n    pickle.dump(count_vect, open(vec_file, 'wb'))\n\n    with open('./sentiment_custom_classifier', 'wb') as picklefile:\n        pickle.dump(clf,picklefile)\n\n    return True\n\ndef predict_custom_sentiment(para):\n    loaded_vectorizer = pickle.load(open('./vectorizer.pickle', 'rb'))\n\n    with open('./sentiment_custom_classifier', 'rb') as training_model:\n        model = pickle.load(training_model)\n\n    probability = (model.predict_proba(loaded_vectorizer.transform([para])))\n    emotion = (model.predict(loaded_vectorizer.transform([para])))\n\n    sentimentAnalysis = (probability[0][0] if emotion[0] == \"negative\" else probability[0][1])\n    return [emotion[0] + \"-\" + str(sentimentAnalysis)]\n\nif __name__ == \"__main__\":\n    train_sentiment()","sub_path":"customSentiment.py","file_name":"customSentiment.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"253991698","text":"from pyspark.sql import SparkSession,Row\nfrom pyspark.sql.functions import explode,array_max\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.functions import split,flatten\nfrom pyspark.sql.types import StructType\n\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.sql import Row, SQLContext\nfrom pyspark.sql.types import Row\nfrom pyspark import SparkContext\n\n\nspark = SparkSession.builder.appName(\"CommonHash\").getOrCreate()\n\n\n# userSchema= StructType().add(\"word\",\"string\").add(\"id\",\"integer\")\nuserSchema = (\n    StructType()\n    .add(\"id\", \"integer\")\n    .add(\"Lang\", \"string\")\n    .add(\"Date\", \"string\")\n    .add(\"Source\", \"string\")\n    .add(\"len\", \"integer\")\n    .add(\"Likes\", \"integer\")\n    .add(\"RT's\", \"string\")\n    .add(\"Hashtags\", \"string\")\n    .add(\"UserMentionNames\", \"string\")\n    .add(\"UserMentionID\", \"string\")\n    .add(\"name\", \"string\")\n    .add(\"Place\", \"string\")\n    .add(\"Followers\", \"integer\")\n    .add(\"Friends\", \"integer\")\n)\n\n\nlines = (\n    spark.readStream.format(\"csv\")\n    .option(\"sep\", \";\")\n    .schema(userSchema)\n    .load(\"hdfs://localhost:9000/stream\")\n)\n\ndf = lines.withColumn(\"ratio\",lines.Followers/lines.Friends)\n\ndf = df.select(\"name\",\"ratio\").groupby(\"name\",\"ratio\").count()\ndf= df.select(\"name\",\"ratio\").sort(\"ratio\",ascending=False).limit(1)\n#df.createOrReplaceTempView(\"view\")\n#res=spark.sql(\"SELECT Name,ratio from view where ratio=(SELECT max(ratio) from view)\")\n# df = df.select(\"Name\",\"ratio\").where(\"ratio\"==r)\n# df = \n\n\n\n\n\nquery = 
df.writeStream.outputMode(\"complete\").format(\"console\").start()\n\nquery.awaitTermination(100)\nquery.stop()\n","sub_path":"adminmgr/media/code/A3/task2/BD_246_921_972_1342.py","file_name":"BD_246_921_972_1342.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"230344639","text":"from django.test import client\n\nimport test_utils\nfrom nose.tools import eq_\nfrom pyquery import PyQuery as pq\n\nimport amo\nfrom amo.urlresolvers import reverse\nfrom search import forms\n\n\ndef test_form_version_label():\n    for app in amo.APP_USAGE:\n        r = client.Client().get('/en-US/{0}/'.format(app.short))\n        doc = pq(r.content)\n        eq_(doc('#advanced-search label')[0].text,\n            '%s Version' % unicode(app.pretty))\n\n\ndef test_korean():\n    \"All forms, regardless of nationality, should have an 'Any' version.\"\n    r = client.Client().get('/ko/firefox/')\n    doc = pq(r.content)\n    eq_(doc('#id_lver option')[0].values()[0], 'any')\n\n\nclass TestSearchForm(test_utils.TestCase):\n    fixtures = ('base/appversion', 'addons/persona',)\n\n    def test_get_app_versions(self):\n        actual = forms.get_app_versions(amo.FIREFOX)\n        expected = [('any', 'Any'), ('3.6', '3.6'),\n                    ('3.5', '3.5'), ('3.0', '3.0'), ]\n\n        # So you added a new appversion and this broke?  Sorry about that.\n        eq_(actual, expected)\n\n    def test_personas_selected(self):\n        r = self.client.get(reverse('browse.personas'), follow=True)\n        doc = pq(r.content)\n        eq_(doc('#cat option:selected').val(), 'personas')\n\n        # detail page\n        r = self.client.get(reverse('addons.detail', args=[15663]),\n                            follow=True)\n        doc = pq(r.content)\n        eq_(doc('#cat option:selected').val(), 'personas')\n\n    def test_no_personas(self):\n        \"\"\"Sunbird, Mobile and Seamonkey don't have personas. 
So don't\n        persuade people to search for them.\"\"\"\n        apps = ('sunbird', 'mobile', 'seamonkey',)\n\n        for app in apps:\n            r = self.client.get('/en-US/%s/' % app, follow=True)\n            doc = pq(r.content)\n            eq_(len(doc('.cat-all [value=personas]')), 0,\n                '%s shows personas' % app)\n","sub_path":"apps/search/tests/test_forms.py","file_name":"test_forms.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"20867835","text":"\n\n#class header\nclass _FAST():\n\tdef __init__(self,): \n\t\tself.name = \"FAST\"\n\t\tself.definitions = [u'moving or happening quickly, or able to move or happen quickly: ', u'If your watch or clock is fast, it shows a time that is later than the correct time.', u'used to refer to photographic film that allows you to take pictures when there is not much light or when things are moving quickly', u'used to describe something that is full of speed and excitement: ', u'without moral principles: ', u'If the colour of a piece of clothing is fast, the colour does not come out of the cloth when it is washed.']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_fast.py","file_name":"_fast.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"77019936","text":"import time\r\nfrom datetime import datetime\r\n\r\nimport pandas as pd\r\nimport numpy as np \r\n \r\nfrom sklearn.impute import SimpleImputer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.metrics import classification_report\r\n \r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.tree import plot_tree\r\nimport matplotlib.pyplot as plt\r\n\r\ndef load_datasets(feature_paths, label_paths):\r\n    feature = np.ndarray(shape=(0,7))\r\n    label = np.ndarray(shape=(0,1))\r\n    df_l=None\r\n    for file in feature_paths:\r\n        df = pd.read_csv(file, delimiter=',', na_values='?')\r\n        df_l = df.copy()\r\n        print(\"df size\",len(df.columns))\r\n        # df = pd.read_csv(file, delimiter=',', na_values='?', header=None)\r\n        # df['Time Occurred'] = pd.to_datetime(df['Date Occurred'],df['Time Occurred'])\r\n        # print(df['Time Occurred'])\r\n        # df['MO Codes']= df['MO Codes'].astype('category')\r\n        df['Victim Sex']= df['Victim Sex'].astype('str')\r\n        df['Victim Sex']= df['Victim Sex'].astype('category')\r\n        print(df['Victim Sex'])\r\n        df.info()\r\n        # df.drop([ 'Area ID','Area Name','Date Reported','Date Occurred', 'Crime Code Description',\r\n        df.drop([ 'Area ID','Area Name','Date Reported','Date Occurred', 'Time Occurred','Crime Code Description',\r\n        # 'MO Codes','Victim Descent','Premise Description','Address','Cross Street'], \r\n        'MO Codes','Victim Descent','Premise Description','Address','Cross Street','Victim Sex'], \r\n        axis=1,inplace=True)\r\n        print('f df len',len(df))\r\n        # print(\"df size\",len(df.columns))\r\n        # df.dropna(inplace=True, how='all')\r\n        # feature = np.concatenate((feature, df))\r\n\r\n        imp = SimpleImputer(missing_values=np.nan,fill_value=0)\r\n        # imp = SimpleImputer(missing_values='NaN',fill_value=0)\r\n        imp.fit(df)\r\n        df = imp.transform(df)\r\n        feature = np.concatenate((feature, df))\r\n\r\n        df2=df_l.take([4], axis=1)\r\n        # 
df2=df.take([4], axis=1)\r\n        # print(type(df))\r\n        # print(\"df2 size\",len(df.columns))\r\n        # print(df)\r\n        # print(type(df2))\r\n        # df2.drop(axis=0, index=0, columns=None, inplace=False)\r\n        print('l df len',len(df2))\r\n        # print(\"df2 size\",len(df2.columns))\r\n        # print(df2)\r\n        label = np.concatenate((label, df2))\r\n\r\n    label = np.ravel(label)\r\n    return feature, label\r\n \r\ndef main():\r\n    ISOTIMEFORMAT='%Y-%m-%d %X'\r\n    print(time.strftime(ISOTIMEFORMAT,time.localtime(time.time()))) #add by me\r\n    ''' data file paths '''\r\n    trainfilePaths = ['Traffic_Collision_Data_from_2010_to_Today.csv']\r\n    testfilePaths = ['Traffic_Collision_Data_from_2010_to_Today.csv']\r\n    ''' read in the data '''\r\n    x,y = load_datasets(trainfilePaths,testfilePaths)\r\n    # return\r\n    x_train, x_test, y_train, y_test = \\\r\n        train_test_split(x, y, test_size=0.3, random_state=0,stratify=y)\r\n    print('Start DT training')\r\n    dt = DecisionTreeClassifier(max_depth=7).fit(x_train, y_train)\r\n    # dt = DecisionTreeClassifier(max_depth=4).fit(x_train, y_train)\r\n    print('Training done')\r\n\r\n    answer_dt = dt.predict(x_test)\r\n    print(len(answer_dt))\r\n    np.savetxt('module.txt',answer_dt,fmt='%d', delimiter=',')\r\n    print('Prediction done')\r\n\r\n    print('\\n\\nThe classification report for DT:')\r\n    print(classification_report(y_test, answer_dt))\r\n    print(time.strftime(ISOTIMEFORMAT,time.localtime(time.time()))) #add by me\r\n\r\n    # visualize the data\r\n    plt.figure(figsize=(15,9))\r\n    plot_tree(dt,filled=True)\r\n    plt.show()\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"cn_uni_mooc-sklearn/traffic/Traffic_D.py","file_name":"Traffic_D.py","file_ext":"py","file_size_in_byte":3627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"337313298","text":"import sys\nimport json\nimport os.path\nimport re\n\n# ToDo Implement clean cache function ? 
\n# ToDo implement parse functions for the dataset\n\n\n# returns True if the given root path contains a valid vocalset installation\ndef is_vocalset_root(path):\n    test_file = 'female1/arpeggios/belt/f1_arpeggios_belt_c_a'\n\n    if(os.path.isfile(os.path.join(path, test_file + '.ogg'))):\n        return True\n    \n    if(os.path.isfile(os.path.join(path, test_file + '.wav'))):\n        return True\n\n    return False\n\n# returns the location of the vocalset.json configuration file and its containing folder\ndef get_config_json_path():\n    build_dir = os.path.abspath(os.path.join(__file__, os.pardir, os.pardir, os.pardir, 'build')) # sorry\n    json_path = os.path.join(build_dir, 'vocalset.json')\n\n    return build_dir, json_path \n\n# function stores the root path of the VocalSet installation in the configuration file\ndef cache_root_path(path):\n    build_dir, json_file = get_config_json_path()\n\n    data = {}\n    data['vocalset'] = []\n    data['vocalset'].append({'root_path': path})\n\n    if not os.path.exists(build_dir):\n        os.makedirs(build_dir)\n\n    with open(json_file, 'w+') as outfile:\n        json.dump(data, outfile)\n        print('Updated VocalSet root path: \\n' + path)\n        return\n\n    raise Exception('Could not store the VocalSet root path in ' + json_file)\n    \n\n# function returns the cached vocalset root path\ndef get_root_path(check_path = True):\n    _, json_file_path = get_config_json_path()\n\n    with open(json_file_path) as json_file:\n        data = json.load(json_file)\n        for cfg in data['vocalset']:\n            root_path = cfg['root_path']\n            if(check_path and not is_vocalset_root(root_path)):\n                raise Exception('Cached VocalSet root path seems to be invalid: ' + root_path)\n\n            return root_path\n\n\n    \n    raise Exception('Could not find the json containing a cached VocalSet root path. Try calling dataset.py <root_path>')\n\n\n\n\n# updates the vocalset root path in the configuration vocalset.json.
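cache_root_path() and get_root_path() above persist one path inside build/vocalset.json. The same save/load pattern reduced to a sketch, keeping the record's one-key layout:

# Save/load a single cached path in JSON, mirroring the record's layout.
import json, os

def save_root(json_file, path):
    os.makedirs(os.path.dirname(json_file), exist_ok=True)
    with open(json_file, 'w') as f:
        json.dump({'vocalset': [{'root_path': path}]}, f)

def load_root(json_file):
    with open(json_file) as f:
        return json.load(f)['vocalset'][0]['root_path']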
Returns true if path is valid and cache could be updated\ndef set_vocalset_root_path(dataset_root_path):\n\n    # check if it's a valid folder\n    if(not os.path.isdir(dataset_root_path)):\n        return False\n\n    dataset_root_path = os.path.abspath(dataset_root_path)\n\n    # test path\n    if(is_vocalset_root(dataset_root_path)):\n        cache_root_path(dataset_root_path)\n        return True\n\n    # test path/FULL    \n    dataset_root_path = os.path.join(dataset_root_path, 'FULL')\n    if(is_vocalset_root(dataset_root_path)):\n        cache_root_path(dataset_root_path)\n        return True\n    \n    return False\n\ndef get_sample(types, modes, vowels, singers):\n\n    # helper finds folders that fit a pattern described as a list of filters\n    def resolve_folders(path, filter_list):\n\n        # return this path as valid if we have no filters\n        if(len(filter_list) == 0):\n            \n            return [path]\n\n        # take our filter and truncate the list\n        dir_filters = filter_list[0]\n        filter_list = filter_list[1:]\n\n        directories = []\n        for sub_dir in os.listdir(path):\n            sub_path = os.path.join(path, sub_dir)\n            if os.path.isdir(sub_path):\n                if (sub_dir in dir_filters) or ('*' in dir_filters) or (len(dir_filters) == 0): \n                    found = resolve_folders(os.path.join(path, sub_path), filter_list)\n                    directories.extend(found)\n\n        return directories\n\n\n    # get root path\n    root_path = get_root_path()\n\n    # cleaning input \n    listify = lambda arg: [arg] if (type(arg) == str) else arg\n    vowels = listify(vowels)\n    modes = listify(modes)\n    types = listify(types)\n    singers = listify(singers)\n\n    # replace short version 'f1' - 'm1' with 'female1' - 'male1'\n    for i, singer in enumerate(singers):\n        singers[i] = re.sub(r\"^(m)(.*)\", r'male\\2', singers[i])\n        singers[i] = re.sub(r\"^(f)(.*)\", r'female\\2', singers[i])\n\n    \n    # collect valid folders\n    paths = resolve_folders(root_path, [singers, types, modes])\n\n    # collect valid files within folders\n    files = []\n    for path in paths:\n        for file in os.listdir(path):\n            full_file = os.path.join(path,file)\n            if os.path.isfile(full_file) and (file.endswith(\".wav\") or file.endswith(\".ogg\")): \n                for vowel in vowels:\n                    if(re.match(f\".*_{vowel}\\\.wav\", file)):\n                        files.append(full_file)\n\n\n    return files\n\n\n# calling dataset.py <root_path> sets / caches the VocalSet root path in build/vocalset.json\nif __name__ == '__main__':\n    \n    if(len(sys.argv) == 2 and sys.argv[1] == '--get'):\n        print(get_root_path())\n        sys.exit(0)\n    \n    if(len(sys.argv) == 3 and sys.argv[1] == '--set'):\n        dataset_root_path = os.path.normpath(sys.argv[2])\n        was_set = set_vocalset_root_path(dataset_root_path)\n        \n        if(not was_set):\n            print('Could not find a valid VocalSet root at ' + dataset_root_path) \n            sys.exit(-1)\n\n        sys.exit(0)\n\n\n\n    print('dataset.py: Expected dataset.py --set <path> or dataset.py --get')\n    \n    sys.exit(-1)\n    \n    \n\n","sub_path":"hsvs/tools/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"223650335","text":"#coding=utf-8\nimport requests\nfrom bs4 import BeautifulSoup\nimport os\nfrom multiprocessing import Pool\nimport sys\nimport urllib.request\n# http://i.meizitu.net/2018/07/13f05.jpg\nclass Demo(object):\n\n    def test(self):\n        Hostreferer = {\n            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',\n            'Referer': 'http://www.mzitu.com'\n        }\n        Picreferer = {\n            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',\n            'Referer': 'http://i.meizitu.net'\n        }\n        pic = \"http://www.mzitu.com/142599/5\"\n        html
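get_sample() above recurses through singer/type/mode folders with per-level filters and then regex-matches the vowel suffix. For one concrete filter set, pathlib's glob expresses the same walk in a single pattern; a sketch assuming the <root>/<singer>/<type>/<mode>/*.wav layout the record implies:

# One-pattern equivalent of resolve_folders() for concrete filter values.
from pathlib import Path

def sample_files(root, singer, kind, mode, vowel):
    pattern = f'{singer}/{kind}/{mode}/*_{vowel}.wav'
    return [str(p) for p in Path(root).glob(pattern)]

print(sample_files('/data/VocalSet', 'female1', 'arpeggios', 'belt', 'a'))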
= requests.get(pic, headers=Hostreferer)\n mess = BeautifulSoup(html.text,\"html.parser\")\n pic_url = mess.find('img')\n print(pic_url)\n strname = 'E:\\\\test\\\\pythonpc\\\\test.jpg'\n html = requests.get(\"http://i.meizitu.net/2018/07/13f05.jpg\", headers=Picreferer)\n print(html)\n f = open('C:\\\\Users\\\\Administrator\\\\Desktop\\\\123.jpg','wb')\n f.write(html.content)\n f.close()\nd = Demo()\nd.test()","sub_path":"pc7.py","file_name":"pc7.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"567221478","text":"def module17():\n try:\n n1,n2 = 1,0\n r = n1/n2\n print('r =', r) \n except:\n print('Error: Input data or expression has error!')\n else:\n print('Else: No errors!')\n finally:\n print('Finally: Finish processing!')","sub_path":"examples/colab-py/md17.py","file_name":"md17.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"57354618","text":"# class Solution:\n# \"\"\"\n# @param str: A string\n# @return: all permutations\n# \"\"\"\n# def stringPermutation2(self, str):\n# # write your code here\n# str_arr = sorted(str)\n# res = []\n# self.dfs(str_arr, [], set(), res)\n# return res\n\n# def dfs(self, str_arr, path, visited_index, res):\n# if len(str_arr) == len(path):\n# res.append(''.join(path))\n# return \n \n# for i in range(len(str_arr)):\n# if i in visited_index:\n# continue\n \n# if i > 0 and str_arr[i] == str_arr[i - 1] and i - 1 not in visited_index:\n# continue\n \n# visited_index.add(i)\n# path.append(str_arr[i])\n# self.dfs(str_arr, path, visited_index, res)\n# path.pop()\n# visited_index.remove(i)\n \n# print(Solution().stringPermutation2(\"abb\"))\n\n\nclass Solution:\n \"\"\"\n @param str: A string\n @return: all permutations\n \"\"\"\n def stringPermutation2(self, str):\n # write your code here\n str_arr = list(str)\n str_arr.sort()\n res = []\n self.dfs(str_arr, res, [], set())\n return res\n\n def dfs(self, str_arr, res, path, visited_i):\n if len(path) == len(str_arr):\n res.append(\"\".join(path))\n return \n \n for i in range(len(str_arr)):\n if i in visited_i:\n continue\n \n if i > 0 and str_arr[i] == str_arr[i - 1] and i - 1 not in visited_i:\n continue\n \n path.append(str_arr[i])\n visited_i.add(i)\n self.dfs(str_arr, res, path, visited_i)\n visited_i.remove(i)\n path.pop()\n\n ","sub_path":"10 string permutation II.py","file_name":"10 string permutation II.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"644820046","text":"import web\nimport sender\nimport config\nimport utils\n\n\nurls = (\n '/', 'home',\n '/subscription', 'subscribe'\n)\n\napp = web.application(urls, globals())\nweb.config.debug = config.DEBUG\n# Session/debug tweak from http://webpy.org/cookbook/session_with_reloader\nif web.config.get('_session') is None:\n session = web.session.Session(app, web.session.DiskStore('sessions'))\n web.config._session = session\nelse:\n session = web.config._session\n\n\ndef csrf_token():\n if 'csrf_token' not in session:\n from uuid import uuid4\n session.csrf_token = uuid4().hex\n return session.csrf_token\n\nrender = web.template.render('templates/', globals={'csrf_token': csrf_token})\n\n\ndef csrf_protected(f):\n \"\"\"Usage:\n @csrf_protected\n def POST(self):\n ...\"\"\"\n def decorated(*args, **kwargs):\n inp = web.input()\n if not (('csrf_token' in 
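The string-permutation record above avoids duplicate outputs by sorting first and then skipping an element whose equal left neighbour is still unused at the current depth. The same pruning rule, condensed into a runnable sketch:

# Duplicate-safe permutations: sort, then skip arr[i] when arr[i-1] is
# equal and not currently on the path.
def permutations_unique(s):
    arr, res = sorted(s), []
    def dfs(path, used):
        if len(path) == len(arr):
            res.append(''.join(path))
            return
        for i, ch in enumerate(arr):
            if i in used or (i > 0 and arr[i] == arr[i - 1] and (i - 1) not in used):
                continue
            used.add(i)
            path.append(ch)
            dfs(path, used)
            path.pop()
            used.remove(i)
    dfs([], set())
    return res

print(permutations_unique('abb'))   # ['abb', 'bab', 'bba']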
inp) and inp.csrf_token == session.pop('csrf_token', None)):\n raise web.HTTPError(\n \"400 Bad request\",\n {'content-type': 'text/html'},\n 'Cross-site request forgery (CSRF) attempt (or stale browser form). <a href=\"/\">Back to the form</a>.')\n\n return f(*args, **kwargs)\n return decorated\n\n\nclass home:\n def GET(self):\n form = utils.subscribe_form()\n result = session.get('success', False)\n try:\n configuration = config.get_config(web.ctx.host)\n except KeyError:\n return render.switch(config.WEB_SERVICES.keys())\n if result:\n del session.success\n\n return render.index(configuration, form, result)\n\n\nclass subscribe:\n @csrf_protected\n def POST(self):\n form = utils.subscribe_form()\n configuration = config.get_config(web.ctx.host)\n\n if not form.validates():\n return render.index(configuration, form, False)\n else:\n session.success = True\n\n # prepare user data\n user_data = form.d\n email = user_data.pop('email')\n\n user_data['ip_address'] = web.ctx.ip\n if not user_data['ip_address'] or user_data['ip_address'] == '127.0.0.1':\n user_data['ip_address'] = web.ctx.env.get('HTTP_X_FORWARDED_FOR', web.ctx.ip)\n user_data['user_agent'] = web.ctx.env.get('HTTP_USER_AGENT', '')\n\n # send to zmq\n sender.form_handler(email, configuration['service_uri'], **user_data)\n\n raise web.seeother('/')\n\n\nif __name__ == \"__main__\":\n app.run()\n\napplication = app.wsgifunc()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"601908595","text":"import cauldron as cd\nimport matplotlib\nimport matplotlib.pyplot as plt\nplt.rcParams.update({'figure.max_open_warning': 0}) # NOQA\nimport seaborn as sns\nsns.set() # NOQA, need this for styling\nimport pandas as pd\n\nimport os, sys # NOQA\nsys.path.append('../../src/data')\nimport make_dataset # NOQA, need the lines above to get directories right\n\n# Import df from Cauldron shared memory\ndf = cd.shared.df\n\ncd.display.markdown(\n \"\"\"\n ## Assortment\n\n Assortment has a correlation with sales of 0.08. The 'Extra' assortment has\n outstanding sales figures, but there are very few of them. The 'Extended'\n assortment has much better sales than the 'Basic' format, and there are\n over 500 'Extended' stores in the Rossmann Germany business.\n \"\"\"\n)\n\n# Prep data for display\nopen = df[df.open == 1].copy()\navg_daily_sales_by_assortment = open.groupby('assortment').sales.mean()\nstore_count_by_assortment = open.groupby('assortment').store.nunique()\nassorts = ['Basic', 'Extra', 'Extended']\n\n# Create and display the chart\nfig, ax_l = plt.subplots()\nax_r = ax_l.twinx()\nax_l.bar(x=assorts, height=avg_daily_sales_by_assortment, color='green')\nax_l.set_ylim([0, 10000])\nax_r.plot(assorts, store_count_by_assortment.values, color='blue',\n linestyle='none', marker='o', markersize=10, markeredgewidth=5)\nax_r.set_ylim([0, 650])\nax_l.set_title('Average Daily Sales and Stores by Assortment')\nax_l.set_ylabel('Average Daily Sales (green bars)', color='green')\nax_l.set_yticklabels(['${:,.0f}'.format(x) for x in ax_l.get_yticks()])\nax_r.set_ylabel('Stores (blue dots)', color='blue')\nax_l.set_xlabel('Assortment')\nax_l.axhline(df.sales.mean())\ncd.display.pyplot(fig)\n\ncd.display.markdown(\n \"\"\"\n ## Store Type\n\n Store type is not strongly correlated with sales. Store type 'b' has daily\n average sales of about $10,000; but there are a handful of them. 
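The web.py app above ties a per-session token to every POST and invalidates it with pop(). The check itself is framework-neutral; in this sketch, session and form are plain dicts standing in for the real web.py objects:

# Single-use session-token CSRF check, as in the decorator above.
import uuid

def issue_token(session):
    return session.setdefault('csrf_token', uuid.uuid4().hex)

def check_token(session, form):
    expected = session.pop('csrf_token', None)   # pop() makes it single-use
    return expected is not None and form.get('csrf_token') == expected

session, form = {}, {}
form['csrf_token'] = issue_token(session)
print(check_token(session, form))   # True on first use
print(check_token(session, form))   # False on replay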
The other\n store types have daily average sales figures that are nearly\n indistinguishable.\n \"\"\"\n)\n\n# Prep data for display\nopen = df[df.open == 1].copy()\navg_daily_sales_by_store_type = open.groupby('store_type').sales.mean()\nstore_count_by_store_type = open.groupby('store_type').store.nunique()\n\n# Create and display the chart\nfig, ax_l = plt.subplots()\nax_r = ax_l.twinx()\nax_l.bar(x=avg_daily_sales_by_store_type.index,\n height=avg_daily_sales_by_store_type, color='green')\nax_l.set_ylim([0, 10100])\nax_r.plot(store_count_by_store_type, color='blue', linestyle='none',\n marker='o', markersize=10, markeredgewidth=5)\nax_r.set_ylim([0, 650])\nax_r.set_title('Average Daily Sales and Store Count by Store Type')\nax_l.set_ylabel('Avg Daily Sales (green bars)', color='green')\nax_l.set_yticklabels(['${:,.0f}'.format(x) for x in ax_l.get_yticks()])\nax_r.set_ylabel('Stores (blue dots)', color='blue')\nax_l.set_xlabel('Store Type');\ncd.display.pyplot(fig)\n","sub_path":"notebooks/EDA/S13-assortment_store_type.py","file_name":"S13-assortment_store_type.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"294376771","text":"from qr import qrc\nimport os\nSTATIC_URL_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)),'static')\n\n\n@app.route('/code', methods=['POST', 'GET'])\ndef code(): \n if request.method == 'POST':\n url = request.form['url']\n qrcod = qrc(url)\n img = qrcod.qrcode()\n return jsonify(img)\n else:\n return jsonify(errs)","sub_path":"easyurl/webSite/controllers/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"107608802","text":"\n\nfrom xai.brain.wordbase.nouns._stuff import _STUFF\n\n#calss header\nclass _STUFFS(_STUFF, ):\n\tdef __init__(self,): \n\t\t_STUFF.__init__(self)\n\t\tself.name = \"STUFFS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"stuff\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_stuffs.py","file_name":"_stuffs.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"321367078","text":"#! /usr/bin/python\n\n# Copyright 2013 Linaro Limited\n# Author Matt Hart <matthew.hart@linaro.org>\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
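The EDA record above builds the same bars-plus-dots chart on twinned axes twice. The reusable core, with toy numbers in place of the Rossmann aggregates:

# Bars on the left axis, a point series on a twinned right axis; toy data.
import matplotlib.pyplot as plt

labels = ['Basic', 'Extra', 'Extended']
sales = [6800, 10000, 7100]        # invented values, not the real aggregates
stores = [540, 17, 60]

fig, ax_l = plt.subplots()
ax_r = ax_l.twinx()
ax_l.bar(labels, sales, color='green')
ax_r.plot(labels, stores, color='blue', linestyle='none', marker='o')
ax_l.set_ylabel('Avg daily sales (bars)', color='green')
ax_r.set_ylabel('Stores (dots)', color='blue')
plt.show()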
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n\nimport SocketServer\nimport logging\nimport socket\nimport time\nimport json\nfrom lavapdu.dbhandler import DBHandler\n\nclass ListenerServer(object):\n\n def __init__(self, settings):\n listen_host = settings[\"hostname\"]\n listen_port = settings[\"port\"]\n\n logging.getLogger().name = \"ListenerServer\"\n logging.getLogger().setLevel(settings[\"logging_level\"])\n logging.debug(\"ListenerServer __init__\")\n logging.info(\"listening on %s:%s\", listen_host, listen_port)\n\n self.server = TCPServer((listen_host, listen_port), TCPRequestHandler)\n self.server.settings = settings\n dbh = DBHandler(settings)\n dbh.create_db()\n dbh.close()\n del dbh\n\n def start(self):\n logging.info(\"Starting the ListenerServer\")\n self.server.serve_forever()\n\n\nclass TCPRequestHandler(SocketServer.BaseRequestHandler):\n #\"One instance per connection. Override handle(self) to customize action.\"\n def insert_request(self, data):\n logging.getLogger().name = \"TCPRequestHandler\"\n logging.getLogger().setLevel(self.server.settings[\"logging_level\"])\n array = data.split(\" \")\n delay = 10\n custom_delay = False\n now = int(time.time())\n if (len(array) < 3) or (len(array) > 4):\n logging.info(\"Wrong data size\")\n raise Exception(\"Unexpected data\")\n if len(array) == 4:\n delay = int(array[3])\n custom_delay = True\n hostname = array[0]\n port = int(array[1])\n request = array[2]\n if not (request in [\"reboot\", \"on\", \"off\"]):\n logging.info(\"Unknown request: %s\", request)\n raise Exception(\"Unknown request: %s\", request)\n if request == \"reboot\":\n logging.debug(\"reboot requested, submitting off/on\")\n self.queue_request(hostname, port, \"off\", now)\n self.queue_request(hostname, port, \"on\", now+delay)\n else:\n if custom_delay:\n logging.debug(\"using delay as requested\")\n self.queue_request(hostname, port, request, now+delay)\n else:\n self.queue_request(hostname, port, request, now)\n\n def queue_request(self, hostname, port, request, exectime):\n dbhandler = DBHandler(self.server.settings)\n sql = \"insert into pdu_queue (hostname,port,request,exectime) \" \\\n \"values ('%s',%i,'%s',%i)\" % (hostname, port, request, exectime)\n dbhandler.do_sql(sql)\n dbhandler.close()\n del dbhandler\n\n\n def handle(self):\n logging.getLogger().name = \"TCPRequestHandler\"\n request_ip = self.client_address[0]\n try:\n data = self.request.recv(4096).strip()\n socket.setdefaulttimeout(2)\n try:\n request_host = socket.gethostbyaddr(request_ip)[0]\n except socket.herror as e: #pylint: disable=invalid-name\n #logging.debug(\"Unable to resolve: %s error: %s\" % (ip,e))\n request_host = request_ip\n logging.info(\"Received a request from %s: '%s'\", request_host, data)\n self.insert_request(data)\n self.request.sendall(\"ack\\n\")\n except Exception as e: #pylint: disable=invalid-name\n logging.debug(e.__class__)\n logging.debug(e.message)\n self.request.sendall(\"nack\\n\")\n self.request.close()\n\n\nclass TCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):\n allow_reuse_address = True\n daemon_threads = True\n #pass\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n logging.getLogger().setLevel(logging.DEBUG)\n logging.debug(\"Executing from __main__\")\n filename = 
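queue_request() in the lavapdu record interpolates values straight into its INSERT string, which is an SQL-injection vector for a hostile hostname. A parameterized sketch of the same insert; sqlite3 is used only as a runnable stand-in for the record's unspecified DBHandler backend:

# Parameterized pdu_queue insert; sqlite3 stands in for DBHandler.
import sqlite3, time

conn = sqlite3.connect(':memory:')
conn.execute('create table pdu_queue (hostname text, port int, request text, exectime int)')
conn.execute('insert into pdu_queue (hostname, port, request, exectime) values (?, ?, ?, ?)',
             ('pdu01', 3, 'off', int(time.time())))
conn.commit()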
\"/etc/lavapdu/lavapdu.conf\"\n print(\"Reading settings from %s\" % filename)\n with open(filename) as stream:\n jobdata = stream.read()\n json_data = json.loads(jobdata)\n ss = ListenerServer(json_data[\"daemon\"])\n ss.start()\n","sub_path":"lavapdu/socketserver.py","file_name":"socketserver.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"473778366","text":"# 길이가 m인 수열을 모두 구하는 프로그램을 작성하시오.\n\n# 풀이\n# 1. M, N을 입력 받는다.\n# 2. 중복순열 처럼 구현하면 된다.\n\n\ndef dfs(a, lst):\n if a == m:\n print(\" \".join(lst))\n return\n for j in range(n):\n lst[a] = nums[j]\n dfs(a + 1, lst)\n\n\nn, m = map(int, input().split())\nnums = [str(i + 1) for i in range(n)]\nres = [0] * m\ndfs(0, res)\n","sub_path":"jiwon/basic_bruteforce_week3/nm3.py","file_name":"nm3.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"425045190","text":"from pykrx.stock.index.core import MKD20011\nfrom pykrx.comm.util import singleton\nfrom pandas import DataFrame\n\n\n@singleton\nclass IndexTicker:\n def __init__(self):\n self.df = DataFrame()\n\n def get_ticker(self, date, market):\n self._get(date, market)\n cond = (self.df['date'] == date) & (self.df['ind_tp_cd'] == market)\n return self.df[cond].index.tolist()\n\n def get_id(self, date, market, ticker):\n self._get(date, market)\n cond = (self.df.index == ticker) & (self.df['date'] == date)\n if len(self.df[cond]) == 0:\n print(\"NOT FOUND\")\n return None\n return self.df.loc[cond, 'idx_ind_cd'][0]\n\n def get_market(self, date, market, ticker):\n self._get(date, market)\n cond = self.df.index == ticker\n return self.df.loc[cond, 'ind_tp_cd'][0]\n \n def _get(self, date, market):\n try:\n cond = (self.df['date'] == date) & (self.df['ind_tp_cd'] == market)\n if len(self.df[cond]) == 0:\n raise KeyError\n except KeyError:\n index = {\"KOSPI\": \"02\", \"KOSDAQ\": \"03\"}.get(market, \"KOSPI\")\n df = MKD20011().read(date, index)\n if len(df) == 0:\n return df\n\n df = df.set_index('idx_nm')\n df['date'] = date\n df['ind_tp_cd'] = df['ind_tp_cd'].apply(\n lambda x: \"KOSPI\" if x == \"1\" else \"KOSDAQ\")\n self.df = self.df.append(df)\n\n\nif __name__ == \"__main__\":\n import pandas as pd\n pd.set_option('display.expand_frame_repr', False)\n\n# tickers = IndexTicker().get_ticker(\"20190412\", \"KOSPI\")\n# print(tickers)\n index_id = IndexTicker().get_id(\"20190412\", \"KOSPI\", \"코스피\")\n print(index_id)\n print(IndexTicker().get_market(\"20190412\", \"KOSPI\", \"코스피\"))","sub_path":"pykrx/stock/index/ticker.py","file_name":"ticker.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"501864478","text":"# encoding: utf-8\n\n\"\"\"\n@author: LongJY\n@file: job.py\n@time: 2018/10/8 下午10:15\n\"\"\"\nfrom models.SourceModels import UserUser, ShopGoodsOrder, UserAddres, SysCommunity, \\\n ShopOrderActivityGood, SysArea\nfrom sqlalchemy import and_\nfrom sqlalchemy import func, case\nfrom etlproject.app import EtlPro\nimport pandas as pd\nimport datetime\nfrom models import TarModels\n\n\nnoontime = datetime.datetime.now()\nnow = datetime.datetime.now()\nzeroToday = now - datetime.timedelta(hours=now.hour, minutes=now.minute, seconds=now.second,\n microseconds=now.microsecond)\nzeroYesterday = zeroToday - datetime.timedelta(days=13)\nzeroTomorrow = zeroToday - 
datetime.timedelta(days=10)\norderDatahanld = EtlPro(zeroTomorrow, zeroYesterday)\n\n\ndef orderImport():\n \"\"\"\n 订单数据导出, 开始时间向前推14天\n :return: list\n \"\"\"\n lst = orderDatahanld.sourceSession.query(\n ShopGoodsOrder.order_id, ShopGoodsOrder.order_no, ShopGoodsOrder.create_time,\n func.date_format(ShopGoodsOrder.create_time, '%Y-%m-%d').label('create_date'), ShopGoodsOrder.pay_time,\n func.date_format(ShopGoodsOrder.pay_time, '%Y-%m-%d').label('pay_date'), ShopGoodsOrder.order_state,\n ShopOrderActivityGood.goods_id, ShopOrderActivityGood.title, ShopGoodsOrder.order_number,\n ShopGoodsOrder.single_price, ShopGoodsOrder.total_price, ShopGoodsOrder.discount_price,\n ShopGoodsOrder.actual_price,\n (case([(ShopGoodsOrder.refund_flag == 1, ShopGoodsOrder.actual_price)], else_=0)).label(\n 'refund_price'),\n ShopGoodsOrder.user_id, UserUser.contact_name, UserAddres.house_number,\n UserUser.contact_tel, SysCommunity.community_id, SysCommunity.community_name,\n SysArea.area_id, SysArea.area_name, ShopGoodsOrder.out_trade_no\n ).outerjoin(\n ShopOrderActivityGood, ShopGoodsOrder.order_id == ShopOrderActivityGood.order_id\n ).outerjoin(\n UserUser, ShopGoodsOrder.user_id == UserUser.user_id\n ).outerjoin(\n UserAddres, UserUser.user_address_id == UserAddres.user_address_id\n ).outerjoin(\n SysCommunity, UserAddres.community_id == SysCommunity.community_id\n ).outerjoin(\n SysArea, SysCommunity.district_area_id == SysArea.area_id\n ).filter(\n and_(UserUser.user_type == 3,\n UserAddres.house_number.notlike(\"%测试%\"),\n ShopGoodsOrder.create_time >= orderDatahanld.sqlBeginTime,\n ShopGoodsOrder.create_time < orderDatahanld.sqlEndTime)\n ).all()\n orderData = pd.DataFrame(lst)\n\n # for i in range(len(orderData)):\n # if i % 2 == 0:\n # orderData.loc[i, 'order_number'] = 5888\n # orderData.loc[i, 'single_price'] = 10000\n # orderData.loc[i, 'total_price'] = 20000\n # orderData.loc[i, 'discount_price'] = 30000\n # orderData.loc[i, 'actual_price'] = 40000\n # orderData.loc[i, 'refund_price'] = 123\n return orderData\n\n\ndef hanld():\n orderData = orderImport()\n checkoutData = orderDatahanld.checkDataImport()\n orderDatahanld.dataCheck('Order', orderData)\n # print(orderData)\n\n # orderDatahanld.dataInsert()\n # print(checkoutData)\n\n\nif __name__ == '__main__':\n hanld()","sub_path":"app01/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"326078400","text":"\"\"\"Grocery calculator \"\"\"\nclass grocery:\n def __init__(self,dict,li):\n self.dict=dict\n self.li=li\n \n\n def print_men(self):\n self.tota=0\n print('Here is your list you choose to buy-->>')\n for i in self.li:\n if i in self.dict:\n self.tota=self.dict[i]*self.li[i]\n print({i:self.tota})\n print('------------')\n\n def calculate(self):\n self.total=0\n self.total_1=0\n for i in self.li:\n if i in self.dict:\n self.total=self.dict[i]*self.li[i]\n self.total_1+=self.total\n else:\n print(i,'not found , Try another store')\n \n \n\n def pay(self):\n print('Your total is Rs.',self.total_1)\n print('Here are the options to pay--->>>')\n print('Phonepe')\n print('GPay')\n print('UPi')\n \n\n\ndic={\n 'maggi':25,\n 'pizza':90,\n 'momos':70,\n 'chai':35,\n 'samosa':20,\n 'bread pakoda':20,\n 'paratha':35\n }\n\ndef your_list():\n x={}\n print('How many elements you wanna buy??')\n for i in range(int(input('Here: '))):\n gro=input('Grocery: ')\n quan=int(input('Enter quan: '))\n x.update({gro:quan})\n\n return 
x\n\nli=your_list()\n\ngro=grocery(dic,li)\ngro.print_men()\ngro.calculate()\ngro.pay()\n","sub_path":"Grocery.py","file_name":"Grocery.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"88050210","text":"import socket\r\nfrom tkinter import Tk, mainloop, Button, Entry, LEFT, BOTTOM, TOP\r\nimport threading\r\nimport os \r\nimport netifaces\r\nimport upnpclient\r\n\r\n\r\nport = 8888\r\ncompan_ip = input(\"Podaj IP adresata/IPv4: \")\r\n\r\nclear = lambda: os.system('cls')\r\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nsock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nbutton_list = []\r\nEntry_list = []\r\n\r\n#This function is ultimetli accurate if you have many interfaces\r\ndef Finding_local_ip():\r\n inf = netifaces.interfaces()\r\n gate = netifaces.gateways()\r\n gate = gate['default'][netifaces.AF_INET]\r\n gate = gate[0].split('.')\r\n for x in inf:\r\n try:\r\n interface = netifaces.ifaddresses(x)\r\n interface = interface[2][0]['addr'].split('.')\r\n if gate[:3] == interface[:3]:\r\n res = '.'.join(interface)\r\n return res\r\n except:\r\n pass\r\nhost = Finding_local_ip()\r\n\r\ndef Port_forwarding():\r\n try:\r\n devices = upnpclient.discover()\r\n status = devices[0].WANIPConn1.GetStatusInfo()\r\n if status['NewConnectionStatus'] != 'Connected':\r\n for i, x in enumerate(devices):\r\n status = x.WANIPConn1.GetStatusInfo()\r\n if status == 'Connected':\r\n d = devices[i]\r\n break\r\n else:\r\n d = devices[0]\r\n d.WANIPConn1.AddPortMapping(\r\n NewRemoteHost='0.0.0.0',\r\n NewExternalPort=port,\r\n NewProtocol='TCP',\r\n NewInternalPort=port,\r\n NewInternalClient=host,\r\n NewEnabled='1',\r\n NewPortMappingDescription='P2P',\r\n NewLeaseDuration=10000)\r\n print('Forwarding Success!!!')\r\n return True\r\n except:\r\n print('Device not found')\r\n return False\r\n\r\n\r\ndef Echo(conn,addr):\r\n with conn:\r\n #print('Connected by ',addr)\r\n while True:\r\n data = conn.recv(1024)\r\n if not data:\r\n break\r\n conn.sendall(data)\r\n print(addr,\": \",data.decode('utf-8'))\r\ndef Listener():\r\n try:\r\n forwarding = Port_forwarding()\r\n if forwarding:\r\n with sock as s:\r\n s.bind((host, port))\r\n while True:\r\n s.listen()\r\n conn, addr = s.accept()\r\n if conn !=None:\r\n Echo(conn,addr)\r\n else:\r\n pass\r\n except:\r\n print(\"Connection lost\")\r\n\r\ndef Conn_Establish():\r\n button_list[0].destroy()\r\n button_list[1].configure(state=\"normal\")\r\n button_list.clear()\r\n \r\n clear()\r\n sock2.connect((compan_ip,port))\r\n\r\n sock2.send(b\"Witaj\")\r\n print(\"Connection success\")\r\n\r\ndef Messages():\r\n text = Entry_list[0].get()\r\n print(\"You: \",text)\r\n sock2.send(bytes(text, 'utf-8'))\r\n\r\ndef Main():\r\n master = Tk()\r\n master.geometry(\"300x300\")\r\n \r\n b1 = Button(master, text=\"Click\" ,command=Conn_Establish)\r\n b1.pack(side=LEFT)\r\n button_list.append(b1)\r\n \r\n b2 = Button(master, text=\"Send message\", command=Messages)\r\n b2.pack(side=BOTTOM)\r\n b2.configure(state=\"disabled\")\r\n button_list.append(b2)\r\n\r\n E = Entry(master)\r\n E.pack(side=TOP)\r\n E.focus_set()\r\n Entry_list.append(E)\r\n \r\n mainloop()\r\n\r\nm = threading.Thread(target=Main,name='canvas')\r\nx= 
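Finding_local_ip() above matches each interface's address prefix against the default gateway. netifaces also names the outgoing interface directly in the gateway entry, which shortens the lookup; a sketch using the same library, no prefix matching needed:

# The default-gateway tuple is (gateway_ip, interface), so the
# outgoing interface's address can be read off directly.
import netifaces

def local_ip():
    _, iface = netifaces.gateways()['default'][netifaces.AF_INET]
    return netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr']

print(local_ip())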
threading.Thread(target=Listener,name='listener')\r\n\r\nm.start()\r\nx.start()\r\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"183381151","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport io\nimport os\nimport math\nimport logging\nimport time\nimport json\nimport hashlib\nimport string\nimport random\nimport urllib\nfrom threading import Thread\nfrom datetime import timedelta, datetime as _dt\n\nimport requests\nimport schedule\nfrom django.utils.http import urlquote\nfrom django.utils import timezone as dt\nfrom django.db import connection\nfrom django.conf import settings\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom channels import Group\nfrom channels.sessions import channel_session\n\nfrom social_auth.models import SteamUser\nfrom betting.models import CoinFlipGame, GameStatus, Deposit, TempGameHash, MarketItem, PropItem, Message\nfrom betting.models import SendRecord, SteamrobotApiItem, UserAmountRecord, Promotion\nfrom betting.serializers import DepositSerializer, SteamerSerializer, TempGameHashSerializer, MessageSerializer, SendRecordSerializer\nfrom betting.business.cache_manager import update_coinflip_game_in_cache, get_online, get_steam_bot_status, get_current_coinflip_games\nfrom betting.business.deposit_business import is_connection_usable\nfrom betting.business.cache_manager import get_current_jackpot_id, update_current_jackpot_id, read_inventory_from_cache\nfrom betting.business.redis_con import get_redis\nfrom betting.business.deposit_business import format_coinflip_game, format_jackpot_game\nfrom betting.utils import aware_datetime_to_timestamp, get_maintenance\n\n\n_logger = logging.getLogger(__name__)\n\n_trade_list_key = 'trade_list'\n_send_list = 'send_list'\n\n\ndef get_last_chat_msg():\n max_count = settings.CHAT_MESSAGE_LAST_COUNT\n messages = Message.objects.all().order_by('-timestamp')[:max_count]\n msgs = []\n for m in messages:\n msgs.append(MessageSerializer(m).data)\n msgs = msgs[::-1]\n return msgs\n\n\ndef format_jackpot_joiners(game, winner):\n deposits = game.deposits.filter(accept=1).all()\n joiner_map = {}\n for d in deposits:\n if d.steamer.steamid not in joiner_map:\n joiner_map[d.steamer.steamid] = {\n 'steamer': SteamerSerializer(d.steamer).data,\n 'amount': d.amount,\n 'chance': d.amount/game.total_amount\n }\n else:\n joiner = joiner_map[d.steamer.steamid]\n joiner['amount'] += d.amount\n joiner['chance'] = joiner['amount'] / game.total_amount\n joiners = []\n for k, v in joiner_map.items():\n joiners.extend([v for i in range(int(math.ceil(v['chance']/0.04)))])\n win_index = random.randint(10, 25)\n random.shuffle(joiners)\n joiners.insert(win_index, winner)\n return joiners, win_index\n\n\ndef get_all_coinflip_history(page=1):\n dt_now = dt.now()\n dt_begin = dt_now - timedelta(days=6)\n page = page if page <= 10 else 10\n items = []\n games = CoinFlipGame.objects.filter(game_type=0, end=1, create_time__gte=dt_begin.date()).all().order_by('-create_time')\n paginator = Paginator(games, settings.DEFAULT_PAGINATION_PAGE)\n try:\n ret_games = paginator.page(page)\n except EmptyPage:\n ret_games = paginator.page(paginator.num_pages)\n for g in ret_games:\n cf_data = format_coinflip_game(g)\n if cf_data:\n items.append(cf_data)\n total_count = paginator.count\n total_count = total_count if total_count <= 100 else 100\n ret = {\n 'total_count': 
total_count,\n 'items': items,\n 'page': page\n }\n return ret\n\n\ndef get_game_summary(user, game_type, dt_begin):\n amount_records = UserAmountRecord.objects.filter(\n steamer__steamid=user.steamid,\n game__game_type=game_type,\n create_time__gte=dt_begin\n ).all()\n amounts = [i.amount for i in amount_records]\n amount = round(sum(amounts), 2)\n count = len(amounts)\n games = [i.game.total_amount for i in amount_records]\n join = round(sum(games), 2)\n wins = filter(lambda x: x > 0, amounts)\n win = round(len(wins)*100.0/count if count > 0 else 0, 2)\n return {\n 'count': count,\n 'amount': amount,\n 'join': join,\n 'win': win\n }\n\n\ndef get_my_game_history(user, game_type, format_func, page=1, **kwargs):\n dt_now = dt.now()\n local_now = dt.localtime(dt_now)\n local_begin = local_now.replace(hour=0, minute=0, second=0)\n dt_begin = local_begin - timedelta(days=6)\n items = []\n games = CoinFlipGame.objects.filter(\n game_type=game_type, end=1,\n create_time__gte=dt_begin.date(),\n create_time__lte=dt_now,\n deposits__steamer__steamid=user.steamid\n ).all().order_by('-create_time')\n paginator = Paginator(games, settings.DEFAULT_PAGINATION_PAGE)\n try:\n ret_games = paginator.page(page)\n except EmptyPage:\n ret_games = paginator.page(paginator.num_pages)\n for g in ret_games:\n data = format_func(g)\n if data:\n items.append(data)\n total_summary = get_game_summary(user, game_type, dt_begin)\n today_summary = get_game_summary(user, game_type, local_begin)\n ret = {\n 'total_count': paginator.count,\n 'items': items,\n 'total_summary': total_summary,\n 'today_summary': today_summary,\n 'page': page\n }\n return ret\n\n\ndef get_my_coinflip_history(user, **kwargs):\n return get_my_game_history(user, game_type=0, format_func=format_coinflip_game, **kwargs)\n\n\ndef get_my_jackpot_history(user, **kwargs):\n return get_my_game_history(user, game_type=1, format_func=format_jackpot_game, **kwargs)\n\n\ndef format_coinflip_game_all(game, end=False, **kwargs):\n dt_now = dt.now()\n ts = aware_datetime_to_timestamp(dt_now)\n ret = {\n 'hash': game.hash,\n 'closed': False,\n 'date': dt_now.strftime('%Y-%m-%d %H:%M:%S'),\n 'gid': game.uid,\n 'ts_get': ts,\n 'winner': None,\n 'joined': None,\n 'deposit': []\n }\n total_amount = 0.0\n total_items = 0\n deposits = game.deposits.all()\n items = []\n closed = True if len(deposits) >= 2 else False\n for deposit in deposits:\n deposit_s = DepositSerializer(deposit)\n deposit_data = deposit_s.data\n steamer_s = SteamerSerializer(deposit.steamer)\n deposit_data.update(steamer_s.data)\n deposit_data['totalItems'] = len(deposit_data['items'])\n deposit_data['totalAmount'] = sum(d['amount'] for d in deposit_data['items'])\n ret['deposit'].append(deposit_data)\n total_items += deposit_data['totalItems']\n total_amount += deposit_data['totalAmount']\n items.extend(deposit_data['items'])\n amount_o = {\n 'min': total_amount * 0.9,\n 'max': total_amount * 1.1\n }\n items = sorted(items, key=lambda x: x['amount'], reverse=True)\n items_more = total_items - 6\n ret.update({\n 'totalItems': total_items,\n 'totalAmount': total_amount,\n 'closed': closed,\n 'amount_o': amount_o,\n 'itemsShow': items[:6],\n 'itemsMore': items_more if items_more > 0 else 0\n })\n return ret\n\n\ndef format_deposit(deposit, end=False, **kwargs):\n dt_now = dt.now()\n ts = aware_datetime_to_timestamp(dt_now)\n ret = {\n 'hash': deposit.game.hash,\n 'closed': False,\n 'date': dt_now.strftime('%Y-%m-%d %H:%M:%S'),\n 'gid': deposit.game.id,\n 'ts_get': ts,\n 'winner': {},\n 'joined': 
{},\n 'deposit': []\n }\n deposit_s = DepositSerializer(deposit)\n deposit_data = deposit_s.data\n steamer_s = SteamerSerializer(deposit.steamer)\n deposit_data.update(steamer_s.data)\n deposit_data['totalItems'] = len(deposit_data['items'])\n deposit_data['totalAmount'] = sum(d['amount'] for d in deposit_data['items'])\n ret['deposit'].append(deposit_data)\n return ret\n\n\ndef get_last_jackpot_histories(count=4):\n histories = []\n games = CoinFlipGame.objects.filter(game_type=1, end=1).all().order_by('-create_time')[:count]\n for game in games:\n histories.append(format_jackpot_game(game))\n return histories\n\n\ndef get_jackpot_game(gid):\n ret = None\n game = CoinFlipGame.objects.filter(uid=gid).first()\n if game:\n ret = format_jackpot_game(game)\n return ret\n\n\ndef ws_send_jk_current(data):\n jk_msg = ['jk', 'update', data]\n Group('jackpot').send({'text': json.dumps(jk_msg)})\n\n\ndef ws_send_jk_new(data):\n jk_msg = ['jk', 'new', data]\n Group('jackpot').send({'text': json.dumps(jk_msg)})\n\n\ndef ws_send_online(data):\n online_msg = ['online', data]\n Group('chat_room').send({'text': json.dumps(online_msg)})\n\n\ndef ws_send_bot_status(status):\n bot_msg = ['bot', {'status': status}]\n Group('chat_room').send({'text': json.dumps(bot_msg)})\n\n\ndef update_online():\n try:\n m = get_maintenance()\n if m:\n return\n\n online = get_online()\n ws_send_online(online)\n bot_status = get_steam_bot_status()\n ws_send_bot_status(bot_status)\n except Exception as e:\n _logger.exception(e)\n\n\n_steamrobot_api_url = \"http://api.steamrobot.me/api/get_item_price/?hash_name={hash_name}\"\n_steamrobot_api_base = \"http://api.steamrobot.me/api/get_item_price/\"\n\n\ndef get_item_price_from_steamrobot(hash_name):\n try:\n _logger.warning(hash_name)\n params = {\n 'hash_name': hash_name\n }\n resp = requests.get(_steamrobot_api_base, params=params, timeout=settings.STEAM_REQUEST_TIMEOUT)\n item_data = json.loads(resp.content, encoding='utf-8')\n item = None\n if item_data:\n item = {\n u'hash_name': item_data.get(u'hash_name', None),\n u'item_refer_igxe_steam_price': item_data.get(u'item_refer_igxe_steam_price', None),\n u'item_refer_igxe_price': item_data.get(u'item_refer_igxe_price', None),\n u'steam_sale_price_dollar': item_data.get(u'steam_sale_price_dollar', None),\n }\n if item:\n md5_data = hashlib.md5(item[u'hash_name'].encode('utf-8'))\n hash_key = md5_data.hexdigest()\n SteamrobotApiItem.objects.update_or_create(md5=hash_key, defaults=item)\n except Exception as e:\n _logger.exception(e)\n\n\ndef steamrobot_Api_Item_Update():\n items = SteamrobotApiItem.objects.all()\n for item in items:\n get_item_price_from_steamrobot(item.hash_name)\n\n\ndef run_schedule_task():\n while True:\n schedule.run_pending()\n time.sleep(1)\n\n\ndef setup_schedule_task():\n try:\n schedule.every(10).seconds.do(update_online)\n schedule.every().day.at('01:00').do(steamrobot_Api_Item_Update)\n th = Thread(target=run_schedule_task, args=())\n th.start()\n except Exception as e:\n _logger.exception(e)\n\n\ndef get_current_all_coinflip_games():\n dt_now = dt.now()\n running_games = get_current_coinflip_games()\n running_count = len(running_games)\n if running_count >= 10:\n return running_games\n\n all_games = []\n all_games.extend(running_games)\n least_count = 10 - running_count\n last_games = CoinFlipGame.objects.filter(\n game_type=0, end=1, create_time__gte=dt_now.date()\n ).order_by('-update_time').all()[:least_count]\n for gm in last_games:\n all_games.append(format_coinflip_game(gm))\n return 
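format_jackpot_joiners() earlier in this betting_business.py record expands each joiner into ticket copies proportional to their chance before picking. random.choices performs a weighted draw without materializing tickets; a sketch with made-up stakes:

# Weighted winner pick, no ticket copies; weights are joiner amounts.
import random

stakes = {'alice': 40.0, 'bob': 60.0}
winner = random.choices(list(stakes), weights=list(stakes.values()), k=1)[0]
print(winner)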
all_games\n\n\ndef create_promotion(ref_code, steamer):\n last_ref = Promotion.objects.filter(steamer__steamid=steamer.steamid).first()\n if last_ref is None:\n ref = SteamUser.objects.filter(ref_code=ref_code).first()\n if ref and ref != steamer:\n Promotion.objects.create(ref=ref, steamer=steamer, pointed=False)\n\n\ndef get_promotion_count(steamer):\n return Promotion.objects.filter(ref=steamer).all().count()\n","sub_path":"betting/betting_business.py","file_name":"betting_business.py","file_ext":"py","file_size_in_byte":11711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"420512206","text":"'''\nAligns RADAR and position data\n\n@author: David + Mason\n'''\n#Import required modules\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom spline2 import interp_3\ndef align_data(radar_data,motion_data,radar_point_one,motion_point_one,motion_point_last, lefttrim, righttrim):\n '''\n Inputs: \n radar_data: contains pulses, timestamps, and range bins\n motion_data: contains position data for a set of times\n radar_point_one, motion_point_one, motion_point_last: currently unused\n lefttrim: leftmost pulse outputted in data\n righttrim: rightmost pulse outputted in data\n \n Outputs: \n Returns aligned data\n \n Summary:\n RADAR data and motion data have different sampling rates (Motion: 360, RADAR: ?), so the data needs to be aligned\n '''\n #Takes relevant RADAR data\n Pulses = radar_data[0][radar_point_one:20000]\n radar_time = radar_data[1][radar_point_one-1:] - (radar_data[1][radar_point_one-1])\n starting_radar_time = radar_time[1]\n \n #Takes relevant motion data\n new_motion_data = motion_data[motion_point_one:motion_point_last] \n motion_time = []\n for i in range(len(new_motion_data)):\n motion_time.append(1/360*1000*i) \n \n #Calculates aligned data\n iterated_radar_time = starting_radar_time\n final_motion_list = []\n count = 0\n for i in range(len(motion_time)):\n if radar_time[count] <= motion_time[i]:\n count += 1\n iterated_radar_time += starting_radar_time\n final_motion_list.append(new_motion_data[i])\n \n #Truncate data\n final_motion_list = final_motion_list[lefttrim:righttrim]\n Pulses = Pulses[lefttrim:righttrim ,:]\n Final = [Pulses,final_motion_list,len(final_motion_list)+radar_point_one]\n \n #Return outputs\n print(len(Pulses))\n print(len(final_motion_list))\n #print(final_motion_list)\n return Final\n\n'''\n #Linear Interpolation\n for i in range(len(Pulses)):\n floor_val = (constant*i) - math.floor(constant*i)\n ceil_val = 1 - floor_val\n temp_list = []\n for x in range(0,3):\n #print(math.floor(constant*i))\n floored = floor_val * new_motion_data[math.floor(constant*i)][x]\n ceiling = ceil_val * new_motion_data[math.ceil(constant*i)][x]\n temp_list.append(floored+ceiling)\n aligned_motion_data.append(temp_list)\n'''\n","sub_path":"Python/data_align.py","file_name":"data_align.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"254976800","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# @Time : 16/10/10 14:17\n\n\nimport time\n\nfrom kafka import KafkaConsumer\n\n\nclass Consumer(object):\n def __init__(self, topic, group, server, sleep_time):\n self.topic = topic\n self.group = group\n self.server = server\n self.sleep_time = sleep_time\n\n def run(self):\n consumer = KafkaConsumer(self.topic, group_id=self.group,\n bootstrap_servers=self.server, auto_offset_reset='earliest')\n for 
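align_data() in the data_align.py record above walks the two clocks with a counter to pair 360 Hz motion samples with radar pulses. np.searchsorted does the same nearest-preceding lookup in one vectorized call; a sketch on toy timestamps:

# For each motion timestamp, index of the latest radar pulse at or before it.
import numpy as np

radar_time = np.array([0.0, 2.5, 5.1, 7.4])      # pulse times (ms)
motion_time = np.arange(4) * (1 / 360 * 1000)    # 360 Hz clock (ms)
idx = np.searchsorted(radar_time, motion_time, side='right') - 1
idx = np.clip(idx, 0, len(radar_time) - 1)
print(idx)   # radar pulse paired with each motion sample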
message in consumer:\n print (\"%s:%d:%d: key=%s value=%s\" % (message.topic, message.partition,\n message.offset, message.key,\n message.value.decode('utf-8')))\n\n time.sleep(self.sleep_time)\n\nif __name__ == '__main__':\n a = Consumer(topic='test', group='python-test', zk_server='10.211.55.7:9092',sleep_time=1)\n a.run()\n","sub_path":"kafka-consumer/consumer.py","file_name":"consumer.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"642345615","text":"#!/usr/bin/env python3\n\nimport ast\nimport os\nimport subprocess\nimport json\nimport uuid\n\nclass Jobs():\n \"\"\" Jobs library for viki \"\"\"\n\n ### Jobs internals\n\n def __init__(self):\n \"\"\" Initialize jobs handler\n Vars for use:\n home: Viki's home directory. Usually /usr/local/viki\n jobs_path: Path to Viki's jobs directory. Usually /usr/local/viki/jobs\n job_config_filename: Name of the config for each individual job. Usually 'config.json'\n \"\"\"\n\n # Change to just /home/viki eventually\n self.home = \"/usr/local/viki\"\n\n # Path to the jobs directory relative to self.home\n self.jobs_path = self.home + \"/\" + \"jobs\"\n\n # Name of job configuration file\n self.job_config_filename = \"config.json\"\n\n def _write_job_file(self, file, text):\n \"\"\" _write_job_file\n Takes a filename and textblob and\n attempts to write the text to that file\n \"\"\"\n print('---> _write_job_file')\n print('---> _write_job_file: Arg: file: ' + str(file))\n print('---> _write_job_file: Arg: test: ' + str(text))\n\n if not file or not text:\n return False\n\n # This will not work if the directory does not exist\n with open(file, 'w') as file_obj:\n file_obj.write(json.dumps(text))\n\n return file_obj.close()\n\n def _read_job_file(self, file):\n \"\"\" _read_job_file\n Takes a filename and returns the string\n Filename must be the full path of the file, not just the name\n contents of that file or False if it does not exist\n \"\"\"\n if not file:\n return False\n\n with open(file, 'r') as file_obj:\n ret = file_obj.read()\n file_obj.close()\n\n return ret\n\n def _run_shell_command(self, command, file):\n \"\"\" _run_shell_command\n string:command Shell command to run\n string:file path Where the command results (stdout) are stored\n Runs the given command and stores results in a file\n Returns Tuple (True|False, Return code)\n \"\"\"\n\n print('----> _run_shell_command')\n print('----> Arg: command: ' + command)\n print('----> Arg: output file: ' + file)\n\n # This fixes Popen not correctly storing the output of\n # echo \"some string\" in the output file\n command = \"'\" + command + \"'\"\n print('----> command with single quotes: ' + command)\n\n # Generate output file for run results\n output_file = open(file, 'a')\n\n process = subprocess.Popen(\n command,\n stdout=output_file,\n stderr=subprocess.STDOUT,\n shell=True\n )\n\n while process.poll() is None:\n # Not finished\n pass\n\n output_file.close()\n return_code = process.poll()\n\n return True if return_code == 0 else False, return_code\n\n def _dirty_rm_rf(self, dir):\n \"\"\" Executes a quick and dirty rm -rf dirName\n Use subprocess because its easier to let bash do this than Python\n \"\"\"\n subprocess.call('rm -rf ' + dir, shell=True)\n\n\n ### Job functions\n\n def get_jobs(self):\n \"\"\"\n List jobs in ~/viki/jobs\n Takes no parameters\n \"\"\"\n message = \"Ok\"\n success = \"1\"\n jobs_list = []\n\n try:\n\n # Get all job dirs\n jobs_list = 
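_run_shell_command() above busy-waits on Popen.poll(); subprocess.run already blocks until the command exits and reports the return code, so the loop collapses to one call. A sketch keeping the record's (success, code) contract:

# Blocking equivalent of _run_shell_command() without the poll() loop.
import subprocess

def run_shell_command(command, outfile):
    with open(outfile, 'a') as out:
        result = subprocess.run(command, stdout=out,
                                stderr=subprocess.STDOUT, shell=True)
    return result.returncode == 0, result.returncode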
next(os.walk(self.jobs_path))\n jobs_list = jobs_list[1]\n\n except OSError as error:\n message = str(error)\n success = \"0\"\n\n ret = { \"success\":success, \"message\":message, \"jobs\":jobs_list }\n\n return ret\n\n\n def get_job_by_name(self, name):\n \"\"\"\n Get details of a single job by name\n string:name Name of specific job\n \"\"\"\n success = \"1\"\n message = \"Ok\"\n contents = \"\"\n\n try:\n\n if name is None:\n raise ValueError('Missing required field: jobName')\n\n job_dir = self.jobs_path + \"/\" + name\n\n if os.path.isdir(job_dir) and os.path.exists(job_dir + \"/\" + self.job_config_filename):\n contents = self._read_job_file(job_dir + \"/\" + self.job_config_filename)\n else:\n raise OSError('Job directory not found')\n\n except (OSError, ValueError) as error:\n message = str(error)\n success = \"0\"\n\n return { \"success\":success, \"message\":message, \"name\":name, \"config_json\":contents }\n\n\n def create_job(self, new_name, json_text):\n \"\"\" Adds a job \"\"\"\n message = \"Job created successfully\"\n success = \"1\"\n\n print('----> createJob')\n\n try:\n\n # Generate path and file name\n job_dir = self.jobs_path + \"/\" + new_name\n job_filename = job_dir + \"/\" + self.job_config_filename\n\n # Bail if\n if os.path.exists(job_dir):\n raise SystemError('Job directory already exists')\n else:\n os.mkdir(job_dir)\n\n # Create Json array for _write_job_file\n # todo: Would we be able to avoid this if we remove the str() from around request.get_json() ?\n json_obj = ast.literal_eval(json_text)\n print('----> json_obj: ' + str(json_obj))\n\n if not json_obj['description']:\n raise ValueError('Missing description')\n\n if not json_obj['steps']:\n raise ValueError('Missing steps')\n\n json_obj['runNumber'] = 0\n json_obj['lastSuccessfulRun'] = 0\n json_obj['lastFailedRun'] = 0\n json_obj['name'] = new_name\n\n # Create job file\n self._write_job_file(job_filename, json_obj)\n\n except (ValueError, SystemError) as error:\n message = str(error)\n success = \"0\"\n\n ret = {\"success\":success, \"message\":message}\n\n return ret\n\n\n def update_job(self, name):\n \"\"\" Update an existing job \"\"\"\n success = \"1\"\n message = \"-- Under Construction --\"\n job_filename = \"Placeholder\"\n\n # Remove existing job conf\n if os.path.exists(job_filename):\n self._dirty_rm_rf(job_filename)\n\n return { \"success\":success, \"message\":message }\n\n\n def run_job(self, name):\n \"\"\" Run a specific job \"\"\"\n success = \"1\"\n message = \"Run successful\"\n return_code = 0\n\n # Create job directory and file path names\n job_dir = self.jobs_path + \"/\" + name\n job_config_json_file = job_dir + \"/\" + \"config.json\"\n\n try:\n\n # Check job directory exists\n # Otherwise raise OSError\n if not os.path.isdir(job_dir):\n raise OSError('Job not found')\n\n # Check config json file exists\n # Otherwise raise OSError\n if not os.path.isfile(job_config_json_file):\n raise OSError('Job file not found')\n\n # Read the file and load the json inside it\n # Otherwise raise OSError\n jobJson = json.loads(self._read_job_file(job_config_json_file))\n if jobJson is False or jobJson is None:\n raise OSError('Job file could not be read')\n\n # Generate a tmp directory to work in\n # Use uuid4() because it creates a truly random uuid\n # and doesnt require any arguments and uuid1 uses\n # the system network addr.\n tmp_cwd = \"/tmp/viki-\" + str(uuid.uuid4())\n print('----> tmpdir: ' + tmp_cwd)\n os.mkdir(tmp_cwd)\n\n # Create filename path for output file\n # todo: Move this 
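create_job() below parses the posted JSON text with ast.literal_eval, which accepts any Python literal rather than strict JSON. json.loads is the direct tool and rejects malformed input; a one-line sketch:

# Strict-JSON parse of a job definition; literal_eval would also accept
# Python-only syntax such as single-quoted keys.
import json

job = json.loads('{"description": "demo", "steps": ["echo hi"]}')
print(job['steps'])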
to store the output in a new directory\n # where it will not get removed after each run\n filename = tmp_cwd + \"/\" + \"output.txt\"\n\n # Grab the json array \"steps\" from\n # jobs/jobName/config.json file\n jobSteps = jobJson['steps']\n\n # Execute them individually\n # If any of these steps fail then we stop execution\n # The steps are stored as an array of strings executed in order\n # Example in sample/config.json\n for step in jobSteps:\n\n # Debug output file - todo: **remove me eventually**\n filename = \"/usr/local/viki/jobs/testoutput.txt\"\n\n # Every time we run a step via _run_shell_command it returns a tuple:\n # success True|False and the return code of the command\n successBool, return_code = self._run_shell_command(step, filename)\n\n # If unsuccessful stop execution\n if not successBool:\n print('----> Job step failed: ' + str(step))\n print(' ---> with exit code: ' + str(return_code))\n raise SystemError('Build step failed')\n\n # Clean up tmp workdir\n self._dirty_rm_rf(tmp_cwd)\n\n except (OSError, subprocess.CalledProcessError, SystemError) as error:\n message = str(error)\n success = \"0\"\n\n return { \"success\":success, \"message\":message, \"return_code\":return_code }\n\n def delete_job(self, name):\n \"\"\" Removes a job by name\n Takes a job's name and removes the directory that the job lives in\n \"\"\"\n success = \"1\"\n message = \"Job deleted\"\n\n try:\n\n if name is None:\n raise ValueError('Missing job name')\n\n job_dir = self.jobs_path + '/' + name\n\n # Check job directory exists\n # Otherwise raise OSError\n if not os.path.isdir(job_dir):\n raise OSError('Job not found')\n\n # Remove the job directory\n self._dirty_rm_rf(job_dir)\n\n except (OSError, ValueError) as error:\n message = str(error)\n success = \"0\"\n\n return { \"success\":success, \"message\":message }\n","sub_path":"lib/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":9959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"244607093","text":"__author__ = 'James'\nimport pygame\nimport math\nimport sys\nfrom pygame.locals import*\n\n# Initialization:\n\npygame.init()\nscreen = pygame.display.set_mode((1024,768))\ndone = False\nclock = pygame.time.Clock()\ncar = pygame.image.load('car.jpg')\nk_up = k_down = k_left = k_right = 0\nspeed = direction = 0\nposition = (500,500)\nX_RIGHT_BOUNDARY = 1024\nX_LEFT_BOUNDARY = 0\nY_UP_BOUNDARY = 0\nY_DOWN_BOUNDARY = 768\nTURN_SPEED = 5\nACCELERATION = 2\nMAX_FORWARD_SPEED = 10\nMAX_REVERSE_SPEED = -5\nBLACK = (0,0,0)\n\nwhile not done:\n clock.tick(30)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n if not hasattr(event, 'key'):\n continue\n\n down = event.type == KEYDOWN\n print(down)\n if event.key == K_RIGHT:\n k_right = down * -TURN_SPEED\n elif event.key == K_LEFT:\n k_left = down * TURN_SPEED\n elif event.key == K_UP:\n k_up = down * ACCELERATION\n elif event.key == K_DOWN:\n k_down = down * -ACCELERATION\n elif event.key == K_ESCAPE:\n sys.exit(0)\n\n screen.fill(BLACK)\n\n # Simulation\n\n speed += (k_up + k_down)\n if speed > MAX_FORWARD_SPEED:\n speed = MAX_FORWARD_SPEED\n if speed < MAX_REVERSE_SPEED:\n speed = MAX_REVERSE_SPEED\n direction += (k_right + k_left)\n # New position based on current position, speed and direction\n x, y = position\n rad = direction * math.pi / 180\n x += -speed*math.sin(rad)\n y += -speed*math.cos(rad)\n if x > X_RIGHT_BOUNDARY:\n x = X_RIGHT_BOUNDARY\n if x < X_LEFT_BOUNDARY:\n x = X_LEFT_BOUNDARY\n 
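run_job() above hand-builds /tmp/viki-<uuid4> and rm -rf's it afterwards. tempfile.TemporaryDirectory covers both creation and cleanup, including when a build step raises; a sketch:

# Managed replacement for the manual /tmp/viki-<uuid> workdir above.
import os
import tempfile

with tempfile.TemporaryDirectory(prefix='viki-') as tmp_cwd:
    output_file = os.path.join(tmp_cwd, 'output.txt')
    # ... run build steps, writing stdout to output_file ...
# the directory and its contents are removed here, even on exceptions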
if y < Y_UP_BOUNDARY:\n y = Y_UP_BOUNDARY\n if y > Y_DOWN_BOUNDARY:\n y = Y_DOWN_BOUNDARY\n\n position = (x, y)\n\n # Rendering\n rotated = pygame.transform.rotate(car, direction)\n rect = rotated.get_rect()\n rect.center = position\n screen.blit(rotated, rect)\n pygame.display.flip()\n\n","sub_path":"PyGame Practice/Basics.py","file_name":"Basics.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"630917597","text":"import pygame as pg\n\nimport math\nimport random\nfrom pygame import gfxdraw\nfrom colorsys import hsv_to_rgb\n# Constantes :\nFPS = 60 # les fps tabernak\nWIND = 750 # dimentions de la fentere\npg.init()\nf = pg.display.set_mode(size=(WIND, WIND))\npg.display.set_caption(\"Pendulum\")\nfpsClock = pg.time.Clock()\nfont = pg.font.Font('../consolas.ttf', 30) #police//roxane\nb = 1\npositions=[]\ndreams=list()\nnbs=3\nlongu=WIND/2/nbs\nnbs-=1\n\nclass Pos:\n def __init__(self,x=0,y=0):\n self.x=x\n self.y=y\n self.taille=WIND/2\n def get_pos(self):\n return (self.x,self.y)\n \nclass Pendule:\n def __init__(self,x,y):\n self.pos=Pos(x,y)\n self.angle=-random.random()*math.pi\n self.coef=50\n self.mouv=0\n self.boul=Pos(self.pos.x+math.cos(self.angle)*self.coef,self.pos.y+math.sin(self.angle)*self.coef)\n def update(self):\n if self.pos.x>self.boul.x:\n self.mouv-=0.001\n else:\n self.mouv+=0.001\n self.angle+=self.mouv\n self.angle=self.angle%math.tau\n \n def draw(self):\n self.boul.x=self.pos.x+math.cos(self.angle)*longu\n self.boul.y=self.pos.y+math.sin(self.angle)*longu\n pg.draw.line(f,(255,255,255),self.pos.get_pos(),self.boul.get_pos())\n pg.draw.circle(f,(255,0,0),self.boul.get_pos(),5) \ndef dist(a:Pos,b:Pos):\n return math.sqrt((a.x-b.x)**2+(a.y-b.y)**2)\n\nmouv_speed=10\ndreams.append(Pendule(WIND/2,WIND/2))\nfor i in range(nbs):\n dreams.append(Pendule(dreams[i].boul.x,dreams[i].boul.y))\ntry:\n while b:\n b+=1\n # Actualiser:\n pg.display.flip()\n # Appliquer les images de fond sur la fenetre\n s = pg.Surface((WIND, WIND)) \n s.set_alpha(150)\n s.fill((0, 0, 0))\n f.blit(s, (0, 0))\n \n \n dreams[0].update()\n for i in range(nbs):\n dreams[i+1].pos=dreams[i].boul\n dreams[i+1].update()\n positions.append((Pos(dreams[-1].boul.x,dreams[-1].boul.y),dreams[0].angle))\n ind=0\n for ind,posz in enumerate(positions):\n coul=hsv_to_rgb(posz[1]/math.tau,1,255)\n if ind>0:\n pg.draw.line(f,coul,(posz[0].x,posz[0].y),(positions[ind-1][0].x,positions[ind-1][0].y))\n pg.draw.rect(f,coul,(posz[0].x,posz[0].y,1,1))\n ind+=1\n if ind>1000:\n positions.pop(0)\n for rev in dreams:\n rev.draw()\n \n \n \n pointer = pg.mouse\n pos = pointer.get_pos()\n\n for event in pg.event.get(): # QUAND la touche est appuyée\n if event.type == pg.QUIT:\n b = False\n print(\" Fin du jeu babe\")\n elif event.type == pg.KEYUP:\n if event.dict['key']==pg.K_a:\n for i in dreams:\n i.angle=0\n if event.dict['key']==pg.K_SPACE:\n dreams[0].__init__(WIND/2,WIND/2)\n for i in range(nbs):\n dreams[i+1].__init__(dreams[i].boul.x,dreams[i].boul.y)\n positions=[]\n \n \"\"\"if event.dict['key']==pg.K_a:\"\"\"\n \n elif event.type == pg.MOUSEBUTTONUP:\n \"\"\"if event.button==1: #click gauche\n pos=event.pos\n\n\n if event.button==3: #click droit\n \n elif event.button==4: #vers le haut\n zoom+=0.01\n elif event.button==5: #vers le bas\n zoom-=0.01\"\"\"\n\n #text = font.render(str(reve.angle), True, (255,255,255))\n #textRect = text.get_rect() \n #f.blit(text, (0,0))\n\n\n fpsClock.tick(FPS)\nexcept :\n pg.quit()\n 
raise\nfinally:\n pg.quit()\n","sub_path":"gravitation or not.py","file_name":"gravitation or not.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"361792546","text":"#!/usr/bin/env python\n# _*_coding:utf-8_*_\n\n\"\"\"\n@Time : 2019/12/2 7:58 下午\n@Author: zhangqiang\n@File: CallFirstMainWin.py\n@Software: PyCharm\n@Title:\n\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import QApplication,QMainWindow\n\nfrom firstMainWin import Ui_MainWindow\nclass MyMainWindow(QMainWindow,Ui_MainWindow):\n def __init__(self,parent = None):\n super(MyMainWindow, self).__init__(parent)\n self.setupUi(self)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n myWin = MyMainWindow()\n myWin.show()\n sys.exit(app.exec_())","sub_path":"PyQt5快速开发与实战/Chapter03/CallFirstMainWin.py","file_name":"CallFirstMainWin.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"206292872","text":"from apps.scheduling import models\nfrom rest_framework import generics\nfrom apps.scheduling.api.v1.schedules import Serializer\nfrom apps.scheduling.api.v1 import utils\nfrom django.db.models import Q\n\n\nclass SchedulesView(generics.ListCreateAPIView):\n queryset = models.Schedules.objects.all()\n serializer_class = Serializer.SchedulesSerializer\n pagination_class = utils.DefaultPagination\n \n def get_queryset(self):\n queryset = super(SchedulesView, self).get_queryset()\n params = self.request.query_params\n\n query = Q()\n\n title = params.get('title', None)\n if title is not None:\n query &= Q(title__icontains=title.strip())\n\n start_at = utils.to_datetime(params.get('start_at'), start=True)\n if start_at is not None:\n query &= Q(start_at__gte=start_at)\n\n finish_at = utils.to_datetime(params.get('finish_at'), end=True)\n if finish_at is not None:\n query &= Q(finish_at__lte=finish_at)\n \n room = params.get('room', None)\n if room is not None and room.isnumeric():\n query &= Q(room__pk=str(room))\n\n return queryset.filter(query)\n\n\nclass SchedulesDetailView(generics.RetrieveUpdateDestroyAPIView):\n queryset = models.Schedules.objects.all()\n serializer_class = Serializer.SchedulesSerializer","sub_path":"magluiza/apps/scheduling/api/v1/schedules/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"94008019","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# uz7z.py\n\n\nimport os\nimport sys\nimport asyncio\n\nimport py7zlib\n\nasync def read(f, name):\n print(f\"开始读取 {name}\")\n data = f.getmember(name).read()\n print(f\"完成读取 {name}\")\n return data\n\nasync def write(path, data):\n print(f\"开始写入{path}\")\n with open(path, \"wb\") as new_f:\n new_f.write(data)\n print(f\"写入完成 {path}\")\n\n\nasync def exc(file):\n try:\n f = py7zlib.Archive7z(open(file,\"rb\"))\n root, _ = os.path.splitext(file)\n for name in f.getnames():\n path = os.path.join(root, name)\n if not os.path.exists(os.path.dirname(path)):\n os.makedirs(os.path.dirname(path))\n try:\n try:\n data = await read(f, name)\n if data:\n await write(path, data)\n else:\n open(path, \"wb\").close()\n except py7zlib.NoPasswordGivenError as e:\n return f\"{file} 需要密码\"\n except Exception as e:\n return f\"为未知错误{e}\"\n except py7zlib.NoPasswordGivenError as e:\n return f\"{file} 需要密码\"\n else:\n os.remove(path)\n return f\"{file} 
解压完成[删除源文件]"\n\n\nasync def get_allname(dir):\n    filenames = []\n    for root, _, file in os.walk(dir):\n        for name in file:\n            _, tp = os.path.splitext(name)\n            if tp == \".7z\":\n                filenames.append(os.path.join(root,name))\n    return filenames\n\nasync def exc_all(dir):\n    tasks = [asyncio.ensure_future(exc(filename)) for filename in await get_allname(dir)]\n    results = await asyncio.gather(*tasks)\n    for result in results:\n        print(result)\n\n\ndef main():\n    if len(sys.argv) > 1:\n        dir = os.path.abspath(sys.argv[1].replace(\"\\\\\", \"\"))\n        if os.path.exists(dir) and os.path.isdir(dir):\n            choice = input(f\"{dir} [yes/n]\")\n            if choice == \"yes\":\n                loop = asyncio.get_event_loop()\n                loop.run_until_complete(exc_all(dir)) \n                loop.close()\n\nif __name__ == \"__main__\":\n    main()\n    ","sub_path":"utils/un7z.py","file_name":"un7z.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"335838496","text":"#!/usr/bin/env python\n\nfrom Tkinter import *\nventana = Tk()\nventana.title(\"Window name\")\nlabel1 = Label(ventana,text=\"asd\")\nbtn = Button(ventana,text=\"hello\", bg=\"blue\")\ntxtval = StringVar()\ntxt = Entry(ventana, textvariable = txtval)\n\n\nlabel2=Label(ventana, text=\"Enter your data: \")\nlabel2.grid(row=3, column =1)\n\nlabel1.grid(row=1,column=1)\nbtn.grid(row=2, column=1)\ntxt.grid(row=3, column=2)\n\nbtn2 = Button(ventana,text=\"ok\")\nbtn2.grid(row=4, column=1)\nventana.mainloop()\n","sub_path":"untitled.py","file_name":"untitled.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"371395232","text":"from collections import defaultdict\nimport glob\nimport os\nimport unittest\n\nimport sublime\n\n\nTEST_DELIM = '\\n---///---\\n'\nTEST_HEADER_DELIM = '***\\n'\nTEST_RESULTS_DELIM = '\\n---\\n'\n\n_converters = defaultdict(lambda: (lambda x: str(x)))\n_converters ['mode'] = str\n_converters ['count'] = int\n\n\ndef _make_args(args):\n    arg_dict = {}\n    for a in args:\n        name, value = a.split(':')\n        arg_dict[name] = _converters[name](value)\n    return arg_dict\n\n\nclass ViCmdTest (object):\n\n    def __init__(self, cmd_name, args, description,\n                 before_text, after_text, file_name, test_nr):\n        self.cmd_name = cmd_name\n        self.args = args\n        self.description = description\n        self.before_text = before_text\n        self.after_text = after_text\n        self.file_name = file_name\n        self.test_nr = test_nr\n\n    @property\n    def message(self):\n        return \"Failure in File: {0} Test Nr.: {1} -- {2}\".format(self.file_name,\n                self.test_nr, self.description)\n\n    @staticmethod\n    def from_text(text, file_name, test_nr):\n        ''' creates a test instance from a textual representation\n        '''\n        header, body = text.split(TEST_HEADER_DELIM, 1)\n        header, description = header.split('\\n', 1)\n        cmd_name, args = header.split(' ', 1)\n        args = _make_args(args.split())\n        before, after = body.split(TEST_RESULTS_DELIM)\n        return ViCmdTest(cmd_name, args, description, before, after,\n                         file_name, test_nr)\n\n\nclass ViCmdTester (unittest.TestCase):\n    \"\"\"\n    Runs tests based on cmd-test spec files (cmd-test).\n\n    Subclasses must implement setUp() and in it set self.path_to_test_specs.\n    \"\"\"\n\n    def get_tests(self):\n        \"\"\"\n        Yields `ViCmdTest`s found under the self.path_to_test_specs dir.\n        \"\"\"\n        specs = glob.glob(os.path.join(self.path_to_test_specs, \"*.cmd-test-solo\"))\n        if specs:\n            specs = specs[0:1]\n        else:\n            specs = 
glob.glob(os.path.join(self.path_to_test_specs, \"*.cmd-test\"))\n\n        for s in specs:\n            s = os.path.abspath(s)\n            content = None\n            with open(s, 'rt') as f:\n                content = f.read()\n                tests = content.split(TEST_DELIM)\n                for i, t in enumerate(tests):\n                    yield ViCmdTest.from_text(t, s, i)\n\n    def append(self, text):\n        self.view.run_command('append', {'characters': text})\n\n    def reset(self):\n        if getattr(self, \"view\", None):\n            self.view.close()\n        self.view = sublime.active_window().new_file()\n        self.view.set_scratch(True)\n\n    def set_sels(self):\n        \"\"\"\n        Enables adding selections to the buffer text using a minilanguage:\n\n        S = add empty sel before S and delete S\n        x = add empty sel before x\n        v = add sel from before the first 'v' to after the last contiguous 'v'\n        \"\"\"\n        self.view.sel().clear()\n\n        normal_mode_regs = self.view.find_all(r'x')\n        for nmr in normal_mode_regs:\n            self.view.sel().add(sublime.Region(nmr.a))\n\n        if len(self.view.sel()) > 0:\n            return\n\n        visual_mode_regs = self.view.find_all(r'v+')\n        for vmr in visual_mode_regs:\n            self.view.sel().add(vmr)\n\n        if len(self.view.sel()) > 0:\n            return\n\n        visual_mode_regs = self.view.find_all(r'S')\n        for vmr in visual_mode_regs:\n            self.view.sel().add(sublime.Region(vmr.a))\n            self.view.run_command('right_delete')\n","sub_path":"tests/cmd_tester.py","file_name":"cmd_tester.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"46680761","text":"# -*- coding: UTF-8 -*-\nimport base64\nimport requests\nimport json\nimport sys\n# sys.path.append('G:\\\\爬虫\\\\py_aiplat_demo\\\\SDK')\nimport apiutil\nimport os\nimport time\n\n'''\nFace analysis and beauty scoring\n'''\nApp_ID = 'ai提供的id'\nApp_Key = 'ai提供的key'\n\n\ndef ai_image(image_data):\n    time.sleep(0.5)\n    ai_obj = apiutil.AiPlat(App_ID, App_Key)\n    print('-----------')\n    rsp = ai_obj.getRenlianFenxi(image_data)\n    if rsp['ret'] == 0:\n        for i in rsp['data']['face_list']:\n            print(i['beauty'])\n            print('----')\n            return int(i['beauty'])\n    else:\n        # print('无返回')\n        print(rsp['ret'])\n        return int(rsp['ret'])\n\n\nif __name__ == '__main__':\n    num_files = 0\n\n    os.chdir('女神吧妹子图')\n    path = os.getcwd()\n    for i in os.listdir(path):\n        num_files += 1\n    print(num_files)\n    for root, dirs, files in os.walk(path):\n        # print(files)\n        for each in files:\n            f = open(root + '\\\\' + each, 'rb')\n            ls_f = f.read()\n            beauty = ai_image(ls_f)\n            f.close()\n            if beauty != 0:\n                if beauty < 80:\n                    os.remove(root + '\\\\' + each)\n            elif beauty == 16404:\n                os.remove(root + '\\\\' + each)\n            else:\n                print('ok')\n            time.sleep(0.5)","sub_path":"untitled2/AI评分.py","file_name":"AI评分.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"332736450","text":"# After the consumer has processed a particular piece of data, the producer needs to know about it immediately\n\nfrom queue import Queue\nfrom threading import Thread, Event\nimport time\n\n\n# A thread that produces data\ndef producer(out_q):\n    while True:\n        data = time.time()\n        # make a (data, event) pair and hand it to the consumer\n        event = Event()\n        out_q.put((data, event))\n        # wait for consumer to process the item\n        event.wait()\n        time.sleep(5)\n        print('now ---- continue to produce data')\n\n# a thread that consumes data\ndef consumer(in_q):\n    while True:\n        # get some data\n        data, event = in_q.get()\n        # process data\n        print(data)\n\n        # indicate completion\n        event.set()\n\n\nq = Queue()\nt1 = Thread(target=producer, args=(q,))\nt2 = Thread(target=consumer, 
args=(q,))\n\nt1.start()\nt2.start()\n","sub_path":"th_alogri_python/concurrent/12-5.py","file_name":"12-5.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"396347361","text":"# import fitz\nimport pytesseract\nfrom PIL import Image\nimport io\nimport cv2\nimport numpy as np\nfrom pdf2image import convert_from_bytes\nimport re\nfrom core import logging\nimport sys\nimport traceback\nlogger = logging.getLogger(__name__)\n\n\ndef de_skew(image, show=False, delta=0):\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    gray = 255 - gray\n    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n    coords = np.column_stack(np.where(thresh > 0))\n    angle = cv2.minAreaRect(coords)[-1]\n    logger.debug('Angle: %s', angle)\n    if angle == 90:\n        angle = 0\n    elif angle < -45:\n        angle = -(90 + angle)\n    else:\n        angle = -angle\n    angle = angle + delta\n    logger.debug(angle)\n    (h, w) = image.shape[:2]\n    center = (w // 2, h // 2)\n    M = cv2.getRotationMatrix2D(center, angle, 1.0)\n    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n    if show:\n        cv2.imshow(\"Unrotated\", image)\n        cv2.imshow(\"Rotated\", rotated)\n        cv2.waitKey(0)\n    else:\n        print(\"Don't show....\")\n    return rotated\n\n\ndef get_image(data):\n    image = convert_from_bytes(data.read())[0]\n    return image\n\n\ndef crop(image, x=0, y=0, h=2338, w=1653, show=False, is_gray=True):\n    if not is_gray:\n        image = get_grayscale(image)\n    crop_img = image[y:y + h, x:x + w]\n    if show:\n        cv2.imshow(\"cropped\", crop_img)\n        cv2.waitKey(0)\n    return crop_img\n\n\ndef get_grayscale(image):\n    return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\ndef remove_noise(image):\n    return cv2.medianBlur(image, 5)\n\n\ndef thresholding(image, threshold=240):\n    ret, th_img = cv2.threshold(image, thresh=threshold, maxval=255, type=cv2.THRESH_BINARY)\n    return th_img\n\n\ndef dilate(image):\n    kernel = np.ones((5, 5), np.uint8)\n    return cv2.dilate(image, kernel, iterations=1)\n\n\ndef erode(image):\n    kernel = np.ones((5, 5), np.uint8)\n    return cv2.erode(image, kernel, iterations=1)\n\n\ndef opening(image):\n    kernel = np.ones((5, 5), np.uint8)\n    return cv2.morphologyEx(image, cv2.MORPH_OPEN, kernel)\n\n\ndef canny(image):\n    return cv2.Canny(image, 100, 200)\n\n\n# def deskew(image):\n#     coords = np.column_stack(np.where(image > 0))\n#     angle = cv2.minAreaRect(coords)[-1]\n#     logger.debug(angle)\n#     if angle < -45:\n#         angle = -(90 + angle)\n#     else:\n#         angle = -angle\n#     logger.debug(angle)\n#     (h, w) = image.shape[:2]\n#     center = (w // 2, h // 2)\n#     M = cv2.getRotationMatrix2D(center, angle, 1.0)\n#     rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n#     return rotated\n\n\ndef match_template(image, template):\n    return cv2.matchTemplate(image, template, cv2.TM_CCOEFF_NORMED)\n\n\ndef image_to_string(image):\n    config = r'--oem 3 --psm 6'\n    return pytesseract.image_to_string(image, lang='eng', config=config)\n\n\ndef get_ref_number(image, regex):\n    text = image_to_string(image)\n    lines = text.split(\"\\n\")\n    logger.debug(\"\\n\")\n    logger.debug(\"========================================================\")\n    logger.debug(regex)\n    i = 0\n    for line in lines:\n        ret = re.search(regex, line.strip())\n        i = i+1\n        logger.debug('%s. %s', i, line)\n\n        if ret:\n            groups = tuple(filter(lambda x: x, ret.groups()))\n            val = groups[0] if groups and len(groups) else None\n            if val:\n                val = val.upper()\n                return val\n\n\ndef threshold_trials(orig_img, regex, thresholds, i=0):\n    img = orig_img.copy()\n    threshold = thresholds[i]\n    logger.debug(\"Try with threshold: %s\", threshold)\n    img = thresholding(np.array(img), threshold=threshold)\n    ref_number = get_ref_number(img, regex)\n    if ref_number:\n        return ref_number\n    j = i+1\n    if j < len(thresholds):\n        return threshold_trials(orig_img, regex, thresholds, i=j)\n\n\ndef zoom_in(img, zoom):\n    return cv2.resize(img, None, fx=zoom, fy=zoom)\n\n\ndef extract_ref_number(pdf_data, regex, **kwargs):\n    try:\n        threshold = kwargs.get('threshold')\n        zoom = kwargs.get('zoom')\n        show = kwargs.get('show', 0)\n        if threshold:\n            del kwargs['threshold']\n        if zoom:\n            del kwargs['zoom']\n        else:\n            zoom = 1.0\n        img = np.array(get_image(pdf_data))\n        img = crop(img, **kwargs)\n        img = de_skew(img, show=False)\n        img = zoom_in(img, zoom)\n        ref_number = get_ref_number(img, regex)\n        print('Show?....... ', show)\n        if not ref_number:\n            thresholds = [threshold, threshold-7, threshold+7, threshold-14, threshold+14, threshold*2/3.5, threshold*2/3]\n            logger.debug(\"Try with thresholds: %s\", thresholds)\n            ref_number = threshold_trials(img, regex, thresholds)\n        return ref_number\n    except Exception as ex:\n        print(\"Exception in user code:\")\n        print(\"-\"*60)\n        traceback.print_exc(file=sys.stdout)\n        print(\"-\"*60)\n        return None\n\n\ndef show_wait_destroy(winname, img):\n    cv2.imshow(winname, img)\n    cv2.moveWindow(winname, 500, 0)\n    cv2.waitKey(0)\n    cv2.destroyWindow(winname)\n\n\ndef apply_corrections(ref_number, corrections):\n    if corrections and ref_number:\n        res = list(ref_number)\n        for c in corrections:\n            pos = c['pos']\n            x = res[pos]\n            if x == c['val']:\n                res[pos] = c['rep']\n        return ''.join(res)\n    else:\n        return ref_number\n\n\ndef auto_remove_scratches():\n    logger.debug(\"Removing scratches...\")\n    file = \"C:\\\\Users\\\\godfred.nkayamba\\\\Downloads\\\\failed\\\\C.pdf\"\n    with open(file, 'rb') as pdf_data:\n        img = np.array(get_image(pdf_data))\n    logger.debug(img.shape)\n    corrections = [{'pos': 1, 'val': '2', 'rep': 'Z'}]\n    C_regex = '[ ]{0,1}(\\w{15,})[\\({ ]'\n    A_kwargs = {'x': 700, 'y': 20, 'h': 500, 'w': 800, 'threshold': 230}\n    C_kwargs = {'x': 700, 'y': 600, 'h': 400, 'w': 800, 'threshold': 225}\n    kwargs = C_kwargs\n    regex = C_regex\n    threshold = kwargs.get('threshold')\n    if threshold:\n        del kwargs['threshold']\n    img = crop(img, **kwargs)\n    img = de_skew(img, show=False)\n\n    # ret, binary = cv2.threshold(img, threshold*2/3.5, 255, cv2.THRESH_BINARY)\n    # ref_number = get_ref_number(binary, regex)\n    thresholds = [threshold, threshold+7, threshold-1, threshold*2/3.5, threshold*2/3]\n    logger.debug(\"Try with thresholds: %s\", thresholds)\n    ref_number = threshold_trials(img, regex, thresholds)\n    logger.debug(corrections)\n    if ref_number and corrections and len(corrections):\n        ref_number = apply_corrections(ref_number, corrections)\n    logger.debug(ref_number)\n    # cv2.imshow(\"Orig\", orig)\n    # cv2.imshow(\"Binary\", binary)\n    # cv2.waitKey(0)\n\n\n# auto_remove_scratches()\n\n\ndef remove_lines(image, line_spec=(1, 6)):\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]\n\n    # Remove horizontal\n    horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (64, 2))\n    detected_lines = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, horizontal_kernel, 
iterations=2)\n cnts = cv2.findContours(detected_lines, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n cnts = cnts[0] if len(cnts) == 2 else cnts[1]\n for c in cnts:\n cv2.drawContours(image, [c], -1, (255, 255, 255), 2)\n\n # Repair image\n repair_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 6))\n result = 255 - cv2.morphologyEx(255 - image, cv2.MORPH_CLOSE, repair_kernel, iterations=1)\n\n # cv2.imshow('thresh', thresh)\n # cv2.imshow('detected_lines', detected_lines)\n # cv2.imshow('image', image)\n cv2.imshow('result', result)\n cv2.waitKey()\n","sub_path":"backend_rest/sales/ocr2.py","file_name":"ocr2.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"430452391","text":"import math\nfrom checker import *\n\ndebug=True\nclass StateContainer:\n # hashed container for State lookup\n def __init__(self, state_list=None):\n if state_list is None:\n self.container = {}\n else:\n for state in state_list:\n if not self.has(state):\n self.add(state)\n else:\n print(\"state is already in container\")\n\n def has(self, state):\n if state.board_hash() in self.container:\n return self.container[state.board_hash()]\n else:\n return False\n\n def __iter__(self):\n return iter(self.container.values())\n\n def __len__(self):\n return len(self.container)\n\n def __contains__(self, state):\n return self.has(state)\n\n def add(self, state):\n if not self.has(state):\n self.container[state.board_hash()] = state\n\n def remove(self, state):\n self.container.pop(state.board_hash())\n\n\nclass AlphaBeta(Checker):\n \"\"\"\n convention: alpha is not flipped. beta is always flipped. first player is always alpha\n \"\"\"\n\n # interacts with a checker game and finds the best move for the AI player\n # every board passed to AlphaBeta assumes optimization for positive player\n # you have to flip the board, basically.\n def __init__(self, alpha_depth=5, beta_depth=5, human_first=False, human_second=False, max_rounds=300):\n super(AlphaBeta, self).__init__()\n # currently no visited states check is implemented\n # the reason is because I do not know how to keep an even depth if multiple states wound up together\n # # the same position is drastically different given who plays first\n # # a king in check runs if the king plays, and mates if the checker plays\n # self.max_nodes = StateContainer()\n # self.min_nodes = StateContainer()\n\n # be careful that self.state and self.root_node.state should always be the same object\n if debug:\n assert not self.state.flipped\n self.root_node = AlphaNode(self.state, None)\n self.alpha_depth = alpha_depth\n self.beta_depth=beta_depth\n self.last_state = None\n self.human_first=human_first\n self.human_second=human_second\n self.max_rounds=max_rounds\n\n def start_game(self):\n print(\"Current board\")\n self.print_board(player_view=False)\n for i in range(self.max_rounds):\n ret=True\n if i%2==0:\n if self.human_first:\n self.verbose_human_play()\n else:\n ret=self.auto_play()\n print(\"Alpha played\")\n self.state.get_flipped_state().print_board(player_view=False)\n else:\n if self.human_second:\n self.verbose_human_play()\n else:\n ret=self.auto_play()\n print(\"Beta played\")\n self.state.get_flipped_state().print_board(player_view=False)\n if not ret:\n print(\"Game finished\")\n break\n if ret:\n print(\"Game reached max rounds\")\n self.print_winner()\n\n def print_winner(self):\n pieces=self.root_node.evaluate()\n if pieces>0:\n print(\"First player wins\")\n elif 
pieces<0:\n print(\"Second player wins\")\n else:\n print(\"Draw\")\n\n def auto_play(self, verbose=False):\n if debug:\n assert self.root_node.state is self.state\n if verbose:\n self.state.print_board()\n if self.root_node.is_alpha:\n self.root_node.alpha_prune(math.inf, self.alpha_depth)\n else:\n self.root_node.beta_prune(-math.inf, self.beta_depth)\n\n if self.root_node.choice is not None:\n # will be None if self.root_node is terminal and has no choice left\n self.last_state=self.root_node.state\n self.root_node = self.root_node.choice\n self.state = self.root_node.state\n if verbose:\n print(\"becomes\")\n self.state.print_board()\n return True\n else:\n return False\n\n def verbose_human_play(self):\n state=self.ask_human_for_action(self.state)\n self.human_play(state)\n\n def human_play(self, human_action):\n if debug:\n assert self.root_node.state is self.state\n if len(self.root_node.children)==0:\n self.root_node.make_children()\n # bug with flipping incorrectly.\n child = self.root_node.find_child(human_action.get_flipped_state())\n self.root_node = child\n self.state = self.root_node.state\n\n def print_board(self,player_view=False):\n self.root_node.state.print_board(player_view=player_view)\n\n\nclass AlphaBetaNode:\n \"\"\"\n Modified significantly from wikipedia alpha beta pruning algorithm.\n The tree is permanent\n Alpha prune and beta prune is separate, which eliminates unused alpha param for alpha nodes, vice versa.\n Wikipedia has two max functions for alpha nodes, which is redundant.\n Every prune step holds a reference to the choice\n The logic from my pruning function is very visible.\n \"\"\"\n\n def __init__(self, state, from_node):\n # self.is_alpha = is_alpha\n # # alpha value is the lower bound of the minimax value of this alpha node\n # if is_alpha:\n # self.alpha = None\n # else:\n # self.beta = None\n self.state = state\n self.parent = None\n self.children = []\n self.choice = None\n self.from_node=from_node\n\n def find_child(self, state):\n for child in self.children:\n if child.state.board_hash() == state.board_hash():\n return child\n\n def is_terminal(self):\n return (self.state.board >= 0).all() or (self.state.board <= 0).all()\n\n\nclass AlphaNode(AlphaBetaNode):\n def __init__(self, state, from_node):\n super(AlphaNode, self).__init__(state,from_node)\n self.is_alpha = True\n self._alpha = None\n\n def make_children(self):\n if debug:\n assert len(self.children) == 0\n states = self.state.get_legal_actions()[0]\n for state in states:\n opponent_state = state.get_flipped_state()\n child_node = BetaNode(opponent_state,self)\n self.children.append(child_node)\n\n def alpha_prune(self, parent_beta, depth):\n if debug:\n assert self.is_alpha\n if depth == 0 or self.is_terminal():\n return self.evaluate()\n\n if len(self.children) == 0:\n self.make_children()\n\n # the lower bound of what this max node can achieve\n lower_bound_alpha = -math.inf\n for child in self.children:\n # if minchild does not explore all grandchildren,\n # then the minchild has discovered an option with value lower than lower_bound_alpha\n # which will be returned, and it will not update lower_bound_alpha here\n child_value = child.beta_prune(lower_bound_alpha, depth - 1)\n if child_value > lower_bound_alpha:\n lower_bound_alpha = child_value\n self.choice = child\n if lower_bound_alpha >= parent_beta:\n # parent wants to minimize\n # parent has an option with less payoff\n # parent will not choose this alpha node\n # this alpha node will not need to explore its children\n break\n 
self._alpha = lower_bound_alpha\n return lower_bound_alpha\n\n def evaluate(self):\n # should not be called unless the depth is reached or the game ends\n # zero sum compliant\n assert not self.state.flipped\n return self.state.board.sum()\n\n\nclass BetaNode(AlphaBetaNode):\n def __init__(self, state, from_node):\n super(BetaNode, self).__init__(state, from_node)\n self.is_alpha = False\n self._beta = None\n\n def make_children(self):\n if debug:\n assert len(self.children) == 0\n states = self.state.get_legal_actions()[0]\n for state in states:\n if debug:\n assert ((self.state.board>0).sum()==(state.board>0).sum())\n assert ((self.state.board<0).sum()>=(state.board<0).sum())\n opponent_state = state.get_flipped_state()\n child_node = AlphaNode(opponent_state, self)\n self.children.append(child_node)\n\n def beta_prune(self, parent_alpha, depth):\n if debug:\n assert not self.is_alpha\n if depth == 0 or self.is_terminal():\n return self.evaluate()\n\n if len(self.children) == 0:\n self.make_children()\n\n upper_bound_beta = math.inf\n for child in self.children:\n child_value = child.alpha_prune(upper_bound_beta, depth - 1)\n if child_value < upper_bound_beta:\n upper_bound_beta = child_value\n self.choice = child\n if upper_bound_beta <= parent_alpha:\n break\n self._beta = upper_bound_beta\n return upper_bound_beta\n\n def evaluate(self):\n if debug:\n assert self.state.flipped\n return -self.state.board.sum()\n\n def evaluate_defensive(self):\n if debug:\n assert self.state.flipped\n board= (-self.state.board)\n board=board[board>0]\n return board.sum()\n\n def evaluate_offensive(self):\n if debug:\n assert self.state.flipped\n board= (-self.state.board)\n board=board[board<0]\n return board.sum()\n","sub_path":"demodir/yesterday/alphabeta.py","file_name":"alphabeta.py","file_ext":"py","file_size_in_byte":9732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"388873067","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 17 08:07:21 2020\n\n@author: nathna barloy\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport sys\nimport time\nimport math\n\nfilepath = sys.argv[1]\n\nimg = cv2.imread(filepath)\nback = cv2.imread('.\\\\image_test\\\\jeu1\\\\back.jpg')\nback = cv2.cvtColor(back, cv2.COLOR_BGR2GRAY)\nstart = time.time()\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\ngray = cv2.subtract(gray, back)\ngray = cv2.medianBlur(gray,5)\n\n\n\n\n\nret,th = cv2.threshold(gray,0,255,cv2.THRESH_BINARY)\n#h = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 31, 5)\nth = cv2.bitwise_not(th)\n\n\"\"\"\nth2 = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,11,2)\nth3 = cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,11,2)\n \nblur = cv2.GaussianBlur(gray,(5,5),0)\nret3,th5 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\nkernel = np.ones((5,5), np.uint8)\nop2 = cv2.morphologyEx(th3, cv2.MORPH_OPEN, kernel)\nkernel = np.ones((9,9), np.uint8)\nop3 = cv2.morphologyEx(th3, cv2.MORPH_OPEN, kernel)\n\"\"\"\n\n\"\"\"\ndef m(image, p, q) :\n tot = 0\n for i in range(len(image)) :\n for j in range(len(image[0])) :\n tot += int(image[i,j]==0) * (i**p) * (j**q)\n return tot\n\nm00 = m(th, 0, 0)\nug = m(th, 1, 0)/m00\nvg = m(th, 0, 1)/m00\n\n\ndef mu(image,p, q) :\n tot = 0\n for i in range(len(image)) :\n for j in range(len(image[0])) :\n tot += int(image[i,j]==0) * ((i-ug)**p) * ((j-vg)**q)\n return 
tot\n\ntheta = math.atan(2*mu(th,1,1)/(mu(th,2,0)-mu(th,0,2)))/2\n\"\"\"\ny, x = np.nonzero(th)\nmx, my = np.mean(x), np.mean(y)\nx = x - mx\ny = y - my\ncoords = np.vstack([x, y])\ncov = np.cov(coords)\nevals, evecs = np.linalg.eig(cov)\nsort_indices = np.argsort(evals)[::-1]\nx_v1, y_v1 = evecs[:, sort_indices[0]] # Eigenvector with largest eigenvalue\ntheta = math.atan2(y_v1, x_v1)\n\n\n\ndef generate_arc(R, amin, amax) :\n angle = amin\n res = [(int(R*np.cos(angle)), int(R*np.sin(angle)))]\n da = 1/R\n while angle <= amax :\n x = int(R*np.cos(angle))\n y =int( R*np.sin(angle))\n if (x,y)!=res[-1] :\n res.append((x,y))\n angle += da\n return res\n\n\ndef isBlack(R, amin, amax) :\n intmx = int(mx)\n intmy = int(my)\n try :\n for coord in generate_arc(R, amin, amax) :\n if th[intmy+coord[1], intmx+coord[0]]!=0 :\n return False\n return True\n except IndexError :\n return True\n\ndef isWhite(R, amin, amax) :\n intmx = int(mx)\n intmy = int(my)\n prec = 0\n seen = False\n for coord in generate_arc(R, amin, amax) :\n new = th[intmy+coord[1], intmx+coord[0]]\n if prec==0 and new!=0 :\n if not seen :\n seen = True\n else :\n return False\n prec = new\n return True\n\ndef nbBlock(R, amin, amax) :\n intmx = int(mx)\n intmy = int(my)\n prec = 0\n count = 0\n for coord in generate_arc(R, amin, amax) :\n try :\n new = th[intmy+coord[1], intmx+coord[0]]\n except IndexError :\n new = 0\n if prec==0 and new!=0 :\n count += 1\n prec = new\n return count\n\n\n#hyperparameters\nalpha = math.pi/9\nbeta = 0.7\ngamma = math.pi/3\n\namin = theta+math.pi-alpha\namax = theta+math.pi+alpha\n\n\n#find the tip\nRmax = max(len(th), len(th[0]))\nRmin = 0\nwhile Rmax-Rmin>1 :\n Rmed = (Rmax+Rmin)/2\n if isBlack(Rmed, amin, amax) :\n Rmax = Rmed\n else :\n Rmin = Rmed\nLtip = Rmax\n\n#find the min\nRmax = Ltip\nRmin = 0\nwhile Rmax-Rmin>1 :\n Rmed = (Rmax+Rmin)/2\n if isWhite(Rmed, amin, amax) :\n Rmin = Rmed\n else :\n Rmax = Rmed\nLmin = Rmin\n\nif Ltip/Lmin<beta :\n answer = 'rock'\nelse :\n Ljudge = (Ltip+Lmin)/2\n c = nbBlock(Ljudge, theta+gamma, theta+2*math.pi-gamma)\n print(c)\n answer = 'scissors' if c<=3 else 'paper'\n\nprint(time.time()-start)\nprint(answer)\n\n\n\nplt.imshow(th, 'gray')\nplt.plot([mx,mx], [my-10,my+10], color='red')\nplt.plot([mx-10, mx+10], [my,my], color='red')\nco = np.array(generate_arc(Ltip, amin, amax))\nplt.plot(co[:,0]+mx, co[:,1]+my, color='blue')\nco = np.array(generate_arc(Lmin, amin, amax))\nplt.plot(co[:,0]+mx, co[:,1]+my, color='green')\nco = np.array(generate_arc(Ljudge, theta+gamma, theta+2*math.pi-gamma))\nplt.plot(co[:,0]+mx, co[:,1]+my, color='yellow')\nplt.show()\n","sub_path":"image_transfo.py","file_name":"image_transfo.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"213351798","text":"import tensorflow as tf\nfrom sklearn.datasets import load_iris\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\ntrain=pd.read_csv('Train.csv')\ntest=pd.read_csv('Test.csv')\ntrain_data=train[['AMOUNT_MAX', 'PROBABILITY_ThirdQuartile', 'Repeater_Index_Max', 'Back_and_Forth_Counter_Max', 'Forward_Count_Max', 'Quarterly_Positive_Momentum_Max',\n 'RATIO_REPEATER_TO_TOTAL', 'OVERALL_SLOPE', 'OVERALL_SLOPE_MIDPOINT', 
'OPPORTUNITY_HUNG_FOR_LONG']]\ntrain_target=train['TGT_LOST']\ntrain_target1=pd.get_dummies(train_target).values\npca=PCA(n_components=10)\nX=pca.fit_transform(train_data)\n\n# figsize just controls the figure size and can be removed\nf=plt.figure(figsize=(60,60))\nax=f.add_subplot(111)\nax.scatter(X[:,0][train_target==0],X[:,1][train_target==0],c='r')\nax.scatter(X[:,0][train_target==1],X[:,1][train_target==1],c='y')\nax.set_title('Data distribution plot')\nplt.show()\n\nx=tf.placeholder(dtype=tf.float32,shape=[None,10],name=\"input\")\ny=tf.placeholder(dtype=tf.float32,shape=[None,2],name=\"output\")\n\nw=tf.get_variable(\"weight\",shape=[10,2],dtype=tf.float32,initializer=tf.truncated_normal_initializer(stddev=0.1))\nbais=tf.get_variable(\"bais\",shape=[2],dtype=tf.float32,initializer=tf.constant_initializer(0))\ny_1=tf.nn.bias_add(tf.matmul(x,w),bais)\n\n# labels is the actual value, logits is the predicted value\nloss=tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=y_1))\nx0min,x0max=X[:,0].min(),X[:,0].max()\nx1min,x1max=X[:,1].min(),X[:,1].max()\n\nwith tf.Session() as sess:\n    accuracy=tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(y,1),tf.arg_max(y_1,1)),tf.float32))\n    train_step=tf.train.AdamOptimizer().minimize(loss)\n    my=tf.arg_max( y_1,1)\n    sess.run(tf.global_variables_initializer())\n    for i in range(1000001):\n        sess.run(train_step,feed_dict={x:X,y:train_target1})\n        if i%500==0:\n            accuracy_print=sess.run(accuracy,feed_dict={x:X,y:train_target1})\n            print(accuracy_print)\n    \n    test_x=test[['AMOUNT_MAX', 'PROBABILITY_ThirdQuartile', 'Repeater_Index_Max', 'Back_and_Forth_Counter_Max', 'Forward_Count_Max', 'Quarterly_Positive_Momentum_Max',\n       'RATIO_REPEATER_TO_TOTAL', 'OVERALL_SLOPE', 'OVERALL_SLOPE_MIDPOINT', 'OPPORTUNITY_HUNG_FOR_LONG']]\n    pred=sess.run(my,feed_dict={x:test_x})\n\nnp.savetxt(\"predict.csv\", pred);\n# 16707 test rows in total, 13488 predicted correctly, 81% accuracy; training time 3:30->5:30 (2 hours), 1000001 training iterations\n\npred.shape\npred\n\nf2=plt.figure(figsize=(60,60))\nax2=f2.add_subplot(111)\nax2.scatter(X[:,0][pred==0],X[:,1][pred==0],c='r')\nax2.scatter(X[:,0][pred==1],X[:,1][pred==1],c='y')\nax2.set_title('Data distribution plot')\nplt.show()","sub_path":"MergeOpportunityLossPred代码.py","file_name":"MergeOpportunityLossPred代码.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"531054886","text":"import paho.mqtt.client as mqtt\nimport socket\nimport time\n\nimport asyncio\nfrom aiocoap import *\n\n\nfrom tornado.ioloop import IOLoop, PeriodicCallback\nfrom tornado import gen\nfrom tornado.websocket import websocket_connect\n\nimport tornado.web\nimport tornado.websocket\nimport tornado.ioloop\n\nimport time\n\n\n# MQTT\nclass MQTTClient(object):\n    def __init__(self):\n        self.response = None\n        self.ms = 0\n        self.client = mqtt.Client()\n        self.client.connect('10.0.0.246', 1883, 60)\n        self.client.on_connect = self.on_connect\n        self.client.on_message = self.on_message\n\n    def on_connect(self, client, userdata, flags, rc):\n        self.client.subscribe(\"topic/out\")\n\n    def on_message(self, client, userdate, msg):\n        self.ms = int(round(time.time()*1000))-self.ms\n        msg = msg.payload.decode().split(',')\n        length = len(msg)\n        self.response = [self.ms, length]\n        self.client.disconnect()\n    \n\n    def publish(self, topic, msg):\n        self.response = None\n        self.ms = int(round(time.time()*1000))\n        self.client.publish(topic, msg)\n        while self.response == None:\n            self.client.loop_forever()\n        self.client.reconnect()\n        return self.response\n\n    def disconnect(self):\n        self.client.disconnect()\n    \n# 
COAP\nclass COAPClient(object):\n def __init__(self):\n self.ms = 0\n self.length = 0\n self.context = None\n asyncio.get_event_loop().run_until_complete(self.create_context())\n\n async def create_context(self):\n self.context = await Context.create_client_context()\n\n async def put(self, msg):\n self.ms = int(round(time.time()*1000))\n request = Message(code=PUT, payload=bytes(msg, 'utf-8'))\n request.opt.uri_host = '10.0.0.236'\n request.opt.uri_path = (\"other\", \"block\")\n response = await self.context.request(request).response\n self.ms = int(round(time.time()*1000))-self.ms\n self.length = len(response.payload.decode().split(','))\n \n def return_value(self):\n return [self.ms, self.length]\n\n# WebSocket\n\nclass Client(object):\n def __init__(self, url, timeout):\n self.url = url\n self.timeout = timeout\n self.ioloop = IOLoop.instance()\n self.ws = None\n self.ms = 0\n self.length = 0\n \n @gen.coroutine\n def connect(self):\n print (\"trying to connect\")\n \n try:\n self.ws = yield websocket_connect(self.url)\n except Exception as e:\n print(\"connection error\")\n else:\n print(\"connected\")\n self.ms = int(round(time.time()*1000))\n self.ws.write_message(\"msg\")\n msg = yield self.ws.read_message()\n self.ms = int(round(time.time()*1000))-self.ms\n self.length = 1\n print(msg)\n self.ioloop.stop()\n\n\n def return_value(self):\n return [self.ms, self.length]\n\n\n\ndef start_profiling():\n\tmqtt_instance = MQTTClient() \n\t\n\tfor x in range(0, 5):\n\t\tprint('[MQTT] Publishing #'+str(x+1))\n\t\trc = mqtt_instance.publish(\"topic/incoming\", 'msg')\n\t\tprint('[MQTT] Received')\n\t\tprint(rc[0])\n\t\tprint(rc[1])\n\t\t\n\tcoap_instance = COAPClient()\n\tfor x in range(0, 5):\n\t\tprint('[COAP] PUT msg #'+str(x+1))\n\t\tasyncio.get_event_loop().run_until_complete(coap_instance.put('msg'))\n\t\tprint('[COAP] Received')\n\t\trc = coap_instance.return_value()\n\t\tprint(rc[0])\n\t\tprint(rc[1])\n\t\t\n\tclient = Client(\"ws://10.0.0.236:3000\", 5)\n\tfor x in range(0, 5):\n\t\tprint('Websocket msg#'+str(x+1))\n\t\tclient.connect()\n\t\tclient.ioloop.start()\n\t\trc = client.return_value()\n\t\tprint(rc[0])\n\t\tprint(rc[1])\n\t\n\nstart_profiling()\n","sub_path":"test/4_test/client_main.py","file_name":"client_main.py","file_ext":"py","file_size_in_byte":3692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"305881512","text":"'''\nBarath Vedantham\nCIS 41A Fall 2017\nUnit G take-home assignment\n'''\n\n# First script\n\ndef main():\n find_highest_pop_state()\n\ndef find_highest_pop_state():\n states_file = open(\"States.txt\", 'r')\n line = states_file.readline()\n largest_pop = -1 # needs to be set to any val lower than the smallest\n # population size\n for line in states_file:\n fields = line.split(' ')\n state_abbrev = fields[0]\n region = fields[1]\n population = int(fields[2])\n\n if population > largest_pop and region == \"Midwest\":\n largest_pop = population\n largest_pop_state = state_abbrev\n\n print(\"Highest population state in the Midwest is:\", largest_pop_state, largest_pop)\n\nmain()\n\n'''\nResults:\nHighest population state in the Midwest is: IL 12802000\n'''\n","sub_path":"cis41a/take-home/take-home_G/states.py","file_name":"states.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"580900205","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 07 16:37:13 2015\n\n@author: Dimi\n\"\"\"\nfrom 
__future__ import division\nfrom cvxopt import matrix, solvers\nimport numpy as np\n#from l1 import l1, l1blas\nimport scipy as sc\n#solvers.options['show_progress'] = 1\n\n\n\ndef nullspace(A, atol=1e-13, rtol=0):\n \"\"\"Compute an approximate basis for the nullspace of A.\n\n The algorithm used by this function is based on the singular value\n decomposition of `A`.\n\n Parameters\n ----------\n A : ndarray\n A should be at most 2-D. A 1-D array with length k will be treated\n as a 2-D with shape (1, k)\n atol : float\n The absolute tolerance for a zero singular value. Singular values\n smaller than `atol` are considered to be zero.\n rtol : float\n The relative tolerance. Singular values less than rtol*smax are\n considered to be zero, where smax is the largest singular value.\n\n If both `atol` and `rtol` are positive, the combined tolerance is the\n maximum of the two; that is::\n tol = max(atol, rtol * smax)\n Singular values smaller than `tol` are considered to be zero.\n\n Return value\n ------------\n ns : ndarray\n If `A` is an array with shape (m, k), then `ns` will be an array\n with shape (k, n), where n is the estimated dimension of the\n nullspace of `A`. The columns of `ns` are a basis for the\n nullspace; each element in numpy.dot(A, ns) will be approximately\n zero.\n \"\"\"\n A = np.atleast_2d(A)\n u, s, vh = np.linalg.svd(A)\n tol = max(atol, rtol * s[0])\n nnz = (s >= tol).sum()\n ns = vh[nnz:].conj().T\n return ns\n\n\n\ndef null(a, rtol=1e-5):\n u, s, v = np.linalg.svd(a)\n rank = (s > rtol*s[0]).sum()\n return v[rank:].T.copy()\n\ndef formulate_sparse2(X , y):\n \"\"\"\n N,M = np.shape(X)\n r = np.linalg.matrix_rank(X)\n if r < np.ceil(N/2):\n print \"Size of nullspace LARGE!!!!!!!!!!!!!!!!\"\n print \"rank\"\n print r\n print \"N\"\n print N\n W = null(X.T)\n y.shape = (y.size,1)\n #print np.linalg.pinv(X.T).shape\n #print y.shape\n b = np.dot(np.linalg.pinv(X.T) , y)\n \n m,n = W.shape\n \n Ap = matrix(np.concatenate([np.concatenate([W,-np.eye(m)],axis = 1) , np.concatenate([-W , -np.eye(m)],axis=1)],axis =0))\n \n bp = matrix(np.concatenate([-b,b], axis = 0))\n cp = matrix(np.concatenate( [np.zeros((n,1)) , np.ones((m,1))] ,axis = 0 ))\n \n sol = solvers.lp(cp,Ap,bp)\n sol = sol['x'][:n]\n \n print W.shape\n print b.shape\n sol = l1(matrix(W), matrix(-b))\n \n ap = b + np.dot(W, np.asarray(sol))\n #print \"nullspace\"\n #print W.shape\n #print \"rank\"\n #print np.linalg.matrix_rank(X.T)\n #print X.T.shape\n \"\"\"\n \n N,M = np.shape(X)\n y.shape = (y.size,1)\n try:\n b = np.dot(np.linalg.pinv(X.T),y)\n except np.linalg.linalg.LinAlgError as err:\n b = np.dot(sc.linalg.pinv(X.T),y)\n r = np.linalg.matrix_rank(X)\n \"\"\"\n if r < N:\n W = nullspace(X.T)\n \n if r < np.ceil(N/2):\n print \"Size of nullspace LARGE!!!!!!!!!!!!!!!!\"\n print \"rank\"\n print r\n print \"# of unknowns\"\n print N\n print \"nullspace size\"\n print W.shape\n \n Mp,Np=W.shape\n Ap = np.hstack((np.vstack((W,-W)),np.vstack((-np.eye(Mp),-np.eye(Mp)))))\n cp = np.vstack((np.zeros((Np,1)),np.ones((Mp,1))))\n bp = np.vstack((-b,b))\n Ap = matrix(Ap)\n bp = matrix(bp)\n cp = matrix(cp)\n \n \n SOL = solvers.lp(cp,Ap,bp)\n \n if (SOL['status'] != 'optimal'):\n print 'Solution of the current problem non optimal - Primal or dual infeasible'\n SOL = 0\n ap = b\n else:\n \n SOL = SOL['x']\n SOL = np.array(SOL[0:Np,0])\n \n ap = b+np.dot(W,SOL)\n else:\n \"\"\"\n ap = b\n #difference between rank and\n return (ap)#, r - W.shape[1]) \n \n \n 
\n","sub_path":"formulate_sparse2.py","file_name":"formulate_sparse2.py","file_ext":"py","file_size_in_byte":4036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"409997772","text":"\n# sum of elements in the array \n# complexity : O(n)\n\nprint(\"Enter the elements : \",end=\" \")\ninputArray = list(map(int,input().split(\" \")))\n\nsum =0\nfor element in inputArray:\n sum = sum + element\n\nprint(\"Sum of elements is :\", sum)\n\n\n\n\n\n","sub_path":"ArrayElementsSum.py","file_name":"ArrayElementsSum.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"525600370","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/lib/python2.7/site-packages/braincore/examples/brainfuck.py\n# Compiled at: 2014-11-26 01:27:29\nfrom __future__ import print_function\nfrom braincore import Cell\nimport sys\nif sys.version_info.major == 3:\n _input = input\nelse:\n _input = raw_input\n\nclass BrainFuck:\n\n def __init__(self, script=None):\n self.setup(script)\n\n def setup(self, script=None):\n self.loc_pointer = 0\n self.cell = Cell()\n self.output = ''\n self.script = script or ''\n self.script_len = len(self.script)\n\n def run(self):\n for op in self.get_ops():\n if op == '>':\n self.cell.right()\n elif op == '<':\n self.cell.left()\n elif op == '+':\n self.cell.inc()\n elif op == '-':\n self.cell.dec()\n elif op == '.':\n self.print_char()\n elif op == ',':\n self.read_char()\n elif op == '[':\n self.jz()\n elif op == ']':\n self.jnz()\n\n def repl(self):\n while True:\n try:\n script = _input('fuck > ')\n except EOFError:\n break\n\n self.setup(script)\n self.run()\n print(repr(self.output))\n print('ord output:', [ ord(c) for c in self.output ])\n print('current cell', ord(self.cell))\n print('current cell # ', ord(self.cell.pointer))\n\n def get_ops(self):\n while self.loc_pointer < self.script_len:\n yield self.script[self.loc_pointer]\n self.loc_pointer += 1\n\n def print_char(self):\n self.output += self.cell.read()\n\n def read_char(self):\n self.cell.write(ord(stdin.read(1)))\n\n def jz(self):\n if not self.cell:\n self.loc_pointer = self.script[self.loc_pointer:].index(']')\n\n def jnz(self):\n if self.cell:\n self.loc_pointer = self.script[:self.loc_pointer].rindex('[') - 1\n\n\ndef main():\n\n def print_usage():\n print('usage: %s script.bf')\n\n if not len(sys.argv) > 1:\n print_usage()\n exit(1)\n script = open(sys.argv[1]).read()\n sexec = BrainFuck(script)\n sexec.run()\n print(sexec.output)\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/braincore-2014.12.5.linux-x86_64.tar/brainfuck.py","file_name":"brainfuck.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"225128004","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom django.core.mail import send_mail\n\nfrom django.conf import settings\nfrom .forms import contactForm\n\ndef contact(request):\n title = 'Contact'\n form = contactForm(request.POST or None)\n confirm_message = None\n if form.is_valid():\n name = form.cleaned_data['name']\n message = form.cleaned_data['message']\n subject = 'Enlacerv'\n emailFrom = form.cleaned_data['email']\n message = '%s \\n\\n%s \\n\\n%s' 
%(name,emailFrom,message,)\n emailTo = [settings.EMAIL_HOST_USER]\n send_mail(subject,message,emailFrom,emailTo,fail_silently=True)\n title = \"¡Muchas Gracias!\"\n confirm_message = \"A la brevedad nos pondremos en contacto con usted.\"\n form = None\n\n context = {'title': title,'form':form, 'confirm_message':confirm_message}\n templates = 'contact.html'\n return render(request,templates,context)","sub_path":"contact/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"418181043","text":"#PIN 10 CLEAR naranja\n#PIN 11 CLOCK amarillo\n#PIN 13 OE negro\n#PIN 12 LATCH verde\n#PIN 14 DATAIN azul\n\n#Librerias necesarias\nimport RPi.GPIO as GPIO\nimport time\n\n#Data Pins needed on the RPi\nDATAIN=14 #DS\nLATCH=12 #STCP\nCLOCK=11 #SHCP\nCLEAR=10 #MR Low\nOE=13 #Output Enable Low\n\nicsleep=0.06 \n#defining all the single LEDs\nled1=0x80 #10000000\nled2=0x40 #01000000\nled3=0x20 #00100000\nled4=0x10 #00010000\nled5=0x08 #00001000\nled6=0x04 #00000100\nled7=0x02 #00000010\nled8=0x01 #00000001\n\n#GPIO definition\ndef setup():\n GPIO.setmode(GPIO.BCM)\n GPIO.cleanup()\n GPIO.setup(DATAIN,GPIO.OUT)\n GPIO.setup(CLOCK,GPIO.OUT)\n GPIO.setup(LATCH,GPIO.OUT)\n GPIO.setup(CLEAR,GPIO.OUT)\n GPIO.setup(OE,GPIO.OUT)\n\n GPIO.output(LATCH,False) #Latch is used to output the saved data\n GPIO.output(CLEAR,True) #Clear must always be true. False clears registers\n GPIO.output(OE,False) #Output Enable speaks for itself. Must be False to display\n GPIO.output(CLOCK,False) #Used to shift the value of DATAIN to the register\n GPIO.output(DATAIN,False)#Databit to be shifted into the register\ndef cleanup():\n #Set all leds to off\n writenumber(0)\n #writeout stored in character\n writeout()\n #writeout \"nothing\"\n writeout()\n time.sleep(0.7)\n GPIO.cleanup()\n \n#shifts in a bit (but does not write it yet)\ndef shift(input):\n if input == 1:\n input=True\n else:\n input=False\n\n GPIO.output(DATAIN,input)\n GPIO.output(CLOCK,GPIO.HIGH)\n GPIO.output(CLOCK,GPIO.LOW)\n GPIO.output(DATAIN,GPIO.LOW)\n\n#writes the stored data from register out to pins\ndef writeout():\n #Display LEDs\n GPIO.output(LATCH,GPIO.HIGH)\n #needed to read characters. 
otherwise the characters would be displayed too fast after each other\n    time.sleep(icsleep)\n    GPIO.output(LATCH,GPIO.LOW)\n\nsetup()\nshift(led1)\n","sub_path":"Circuito integrado/circuitoIntegrado.py","file_name":"circuitoIntegrado.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"495771809","text":"# coding=utf-8\n\nfrom Page.NavigateBar import NavigateBar\nfrom Page.HomePage import HomePage\nfrom Utils.Paths import FIREFOX_DRIVER_DIR\nfrom Src.PUnittest import PUnittest\nfrom Utils.Logger import testLogger\nfrom Utils.Decorator import my_testcase\nfrom selenium import webdriver\nfrom Utils.ParseConfig import parseConfig\nfrom Src.BrowserEngine import browserEngine\nimport unittest\nimport time\n\nSCREENSHOT_SWITCH = parseConfig.screenshot_config('ScreenShotSwitch')\n\nclass TestSubPageTitle(PUnittest):\n\n    driver = webdriver.Firefox(executable_path=FIREFOX_DRIVER_DIR)\n    homePage = HomePage(driver)\n    navigateBar = NavigateBar(driver)\n\n    @classmethod\n    def setUpClass(cls):\n        cls.homePage.browser.navigate_to('https://www.utest.com')\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.homePage.browser.quit()\n\n    def setUp(self):\n        testLogger.info(' {0} >> {1} '.format(self.__class__.__name__, self._testMethodName).center(80, '*'))\n\n    def tearDown(self):\n        testLogger.info('-' * 80 + '\\n')\n\n    @my_testcase\n    def test_articles_title(self):\n        navigate_bar_visible = self.navigateBar.navigate_bar().is_displayed()\n        if not navigate_bar_visible:\n            self.homePage.expand_navigate_button().click()\n        self.navigateBar.articles_button().click()\n        time.sleep(2)\n        title_label_text = self.homePage.title_label().get_text()\n        self.assertEqual(title_label_text, 'Software Testing Articles')\n\n    @my_testcase\n    def test_training_title(self):\n        navigate_bar_visible = self.navigateBar.navigate_bar().is_displayed()\n        if not navigate_bar_visible:\n            self.homePage.expand_navigate_button().click()\n        self.navigateBar.training_button().click()\n        time.sleep(2)\n        title_label_text = self.homePage.title_label().get_text()\n        self.assertEqual(title_label_text, 'Software Testing Courses')\n\n    @my_testcase\n    def test_tools_title(self):\n        navigate_bar_visible = self.navigateBar.navigate_bar().is_displayed()\n        if not navigate_bar_visible:\n            self.homePage.expand_navigate_button().click()\n        self.navigateBar.tools_button().click()\n        time.sleep(2)\n        title_label_text = self.homePage.title_label().get_text()\n        self.assertEqual(title_label_text, 'uxuxuxuxuxuxu')\n\n    @my_testcase\n    def test_formus_title(self):\n        navigate_bar_visible = self.navigateBar.navigate_bar().is_displayed()\n        if not navigate_bar_visible:\n            self.homePage.expand_navigate_button().click()\n        self.navigateBar.forums_button().click()\n        time.sleep(2)\n        title_label_text = self.homePage.title_label().get_text()\n        self.assertEqual(title_label_text, 'Software Testing Forums')\n\n    @my_testcase\n    def test_projects_title(self):\n        navigate_bar_visible = self.navigateBar.navigate_bar().is_displayed()\n        if not navigate_bar_visible:\n            self.homePage.expand_navigate_button().click()\n        self.navigateBar.projects_button().click()\n        time.sleep(2)\n        title_label_text = self.homePage.title_label().get_text()\n        self.assertEqual(title_label_text, 'xxxxxxxxxxx')\n\nif __name__ == '__main__':\n    
unittest.main()\n","sub_path":"TestSuite/TestSubPageTile.py","file_name":"TestSubPageTile.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"218161660","text":"# -*- coding: utf-8 -*-\n# How to set up and save the various variables needed to use TensorBoard\n\nimport tensorflow as tf\nimport numpy as np\n\ndata = np.loadtxt('./data.csv', delimiter=',', unpack=True, dtype='float32')\n\n# fur, wings, other, mammal, bird\n# x_data = 0, 1\n# y_data = 2, 3, 4\n# why do we transpose here??\nx_data = np.transpose(data[0:2])\ny_data = np.transpose(data[2:])\n\n# -------------------- Building the neural network model -------------------- #\nglobal_step = tf.Variable(0, trainable=False, name='global_step')\n\nX = tf.placeholder(tf.float32)\nY = tf.placeholder(tf.float32)\n\n# A block grouped with tf.name_scope is shown as a single layer in TensorBoard.\n# Names passed to tf.name_scope must not contain spaces.\nwith tf.name_scope('layer1'):\n    W1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.), name='W1')\n    L1 = tf.nn.relu(tf.matmul(X, W1))\n\nwith tf.name_scope('layer2'):\n    W2 = tf.Variable(tf.random_uniform([10, 20], -1., 1.), name='W2')\n    L2 = tf.nn.relu(tf.matmul(L1, W2))\n\nwith tf.name_scope('output'):\n    W3 = tf.Variable(tf.random_uniform([20, 3], -1., 1.), name='W3')\n    model = tf.matmul(L2, W3)\n\nwith tf.name_scope('optimizer'):\n    cost = tf.reduce_mean(\n        tf.nn.softmax_cross_entropy_with_logits(labels=Y, logits=model))\n\n    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)\n    train_op = optimizer.minimize(cost, global_step=global_step)\n\n    # Use the summary feature to generate the data TensorBoard consumes.\n    # tf.summary.scalar(name, tensor) stores a single scalar value.\n    tf.summary.scalar('cost', cost)\n\n# -------------------- Training the neural network model -------------------- #\nsess = tf.Session()\nsaver = tf.train.Saver(tf.global_variables())\n\ncheckpoint = tf.train.get_checkpoint_state('./model')\nif checkpoint and tf.train.checkpoint_exists(checkpoint.model_checkpoint_path):\n    saver.restore(sess, checkpoint.model_checkpoint_path)\nelse:\n    sess.run(tf.global_variables_initializer())\n\n# Collect the tensors to show in TensorBoard.\nmerged = tf.summary.merge_all()\n# Set the directory where the graph and tensor values will be stored.\nwriter = tf.summary.FileWriter('./logs', sess.graph)\n# After training, the saved logs can be inspected by starting a local server with:\n# tensorboard --logdir=./logs\n# http://localhost:6006\n\n# Run the training\nfor step in range(100):\n    sess.run(train_op, feed_dict={X: x_data, Y: y_data})\n\n    print('Step: %d, ' % sess.run(global_step),\n          'Cost: %.3f' % sess.run(cost, feed_dict={X: x_data, Y: y_data}))\n\n    # Collect and save the summary values at the right moments (e.g. 
every N steps)\n    summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})\n    writer.add_summary(summary, global_step=sess.run(global_step))\n\nsaver.save(sess, './model/dnn.ckpt', global_step=global_step)\n\n# -------------------- Checking the results -------------------- #\n# 0: other 1: mammal 2: bird\nprediction = tf.argmax(model, 1)\ntarget = tf.argmax(Y, 1)\nprint('Prediction:', sess.run(prediction, feed_dict={X: x_data}))\nprint('Ground truth:', sess.run(target, feed_dict={Y: y_data}))\n\ncheck_prediction = tf.equal(prediction, target)\naccuracy = tf.reduce_mean(tf.cast(check_prediction, tf.float32))\nprint('Accuracy: %.2f' % sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data}))\n","sub_path":"03_TensorBoard, Saver/02_TensorBoard.py","file_name":"02_TensorBoard.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"386799975","text":"\"\"\"HML\"\"\"\nfrom __future__ import print_function\nimport datetime\n\nimport psycopg2.extras\nimport pytest\nfrom pyiem.nws.products.hml import parser as hmlparser\nfrom pyiem.util import get_dbconn, get_test_file\n\n\n@pytest.fixture\ndef dbcursor():\n    \"\"\"Get database conn.\"\"\"\n    dbconn = get_dbconn('hads')\n    # Note the usage of RealDictCursor here, as this is what\n    # pyiem.twistedpg uses\n    return dbconn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n\ndef test_190313_missingstage(dbcursor):\n    \"\"\"Figure out why this HML is missing stage info.\"\"\"\n    prod = hmlparser(get_test_file(\"HML/HMLDMX.txt\"))\n    assert not prod.warnings\n    prod.sql(dbcursor)\n    dbcursor.execute(\"\"\"\n        SELECT * from hml_observed_data_2019 WHERE station = 'JANI4'\n        and valid > '2019-03-13' and valid < '2019-03-14'\n    \"\"\")\n    assert dbcursor.rowcount == 8\n\n\ndef test_160826_hmlarx(dbcursor):\n    \"\"\"Lets dance\"\"\"\n    prod = hmlparser(get_test_file(\"HML/HMLARX.txt\"))\n    prod.sql(dbcursor)\n    assert not prod.warnings\n    assert prod.data[0].stationname == \"CEDAR RIVER 2 S St. 
Ansgar\"\n\n\ndef test_161010_timing():\n \"\"\"test how fast we can parse the file, over and over again\"\"\"\n sts = datetime.datetime.now()\n for _ in range(100):\n hmlparser(get_test_file(\"HML/HMLARX.txt\"))\n ets = datetime.datetime.now()\n rate = (ets - sts).total_seconds() / 100.\n print(\"sec per parse %.4f\" % (rate,))\n assert rate < 1.\n","sub_path":"pyiem/nws/products/tests/test_hml.py","file_name":"test_hml.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"522239650","text":"#!/usr/bin/python\n\nimport math\nimport sys\n\ndef recipe_batches(recipe, pantry):\n\tnum_batches = sys.maxsize\n\tfor ingredient, amount in recipe.items():\n\t\tif ingredient in pantry:\n\t\t\tpantry_amount = pantry[ingredient]\n\t\t\tmultiplier = pantry_amount/amount\n\t\t\tif multiplier < num_batches:\n\t\t\t\tnum_batches = multiplier\n\t\telse:\n\t\t\treturn 0 \n\treturn int(num_batches)\n\n\n\nif __name__ == '__main__':\n # Change the entries of these dictionaries to test \n # your implementation with different inputs\n recipe = { 'milk': 100, 'butter': 50, 'sugar': 10, 'flour': 5 }\n ingredients = { 'milk': 232, 'butter': 100, 'flour': 51 }\n print(\"{batches} batches can be made from the available ingredients: {ingredients}.\".format(batches=recipe_batches(recipe, ingredients), ingredients=ingredients))\n","sub_path":"recipe_batches/recipe_batches.py","file_name":"recipe_batches.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"483470421","text":"def validate_brackets(input_brackets):\n open_list = [\"(\",\"[\",\"{\"]\n close_list = [\")\",\"]\",\"}\"]\n stack = []\n for i in input_brackets:\n if i in open_list:\n stack.append(i)\n elif i in close_list:\n pos = close_list.index(i)\n if ((len(stack) > 0) and (open_list[pos] == stack[len(stack)-1])):\n stack.pop()\n else:\n return False\n if len(stack) == 0:\n return True\n else:\n return False\n\nif __name__=='__main__':\n string_one = \"{[]{}}\"\n print(validate_brackets(string_one))\n\n string_two = \"{[][{}}\"\n print(validate_brackets(string_two))\n","sub_path":"python/code_challenges/stack-queue-brackets/stack-queue-brackets.py","file_name":"stack-queue-brackets.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"537536441","text":"from pyspark.ml.feature import BucketedRandomProjectionLSH\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.sql.functions import col\nfrom pyspark.sql import SparkSession\n\nif __name__ == \"__main__\":\n spark = SparkSession \\\n .builder \\\n .appName(\"BucketedRandomProjectionLSHExample\") \\\n .getOrCreate()\n\n dataA = [(0, Vectors.dense([1.0, 1.0]),),\n (1, Vectors.dense([1.0, -1.0]),),\n (2, Vectors.dense([-1.0, -1.0]),),\n (3, Vectors.dense([-1.0, 1.0]),)]\n dfA = spark.createDataFrame(dataA, [\"id\", \"features\"])\n\n dataB = [(4, Vectors.dense([1.0, 0.0]),),\n (5, Vectors.dense([-1.0, 0.0]),),\n (6, Vectors.dense([0.0, 1.0]),),\n (7, Vectors.dense([0.0, -1.0]),)]\n dfB = spark.createDataFrame(dataB, [\"id\", \"features\"])\n\n key = Vectors.dense([1.0, 0.0])\n\n brp = BucketedRandomProjectionLSH(inputCol=\"features\", outputCol=\"hashes\", bucketLength=2.0,\n numHashTables=3)\n model = brp.fit(dfA)\n\n # Feature Transformation\n print(\"The hashed dataset where hashed values are stored in the column 
'hashes':\")\n model.transform(dfA).show()\n\n # Compute the locality sensitive hashes for the input rows, then perform approximate\n # similarity join.\n # We could avoid computing hashes by passing in the already-transformed dataset, e.g.\n # `model.approxSimilarityJoin(transformedA, transformedB, 1.5)`\n print(\"Approximately joining dfA and dfB on Euclidean distance smaller than 1.5:\")\n model.approxSimilarityJoin(dfA, dfB, 1.5, distCol=\"EuclideanDistance\")\\\n .select(col(\"datasetA.id\").alias(\"idA\"),\n col(\"datasetB.id\").alias(\"idB\"),\n col(\"EuclideanDistance\")).show()\n\n # Compute the locality sensitive hashes for the input rows, then perform approximate nearest\n # neighbor search.\n # We could avoid computing hashes by passing in the already-transformed dataset, e.g.\n # `model.approxNearestNeighbors(transformedA, key, 2)`\n print(\"Approximately searching dfA for 2 nearest neighbors of the key:\")\n model.approxNearestNeighbors(dfA, key, 2).show()\n\n spark.stop()\n","sub_path":"ml/bucketed_random_projection_lsh_example.py","file_name":"bucketed_random_projection_lsh_example.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"63260920","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nDOCUMENTATION = \"\"\"\n lookup: searchfile\n author: Daniel Oh\n version_added: \"1.1\"\n short_description: read a file stream\n description:\n - This plugin will return contents of a certain file.\n options:\n _terms:\n description: path(s) of files to read\n required: True\n\"\"\"\n\nfrom ansible.errors import AnsibleError, AnsibleParserError\nfrom ansible.plugins.lookup import LookupBase\n\nclass LookupModule(LookupBase):\n def run(self, terms, variables=None, **kwargs):\n\n returndata = []\n\n for singleterm in terms:\n display.debug(\"Terms that looks for the file: %s\" % singleterm)\n lookupdata = self.find_file_in_search_path(variables, 'files', singleterm)\n display.vvvv(u\"Your lookupdata: %s\" % lookupdata)\n try:\n if lookupfile:\n contents, show_data = self._loader._get_file_contents(lookupfile)\n ret.append(contents.rstrip())\n else:\n raise AnsibleParserError()\n except AnsibleParserError:\n raise AnsibleError(\"Can't find a file: %s\" % singleterm)\n return returndata\n\n\n","sub_path":"Chapter 6/plugins/lookup/myplugin.py","file_name":"myplugin.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"98159841","text":"# -*- coding: utf-8 -*-\n# @Time : 3/3/18 3:40 AM\n# @Author : alpface\n# @Email : xiaoyuan1314@me.com\n# @File : urls.py\n# @Software: PyCharm\n\nfrom django.conf.urls import url\nfrom django.views.decorators.csrf import csrf_exempt\nfrom admin.views import *\n\nurlpatterns = [\n url(r'^tag/$', TagView.as_view(), name='tag'),\n url(r'^link/$', LinkView.as_view(), name='link'),\n url(r'^users/$', UsersView.as_view(), name='users'),\n url(r'^profile/$', ProfileView.as_view(), name='profile'),\n url(r'^visitor/$', VisitorListView.as_view(), name='visitor'),\n url(r'^upload/$', csrf_exempt(UploadView.as_view()), name='upload'),\n url(r'^categories/$', CategoryView.as_view(), name='categories'),\n url(r'^article/add/$', ArticleAddView.as_view(), name='article-add'),\n url(r'^article/list/$', ArticleListView.as_view(), name='article-list'),\n url(r'^article/edit/$', ArticleEditView.as_view(), name='article-edit'),\n 
url(r'^article/body/$', ArticleBodyView.as_view(), name='article-body'),\n url(r'^message/os/$', MessageOSView.as_view(), name='message-os'),\n url(r'^message/comment/$', MessageCommentView.as_view(), name='message-comment'),\n url(r'^$', DashboardView.as_view(), name='dashboard'),\n]\n","sub_path":"admin/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"226167338","text":"from sys import maxsize, stdin, stdout\nimport heapq\n\n\nclass WeightedGraph:\n def __init__(self):\n self.graph = [[]]\n self.list = []\n self.d = {}\n\n def size_of_matrix(self):\n return len(self.graph)\n\n def print_matrix(self):\n return self.graph\n\n def size_of_list(self):\n return len(self.list)\n\n def add_vertex(self, v):\n self.graph[0] += [v]\n self.d.update({v: [maxsize, None]})\n for i in range(1, self.size_of_matrix()):\n self.graph[i] += [0]\n self.graph += [[0] * self.size_of_matrix()]\n\n def add_directed_link(self, v1, v2, weight):\n if self.graph[self.graph[0].index(v1) + 1][self.graph[0].index(v2)] == 0:\n self.graph[self.graph[0].index(v1) + 1][self.graph[0].index(v2)] = weight\n self.d.update({v2: [weight, v1]})\n\n else:\n print('Error: these vertex already have rib.')\n\n def relax(self, u, v, weight):\n if self.d[v][0] > self.d[u][0] + weight:\n self.d[v][0] = self.d[u][0] + weight\n self.d[v][1] = u\n\n def initialize_single_source(self, s):\n for key, value in self.d.items():\n self.d[key][0] = maxsize\n self.d[key][1] = None\n self.d[s][0] = 0\n\n def paths(self, w):\n self.initialize_single_source(w)\n q = []\n for key in self.d.keys():\n q += [key]\n while len(q) != 0:\n u = heapq.heappop(q)\n self.list.append(u)\n for key in self.d.keys():\n if self.graph[self.graph[0].index(u) + 1][self.graph[0].index(key)] != 0:\n self.relax(u, key, self.graph[self.graph[0].index(u) + 1][self.graph[0].index(key)])\n array = []\n print(self.d)\n for key, value in self.d.items():\n if self.d[key][1] is None:\n if self.d[key][0] < maxsize:\n array += [[key]]\n else:\n array += [[None]]\n else:\n ar = []\n this_key = key\n while self.d[this_key][1] is not None:\n ar += [this_key]\n this_key = self.d[this_key][1]\n ar += [this_key]\n ar.reverse()\n array += [ar]\n return array\n\n\nif __name__ == '__main__':\n graph = WeightedGraph()\n graph.add_vertex(1)\n graph.add_vertex(2)\n graph.add_vertex(3)\n graph.add_vertex(4)\n graph.add_vertex(5)\n graph.add_directed_link(1, 4, 8)\n graph.add_directed_link(1, 2, 1)\n graph.add_directed_link(2, 3, 2)\n graph.add_directed_link(3, 4, 3)\n graph.add_directed_link(2, 4, 1)\n print(graph.paths(1))\n\n","sub_path":"lab16/Kravchenko/lab16.py","file_name":"lab16.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"172923701","text":"# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
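An aside on the `WeightedGraph.paths` routine above: it pops vertices with `heapq.heappop` from a list ordered by vertex label, not by tentative distance, so it is not a faithful Dijkstra. A minimal correct sketch with an adjacency dict and a distance-keyed heap, using the same edges the record builds:

# Hedged sketch: textbook Dijkstra with a distance-keyed heap.
import heapq

def dijkstra(adj, source):
    # adj: {u: [(v, weight), ...]}
    dist = {source: 0}
    heap = [(0, source)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist.get(u, float("inf")):
            continue  # stale heap entry
        for v, w in adj.get(u, []):
            if d + w < dist.get(v, float("inf")):
                dist[v] = d + w
                heapq.heappush(heap, (dist[v], v))
    return dist

print(dijkstra({1: [(2, 1), (4, 8)], 2: [(3, 2), (4, 1)], 3: [(4, 3)]}, 1))
# -> {1: 0, 2: 1, 4: 2, 3: 3}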
License.\n\nimport json\nimport mock\nfrom webob import exc\n\nfrom murano.api.v1 import environments\nfrom murano.common import policy\nfrom murano.db import models\nfrom murano.openstack.common import timeutils\nimport murano.tests.api.base as test_base\nimport murano.tests.utils as test_utils\n\n\n@mock.patch.object(policy, 'check')\nclass TestEnvironmentApi(test_base.ControllerTest, test_base.MuranoTestCase):\n RPC_IMPORT = 'murano.db.services.environments.rpc'\n\n def setUp(self):\n super(TestEnvironmentApi, self).setUp()\n self.controller = environments.Controller()\n\n def test_list_empty_environments(self, mock_policy_check):\n \"\"\"Check that with no environments an empty list is returned\"\"\"\n self._mock_policy_setup(mock_policy_check, 'list_environments')\n\n req = self._get('/environments')\n result = self.controller.index(req)\n self.assertEqual({'environments': []}, result)\n\n def test_create_environment(self, mock_policy_check):\n \"\"\"Create an environment, test environment.show()\"\"\"\n self._mock_policy_setup(mock_policy_check, 'create_environment')\n\n fake_now = timeutils.utcnow()\n timeutils.utcnow.override_time = fake_now\n\n uuids = ('env_object_id', 'network_id', 'environment_id')\n mock_uuid = self._stub_uuid(uuids)\n\n expected = {'tenant_id': self.tenant,\n 'id': 'environment_id',\n 'name': 'my_env',\n 'networking': {},\n 'version': 0,\n 'created': fake_now,\n 'updated': fake_now}\n\n body = {'name': 'my_env'}\n req = self._post('/environments', json.dumps(body))\n result = self.controller.create(req, body)\n self.assertEqual(expected, result)\n\n expected['status'] = 'ready'\n\n # Reset the policy expectation\n self._mock_policy_setup(mock_policy_check, 'list_environments')\n\n req = self._get('/environments')\n result = self.controller.index(req)\n\n self.assertEqual({'environments': [expected]}, result)\n\n expected['services'] = []\n\n # Reset the policy expectation\n self._mock_policy_setup(mock_policy_check, 'show_environment',\n target={'environment_id': uuids[-1]})\n\n req = self._get('/environments/%s' % uuids[-1])\n result = self.controller.show(req, uuids[-1])\n\n self.assertEqual(expected, result)\n self.assertEqual(3, mock_uuid.call_count)\n\n def test_missing_environment(self, mock_policy_check):\n \"\"\"Check that a missing environment results in an HTTPNotFound\"\"\"\n self._mock_policy_setup(mock_policy_check, 'show_environment',\n target={'environment_id': 'no-such-id'})\n\n req = self._get('/environments/no-such-id')\n self.assertRaises(exc.HTTPNotFound, self.controller.show,\n req, 'no-such-id')\n\n def test_update_environment(self, mock_policy_check):\n \"\"\"Check that environment rename works\"\"\"\n self._mock_policy_setup(mock_policy_check, 'update_environment',\n target={'environment_id': '12345'})\n\n fake_now = timeutils.utcnow()\n timeutils.utcnow.override_time = fake_now\n\n expected = dict(\n id='12345',\n name='my-env',\n version=0,\n networking={},\n created=fake_now,\n updated=fake_now,\n tenant_id=self.tenant,\n description={\n 'Objects': {\n '?': {'id': '12345'}\n },\n 'Attributes': {}\n }\n )\n e = models.Environment(**expected)\n test_utils.save_models(e)\n\n fake_now = timeutils.utcnow()\n timeutils.utcnow.override_time = fake_now\n\n del expected['description']\n expected['services'] = []\n expected['status'] = 'ready'\n expected['name'] = 'renamed env'\n expected['updated'] = fake_now\n\n body = {\n 'name': 'renamed env'\n }\n req = self._post('/environments/12345', json.dumps(body))\n result = 
self.controller.update(req, '12345', body)\n\n self._mock_policy_setup(mock_policy_check, 'show_environment',\n target={'environment_id': '12345'})\n req = self._get('/environments/12345')\n result = self.controller.show(req, '12345')\n\n self.assertEqual(expected, result)\n\n def test_delete_environment(self, mock_policy_check):\n \"\"\"Test that environment deletion results in the correct rpc call\"\"\"\n self._mock_policy_setup(mock_policy_check, 'delete_environment',\n target={'environment_id': '12345'})\n\n fake_now = timeutils.utcnow()\n expected = dict(\n id='12345',\n name='my-env',\n version=0,\n networking={},\n created=fake_now,\n updated=fake_now,\n tenant_id=self.tenant,\n description={\n 'Objects': {\n '?': {'id': '12345'}\n },\n 'Attributes': {}\n }\n )\n e = models.Environment(**expected)\n test_utils.save_models(e)\n\n rpc_task = {\n 'tenant_id': self.tenant,\n 'model': {'Attributes': {}, 'Objects': None},\n 'token': None\n }\n\n req = self._delete('/environments/12345')\n result = self.controller.delete(req, '12345')\n\n self.mock_engine_rpc.handle_task.assert_called_once_with(rpc_task)\n\n # Should this be expected behavior?\n self.assertEqual(None, result)\n","sub_path":"murano/tests/api/v1/test_environments.py","file_name":"test_environments.py","file_ext":"py","file_size_in_byte":6319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"480724326","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build\\bdist.win-amd64\\egg\\chatette_qiu\\adapters\\factory.py\n# Compiled at: 2019-03-27 03:00:31\n# Size of source mod 2**32: 742 bytes\n__doc__ = '\\nModule `chatette_qiu.adapters.factory`.\\nDefines a factory method that allows to create an adapter from a string name.\\n'\nfrom chatette_qiu.adapters.jsonl import JsonListAdapter\nfrom chatette_qiu.adapters.rasa import RasaAdapter\n\ndef create_adapter(adapter_name):\n \"\"\"\n Instantiate an adapter and returns it given the name of the adapter as a str.\n Names are:\n - 'rasa': RasaAdapter\n - 'jsonl': JsonListAdapter\n \"\"\"\n if adapter_name is None:\n return\n else:\n adapter_name = adapter_name.lower()\n if adapter_name == 'rasa':\n return RasaAdapter()\n if adapter_name == 'jsonl':\n return JsonListAdapter()\n raise ValueError('Unknown adapter was selected.')","sub_path":"pycfiles/ChatExchange-0.0.4-py3-none-any/factory.cpython-36.py","file_name":"factory.cpython-36.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"328856352","text":"\n\nfrom xai.brain.wordbase.adjectives._boozy import _BOOZY\n\n#calss header\nclass _BOOZIER(_BOOZY, ):\n\tdef __init__(self,): \n\t\t_BOOZY.__init__(self)\n\t\tself.name = \"BOOZIER\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"boozy\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_boozier.py","file_name":"_boozier.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"399195343","text":"import numpy as 
np\n\n'''\nhttp://iamtrask.github.io/2015/07/12/basic-python-network/\nhttps://medium.com/technology-invention-and-more/how-to-build-a-simple-neural-network-in-9-lines-of-python-code-cc8f23647ca1\nhttps://yjango.gitbooks.io/superorganism/content/ren_gong_shen_jing_wang_luo.html\n'''\n\n# sigmoid function\ndef nonlin(x,deriv=False):\n if(deriv==True):\n return x*(1-x)\n\n return 1/(1+np.exp(-x))\n\n# print('nonlin-->\\n')\n# print(nonlin(-4.6))\n# print(nonlin(4.8))\n# print(nonlin(5))\n# print(nonlin(-4.8))\n# print(nonlin(9.6))\n# input dataset\n\nX = np.array([ [0,0,1],\n [0,1,1],\n [1,0,1],\n [1,1,1] \n ])\n\n# output dataset \ny = np.array([[0,0,1,1]]).T \n\n# seed random numbers to make calculation\n\n# deterministic (just a good practice)\n\nnp.random.seed(1)\n# initialize weights randomly with mean 0\n\nsyn0 = 2*np.random.random((3,1)) - 1\n \n\nfor iter in range(3):\n # forward propagation\n l0 = X\n l1 = nonlin(np.dot(l0,syn0)) \n # how much did we miss?\n l1_error = y - l1\n # print('l1_error:', l1_error)\n # print('nonlin(l1,True):', nonlin(l1,True))\n # multiply how much we missed by the\n # slope of the sigmoid at the values in l1\n l1_delta = l1_error * nonlin(l1,True)\n # print('l1_delta:', l1_delta)\n # print('np.dot(l0.T,l1_delta):', np.dot(l0.T,l1_delta) )\n # update weights\n syn0 += np.dot(l0.T,l1_delta) \n\nprint(\"Output After Training:\\n\",\n l1,\"\\nsyn0:\", syn0)\n\n","sub_path":"05Deep Learning with Python- A Hands-on Introduction/ch3Feed Forward Neural Networks/i5a tiny toy network.py","file_name":"i5a tiny toy network.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"342497367","text":"#!/usr/bin/env python3\n#-*- coding:utf-8 -*-\n# File: /Users/king/Python初级算法/code/4/intersect_last.py\n# Project: /Users/king/Python初级算法/code/4\n# Created Date: 2019/01/26\n# Author: hstking hst_king@hotmail.com\n\n\ndef intersect(nums1, nums2):\n # rList = []\n # if len(nums1) > len(nums2):\n # nums1, nums2 = nums2, nums1\n # for n in nums1:\n # if n in nums2:\n # rList.append(n)\n # nums2.remove(n)\n # return rList\n # for when nums1 is much smaller than nums2\n\n\n\n # rList = []\n # nums1.sort()\n # nums2.sort()\n # p1 = 0 #point for nums1\n # p2 = 0 #point for nums2\n # while p1 < len(nums1) and p2 < len(nums2):\n # if nums1[p1] < nums2[p2]:\n # p1 += 1\n # elif nums1[p1] == nums2[p2]:\n # rList.append(nums1[p1])\n # p1 += 1\n # p2 += 1\n # else:\n # p2 += 1\n # return rList\n # algorithm on sorted arrays\n\n\n\n\n # for when memory is tight\n if len(nums1) > len(nums2):\n nums1, nums2 = nums2, nums1\n rList = []\n left = 0\n right = len(nums1) - 1\n while right < len(nums2):\n temp = nums2[left: right]\n for n in nums1:\n if n in temp:\n rList.append(n)\n temp.remove(n)\n left = right\n if right + len(nums1) >= len(nums2):\n right = len(nums2)\n else:\n right += len(nums1)\n return rList\n # read nums2 in chunks\n\n \n\n\n\n\nif __name__ == \"__main__\":\n # nums1 = [4, 9, 5]\n # nums2 = [9, 4, 9, 8, 4]\n nums1 = [1, 2, 2, 1]\n nums2 = [2, 2]\n rList = intersect(nums1, nums2)\n print(\"The intersection is : %s\" %rList)","sub_path":"book-code/图解LeetCode初级算法-源码/4/intersect_last.py","file_name":"intersect_last.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"297772767","text":"#!/usr/bin/env python\n\n\"\"\"Report the historical change in a stock price over a time period.\n\nFor a given stock ticker symbol, this tool downloads the stock price 
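The intersect record above leaves its two-pointer variant commented out; a standalone, runnable version of that approach is sketched here for reference (illustrative only, assuming both inputs may arrive unsorted):

# Two-pointer intersection over sorted copies of the inputs.
def intersect_sorted(nums1, nums2):
    nums1, nums2 = sorted(nums1), sorted(nums2)
    i = j = 0
    out = []
    while i < len(nums1) and j < len(nums2):
        if nums1[i] < nums2[j]:
            i += 1
        elif nums1[i] > nums2[j]:
            j += 1
        else:
            out.append(nums1[i])
            i += 1
            j += 1
    return out

assert intersect_sorted([1, 2, 2, 1], [2, 2]) == [2, 2]
assert intersect_sorted([4, 9, 5], [9, 4, 9, 8, 4]) == [4, 9]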
history.\nThen, for some specified time period (e.g., 25 business days), it analyzes every\nsuch time period from some start point (either the earliest date in the data, or\na specified date) to the current date, with increments of one business day. For\neach time period, it computes the greatest price decrease or increase from the\nclose of the start date to any point in the period, as a ratio of the closing\nprice at the start of the period. The result is plotted as a cumulative\ndistribution function.\n\"\"\"\n\nimport analyze\nimport history\nimport parser\n\ndef main():\n # Parse the command-line arguments.\n arg_parser = parser.Parser(__doc__)\n arg_parser.run()\n args = arg_parser.args\n\n # Download the closing and high/low data.\n data = history.Data(args)\n data.run()\n\n # Compute the min/max ratios in each time period, and compute PDF/CDF.\n stats = analyze.Stats(data, args)\n stats.compute()\n stats.plot_distributions()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"stock_change.py","file_name":"stock_change.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"607572176","text":"# Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.\n\nimport os\nimport os.path\nimport plistlib\nimport re\nimport yaml\nfrom argparse import ArgumentParser\nfrom typing import *\nfrom pithy.ansi import *\nfrom pithy.dict import dict_set_defaults\nfrom pithy.io import *\nfrom pithy.fs import *\nfrom pithy.json import load_json, parse_json, write_json\nfrom pithy.string import find_and_clip_suffix\nfrom pithy.task import *\n\n\nCRAFT_PROJECT_DIR = 'CRAFT_PROJECT_DIR'\nCRAFT_CONFIG_PATH = 'CRAFT_CONFIG_PATH'\nCRAFT_SWIFT_PATH = 'CRAFT_SWIFT_PATH'\nXCODE_DEVELOPER_DIR = 'XCODE_DEVELOPER_DIR'\nXCODE_TOOLCHAIN_DIR = 'XCODE_TOOLCHAIN_DIR'\n\n\ndef load_craft_config():\n\n try: project_dir = os.environ[CRAFT_PROJECT_DIR]\n except KeyError:\n project_dir = rel_path(find_project_dir())\n if project_dir is None: exit(f'craft error: could not identify project directory.')\n os.environ[CRAFT_PROJECT_DIR] = project_dir\n\n try: config_path = os.environ[CRAFT_CONFIG_PATH]\n except KeyError:\n names = [n for n in list_dir(project_dir) if path_name_stem(n) == 'craft']\n if not names: exit(f'craft error: no craft file in project dir: {project_dir!r}')\n if len(names) > 1: exit(f'craft error: multiple craft files in project dir: {project_dir!r}; {\", \".join(names)}')\n config_path = norm_path(path_join(project_dir, names[0]))\n os.environ[CRAFT_CONFIG_PATH] = config_path\n\n try: swift_path = os.environ[CRAFT_SWIFT_PATH]\n except KeyError: swift_path = path_for_cmd('swift')\n os.environ[CRAFT_SWIFT_PATH] = swift_path\n\n # TODO: Xcode is macOS only.\n try: xcode_dev_dir = os.environ[XCODE_DEVELOPER_DIR]\n except KeyError:\n xcode_dev_dir = find_dev_dir()\n os.environ[XCODE_DEVELOPER_DIR] = xcode_dev_dir\n\n try: xcode_toolchain_dir = os.environ[XCODE_TOOLCHAIN_DIR]\n except KeyError:\n xcode_toolchain_dir = find_toolchain_dir(swift_path, xcode_dev_dir)\n os.environ[XCODE_TOOLCHAIN_DIR] = xcode_toolchain_dir\n\n config = parse_craft(config_path)\n config['config-path'] = config_path\n config['project-dir'] = project_dir\n config['swift-path'] = swift_path\n config['xcode-dev-dir'] = xcode_dev_dir\n config['xcode-toolchain-dir'] = xcode_toolchain_dir\n\n c = CraftConfig(**{k.replace('-', '_'): v for (k, v) in config.items()}) # TODO: validate types.\n\n if not 
is_sub_path(c.build_dir): exit(f'craft error: build-dir must be a subpath: {c.build_dir!r}')\n\n if c.target_macOS and not re.fullmatch(r'\\d+\\.\\d+', c.target_macOS):\n exit(f\"craft error: target-macOS should be 'MAJOR.MINOR' number; received {c.target_macOS!r}\")\n\n return c\n\n\ndef parse_craft(path):\n try: f = open(path)\n except FileNotFoundError: exit(f'craft error: craft file does not exist: {path!r}')\n if path_ext(path) != '.yaml': exit(f'craft error: caft file must be a `.yaml` file.') # TODO: relax this restriction.\n with f: d = yaml.load(f)\n for k, v in d.items():\n if k in craft_nonconfigurable_keys: exit(f'craft error: key is not configurable: {k!r}')\n if k not in craft_configurable_keys: exit(f'craft error: invalid craft config key: {k!r}')\n missing_keys = craft_required_keys.difference(d)\n if missing_keys: exit('\\n '.join([f'craft error: missing required keys in {path!r}:', *sorted(missing_keys)]))\n dict_set_defaults(d, craft_config_defaults)\n return d\n\n\ndef update_swift_package_json(config) -> Any:\n make_dirs(config.build_dir)\n src = 'Package.swift'\n dst = f'{config.build_dir}/swift-package.json'\n if product_needs_update(dst, source=src):\n dev_dir = config.xcode_dev_dir\n lib_pm_4_dir = f'{config.xcode_toolchain_dir}/usr/lib/swift/pm/4'\n cmd = [\n 'swiftc',\n '--driver-mode=swift',\n '-swift-version', '4',\n '-target', 'x86_64-apple-macosx10.10',\n '-sdk', dev_dir + '/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.13.sdk',\n '-I', lib_pm_4_dir,\n '-L', lib_pm_4_dir,\n '-lPackageDescription',\n 'Package.swift',\n '-fileno', '1',\n ]\n o = runO(cmd, exits=True)\n data = parse_json(o)\n with open(dst, 'w') as f:\n write_json(f, data)\n return data\n else:\n return load_json(open(dst))\n\n\nclass CraftConfig(NamedTuple):\n build_dir: str\n config_path: str\n copyright: str\n project_dir: str\n target_macOS: str\n swift_path: str\n xcode_dev_dir: str\n xcode_toolchain_dir: str\n product_name: Optional[str] = None\n product_identifier: Optional[str] = None\n sources: str = 'src'\n resources: Dict[str, str] = {}\n ts_modules: Dict[str, str] = {}\n\n @property\n def target_triple_macOS(self) -> str: return f'x86_64-apple-macosx{self.target_macOS}'\n\n\ncraft_required_keys = frozenset({\n 'copyright'\n})\n\ncraft_config_defaults = {\n 'copyright': 'Dedicated to the public domain under CC0: https://creativecommons.org/publicdomain/zero/1.0/.',\n 'build-dir': '_build',\n 'target-macOS': '10.13',\n}\n\n# TODO: derive this from CraftConfig class def.\ncraft_configurable_keys = frozenset({\n *craft_required_keys,\n *craft_config_defaults,\n 'product-name',\n 'product-identifier',\n 'sources',\n 'resources',\n 'ts-modules',\n})\n\ncraft_nonconfigurable_keys = frozenset({\n 'config-path',\n 'project-dir',\n 'xcode-dev-dir',\n})\n\n\ndef find_dev_dir() -> str:\n dev_dir_line = runO('xcode-select --print-path',\n exits=\"craft error: 'xcode-select --print-path' failed; could not determine XCODE_DEVELOPER_DIR.\")\n return dev_dir_line.rstrip('\\n')\n\n\ndef find_toolchain_dir(swift_path:str, dev_dir:str) -> str:\n if swift_path is None: exit('no `swift` executable found in PATH.')\n path = real_path(swift_path)\n parts = path_split(swift_path)\n for i, part in enumerate(parts):\n if path_ext(part) == '.xctoolchain':\n return path_join(*parts[:i+1])\n # default to dev dir.\n return f'{dev_dir}/Toolchains/XcodeDefault.xctoolchain'\n\n\nclass Private(NamedTuple):\n sym: str\n\n\ndef handle_yaml_private(loader, node) -> str:\n return 
Private(sym=resolve_yaml_node(node.value))\n\ndef resolve_yaml_node(node: Any) -> Any:\n if isinstance(node, yaml.Node): return resolve_yaml_node(node.value)\n if isinstance(node, list): return [resolve_yaml_node(n) for n in node]\n if isinstance(node, dict): return {resolve_yaml_node(k): resolve_yaml_node(v) for k, v in node.items()}\n return node\n\n\n# NOTE: modifies the global default yaml Loader object.\nyaml.add_constructor('!private', handle_yaml_private)\n\n","sub_path":"craft/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"49851921","text":"from Jumpscale import j\n\n\nclass Farms:\n def __init__(self, session, url):\n self._session = session\n self._base_url = url\n j.data.schema.add_from_path(\n \"/sandbox/code/github/threefoldtech/jumpscaleX_threebot/ThreeBotPackages/tfgrid/directory/models\"\n )\n self._model = j.data.schema.get_from_url(\"tfgrid.directory.farm.1\")\n\n def list(self, threebot_id=None):\n url = self._base_url + \"/farms\"\n if threebot_id:\n url += f\"?owner={threebot_id}\"\n resp = self._session.get(url)\n farms = []\n for farm_data in resp.json():\n farm = self._model.new(datadict=farm_data)\n farms.append(farm)\n return farms\n\n def new(self):\n return self._model.new()\n\n def register(self, farm):\n resp = self._session.post(self._base_url + \"/farms\", json=farm._ddict)\n return resp.json()[\"id\"]\n\n def get(self, farm_id):\n resp = self._session.get(self._base_url + f\"/farms/{farm_id}\")\n return self._model.new(datadict=resp.json())\n","sub_path":"JumpscaleLibs/clients/explorer/farms.py","file_name":"farms.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"331840836","text":"\n\nfrom xai.brain.wordbase.adjectives._acid import _ACID\n\n#class header\nclass _ACIDS(_ACID, ):\n\tdef __init__(self,): \n\t\t_ACID.__init__(self)\n\t\tself.name = \"ACIDS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"acid\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_acids.py","file_name":"_acids.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"353101569","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# \n# minphaseshift.py\n# aopy\n# \n# Created by Alexander Rudy on 2013-07-25.\n# Copyright 2013 Alexander Rudy. 
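A brief usage sketch for the `!private` tag registered in the craft module above; this assumes the module's `yaml` import and constructors are in scope, and newer PyYAML versions may warn about (or require) an explicit Loader argument.

# Hedged usage sketch for the custom `!private` YAML tag.
doc = "token: !private my-secret"
loaded = yaml.load(doc)   # constructor was registered on the default Loader
print(loaded["token"])    # -> Private(sym='my-secret')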
All rights reserved.\n# \n\n\"\"\"\nShow the total phase error as a function of subaperture shifts.\n\"\"\"\n\nfrom __future__ import (absolute_import, unicode_literals, division,\n print_function)\nimport matplotlib\nimport numpy as np\nfrom aopy.atmosphere.wind import BlowingScreen, ManyLayerScreen\nfrom aopy.util.math import depiston, circle\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nimport time, itertools\n\ndef shift_phase(phase,shift):\n \"\"\"docstring for shift_phase\"\"\"\n import scipy.ndimage.interpolation\n return scipy.ndimage.interpolation.shift(\n input = phase,\n shift = shift,\n order = 3, #Linear interpolation!\n mode = 'constant', #So we blank out the non-overlapping ones.\n )\n\nap = circle(14,15)\n\nx_shifts, y_shifts = np.mgrid[-40:40:0.5,-40:40:0.5]\nshape = x_shifts.shape\nscreen = ManyLayerScreen((30,30),50,vel=[2.1,3.4])\nphi_a = screen.get_screen(0)\nphi_b = screen.get_screen(1)\nerr = np.zeros(shape,dtype=np.float)\nfor x,y in itertools.product(*map(range,shape)):\n shift = [x_shifts[x,y],y_shifts[x,y]]\n new_phi = shift_phase(phi_a,shift)\n err[x,y] = np.log10(np.sum(np.abs(phi_b - new_phi)) / np.sum(new_phi != 0.0))\n# err = np.reshape(err,x_shifts.shape)\nfig = plt.figure(figsize=(6,4))\nax = fig.add_subplot(111)\nax.plot(y_shifts[81,:],err[81,:])\nax.set_xlabel(r\"Shift ($v_x\\; \\mathrm{(m/s)}$)\")\nax.set_ylabel(\"Error (arbitrary)\")\nplt.savefig(\"figures/GN_1d_errfunc.pdf\")\nfig = plt.figure()\nax = fig.add_subplot(111)\nim = ax.imshow(err,interpolation='nearest',extent=[-40,40,-40,40])\nax.set_xlabel(r\"$v_x\\; \\mathrm{(m/s)}$\")\nax.set_ylabel(r\"$v_y\\; \\mathrm{(m/s)}$\")\ncb = fig.colorbar(im)\ncb.set_label(\"Error (arbitrary)\")\nplt.savefig(\"figures/GN_errfunc.pdf\")","sub_path":"examples/minphaseshift.py","file_name":"minphaseshift.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"4930263","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Question\nfrom django.shortcuts import get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import Choice\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.staticfiles.templatetags.staticfiles import static\n\n# Create your views here.\n\n@csrf_exempt\ndef index(request):\n latest_question_list = Question.objects.order_by('pub_date')[:5]\n output = '<head><link rel=\"stylesheet\" href=\"' + static('polls/style.css') + '\"></head> <ul>'\n for question in latest_question_list:\n output = output + '<li><a href=\"/polls/' +str(question.id) + '/\">' + question.question_text + '</a></li>'\n output = output + \"</ul>\"\n return HttpResponse(output)\n\n@csrf_exempt \ndef detail(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n question_id = int(question_id)\n output = \"\"\n if question_id %10 == 1 and question_id != 11:\n output = \"You're looking at the %sst question.\" % question_id\n elif question_id %10 == 2 and question_id != 12:\n output = \"You're looking at the %snd question.\" % question_id\n elif question_id %10 == 3 and question_id != 13:\n output = \"You're looking at the %srd question.\" % question_id\n else:\n output = \"You're looking at the %sth question.\" % question_id\n output = \"<h1>\" + question.question_text + \"</h1><p>\" + output + \"</p>\"\n output = output + '<form action =\"/polls/' + str(question_id) + '/vote/\" method = 
\"post\">'\n count = 1\n for choice in question.choice_set.all():\n output = output + '<input type=\"radio\" name = \"choice\" id=\"choice' + str(count) + '\"value=\"' + str(choice.id) + '\" />'\n output = output + '<label for=\"choice' + str(count) + '\">' + choice.choice_text + '</label><br />'\n count += 1\n output = '<head><link rel=\"stylesheet\" href=\"' + static('polls/style.css') + '\"></head>' + output + '<input type=\"submit\" value = \"Vote\" /></form>'\n return HttpResponse(output)\n\n@csrf_exempt\ndef vote(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n try:\n user_choice = question.choice_set.get(pk=request.POST['choice'])\n except (KeyError, Choice.DoesNotExist):\n return HttpResponse(\"No choice selected.\")\n else:\n user_choice.votes += 1\n user_choice.save()\n return HttpResponseRedirect(\"/polls/\" + str(question_id) + \"/results/\")\n \n@csrf_exempt\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n output = '<h1></pre>' + question.question_text + '</h1><ul>'\n for choice in question.choice_set.all():\n output = output + '<li>' + choice.choice_text + ' >>> ' + str(choice.votes)\n output = output + '</ul>'\n return HttpResponse(output)","sub_path":"CSE410/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"222374776","text":"# 每日生成一个日志文件\nimport logging\nimport time\nfrom logging import handlers\n\nlogger = logging.getLogger(\"haha\")\n# 创建日志器\n# fh = handlers.TimedRotatingFileHandler('./a.log', encoding='utf-8')\nlogger.setLevel(logging.DEBUG)\n# 创建处理器\n\n# tfh = logging.handlers.TimedRotatingFileHandler('time.log', when='s', interval=20, backupCount=1, encoding='utf-8')\ntfh = logging.handlers.TimedRotatingFileHandler(\"time.log\", when='s', interval=20, backupCount=3, encoding=\"utf-8\")\n\n# 创建格式器\nformatter = logging.Formatter(\n fmt=\"%(asc_time)s %(level_name)s [%(name)s] \"\n \"[%(filename)s(%(funcName)s:%(line_no)d)] - %(message)s\")\n# 格式化器 放入 处理器\ntfh.setFormatter(formatter)\n# 处理器 放入 日志器\nlogger.addHandler(tfh)\n# 产生日志\nwhile True:\n logger.debug('this is a debug')\n time.sleep(10)\n","sub_path":"TestAutoProject/WebAutoDrive/day8_web/day8/lianxi_06_logging_time_rotate_file_handler.py","file_name":"lianxi_06_logging_time_rotate_file_handler.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"19462323","text":"\"\"\"\nBy listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see that the 6th prime is 13.\n\nWhat is the 10 001st prime number?\n\"\"\"\n\nimport time\nimport math\n\n\nstart = time.time()\n\n\ndef check_if_prime(n):\n if n == 2: return True # Two is the only even prime\n if n % 2 == 0 or n < 2: return False # Eliminate all even numbers and 1\n for i in range(3, int(math.sqrt(n) + 1), 2):\n if n % i == 0: return False\n return True\n\n\nn = count = 1\nwhile count < 10001:\n n += 2\n if check_if_prime(n): count += 1\nprint(n)\nprint(\"Execution time {}\".format(time.time() - start))\n","sub_path":"problem_7/solution_1a.py","file_name":"solution_1a.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"317444697","text":"# TEXTURE.txt 格式:第一行为纹理的长和高,用空格隔开,第二行为16进制纹理数据\n\nimport struct\nimport numpy as np\n\n\ndef resolve(in_file: str) -> 
np.ndarray:\n with open(in_file) as f:\n header = f.readline()\n raw_data = f.read()\n\n (width, _, height) = header[:-1].partition(' ')\n width = int(width)\n height = int(height)\n\n weights = struct.unpack(\"<%df\" % (width * height * 4), bytes.fromhex(raw_data))\n weights = np.array(weights, dtype=np.float)\n\n return weights.reshape((height, width, 4))\n","sub_path":"tools/HookTextureResolver/resolve.py","file_name":"resolve.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"326853108","text":"import xml.etree.ElementTree as ET\nimport os\n\nfilenames_positivos = []\ndimensions = []\nfor file in os.listdir(\"./annotations\"):\n doc = ET.parse(\"./annotations/\"+file)\n root = doc.getroot()\n if len(root.findall(\"object\")) == 1:\n a = root[4][0].text\n if a == \"with_mask\":\n filenames_positivos.append(file.split('.')[0] + \".png\")\n\n \nf = open(\"positives.txt\", \"w+\")\nfor name in filenames_positivos:\n f.write('images/'+ name + '\\n')\n\n","sub_path":"archive/deleter.py","file_name":"deleter.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"184536287","text":"# -*- coding: utf-8 -*-\nimport math\nimport sys\nfrom ball_in_box import config\n\n__all__ = ['ball_in_box']\n\n\n\n\ndef ball_in_box(num_of_circle, blockers):\n \"\"\"\n Main body of algorithm: Greedy Algorithm\n \"\"\"\n xrange = config.XRANGE\n yrange = config.YRANGE\n percision = config.PERCISION\n circles = []\n dots = prod_dots(xrange, yrange, percision)\n for i in range(num_of_circle):\n temp_r = 0\n circle = [0, 0, 0]\n for dot in dots:\n r = get_max_r(dot, xrange, yrange, blockers, circles)\n if r > temp_r:\n temp_r = r\n circle[0] = dot[0]\n circle[1] = dot[1]\n circle[2] = temp_r\n\n dots.remove((circle[0], circle[1]))\n circles.append((circle[0], circle[1], circle[2]))\n\n return circles\n","sub_path":"ball_in_box/ballinbox.py","file_name":"ballinbox.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"583073685","text":"import csv\r\nimport CG_system as cg_sys\r\n\r\nprint(\"开始读取轨迹数据\")\r\n# planno_trajectory实体 {调度单号:轨迹实体}\r\ntra_reader = csv.reader(open('dd_truck_location_zjxl.csv', encoding= 'utf-8'))\r\ntruckno_trapoilist = cg_sys.traj_formed(tra_reader, '11/2020') # {调度单号: class{车牌号,轨迹点集}}\r\nprint(\"读取轨迹数据结束\")\r\n\r\nprint(\"开始读取调度单和运单数据\")\r\n# plan_map {调度单号:调度单实体}\r\n# truckno_planobjlist {车牌号:[调度单实体]}\r\n# planno_waybillobjlist {调度单号:[运单实体]}\r\nplan_reader = csv.reader(open(\"ods_db_trans_t_plan.csv\", encoding='utf-8'))\r\nway_reader = csv.reader(open(\"dwd_waybill.csv\", encoding='utf-8'))\r\ntruckno_planobjlist, planno_waybillobjlist ,plan_map = cg_sys.plan_formed(plan_reader, way_reader)\r\nprint(\"读取调度单和运单数据结束\")\r\n\r\n# 车牌号:轨迹点实体 车牌号:调度单实体\r\nplanno_trajobj = cg_sys.match_planno_trajobj(truckno_trapoilist, truckno_planobjlist) # {调度单号:轨迹实体()}\r\n\r\n# 噪声数据处理\r\nplanno_trajobj = cg_sys.tra_preprocess(planno_trajobj, plan_map)\r\n\r\n# 写出csv文件\r\nwith open('results.csv','w') as f:\r\n spamwriter = csv.writer(f, dialect='excel')\r\n spamwriter.writerow(['planno', 'create_time', 'load_date', 'return_time', 'truckno', 'time', 'lon', 'lat', 'speed', 'height',\r\n 'direction', 'total_distance', 'status', 'isnoise'])\r\n for planno, trajobj in planno_trajobj.items():\r\n #planno\r\n create_time = 
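A quick round-trip check of the `struct` decoding used by the texture resolver above: pack four floats as little-endian, hex-encode them the way TEXTURE.txt stores data, and confirm that unpacking recovers the values (illustrative sketch only).

# Round-trip check for the "<%df" little-endian float layout.
import struct

vals = (0.0, 0.25, 0.5, 1.0)
hex_data = struct.pack("<4f", *vals).hex()
assert struct.unpack("<%df" % 4, bytes.fromhex(hex_data)) == vals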
str(plan_map[planno].create_time)\r\n load_date = str(plan_map[planno].load_date)\r\n return_time = str(plan_map[planno].return_bill_date)\r\n truckno = str(trajobj.car_mark)\r\n for trapoi in trajobj.tra_poi_list:\r\n time = str(trapoi.time)\r\n lon = str(trapoi.lon)\r\n lat = str(trapoi.lat)\r\n speed = str(trapoi.speed)\r\n height = str(trapoi.height)\r\n direction = str(trapoi.direction)\r\n total_distance = str(trapoi.total_distance)\r\n status = str(trapoi.status)\r\n isnoise = str(trapoi.isNoise)\r\n spamwriter.writerow([planno, create_time, load_date, return_time, truckno, time, lon, lat, speed, height,\r\n direction, total_distance, status, isnoise])\r\n\r\n","sub_path":"轨迹数据处理/generate_traj/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"33859511","text":"from classes import *\n\n\nplayer=Guerreros()\nplayer.define_atributos()\nplayer.define_rareza_inicial()\nplayer.info_combate()\n\nplayer2=Guerreros()\t\nplayer2.define_atributos()\nplayer2.define_rareza_inicial()\nplayer2.info_combate()\t\n\nfor i in range(5):\n\tif player.vida>0 or player2.vida>0: \n\t\tplayer2.vida_actual-=player.ataque*2-player.defensa\n\t\tplayer.vida_actual-=player2.ataque*2-player2.defensa\n\n\telse:\n\t\tpass\nif player.vida_actual<=0:\n\tplayer.vida_actual=0\n\tprint(\"Jugador 2 gana\")\n\tplayer.info_combate()\n\tplayer2.info_combate()\n\tprint(\"\\nJugador 2 gana\")\nelif player2.vida_actual<=0:\n\tplayer2.vida_actual=0\n\tprint(\"Jugador 1 gana\")\t\n\tplayer.info_combate()\n\tplayer2.info_combate()\n\tprint(\"\\nJugador 1 gana\")\t","sub_path":"Videojuego1/precodigo.py","file_name":"precodigo.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"561502961","text":"from flask import Flask, jsonify\nimport pycep_correios\nimport requests\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n # StrRetorno = \"xml\"\n # nCdServico = 40010\n # nVlPeso = 1\n # sCepOrigem = \"01318002\"\n # sCepDestino = \"01318002\"\n # nCdFormato = 1\n # sCdMaoPropria = \"N\"\n # sCdAvisoRecebimento = \"N\"\n # nVlValorDeclarado = 0\n\n # #B ase da URL\n # url = \"http://shopping.correios.com.br/wbm/shopping/script/CalcPrecoPrazo.aspx\"\n\n # # Variáveis na URL\n # url = url + \"?StrRetorno=\" + StrRetorno\n # url = url + \"&nCdServico=\" + str(nCdServico)\n # url = url + \"&nVlPeso=\" + str(nVlPeso)\n # url = url + \"&sCepOrigem=\" + sCepOrigem\n # url = url + \"&sCepDestino=\" + sCepDestino\n # url = url + \"&nCdFormato=\" + str(nCdFormato)\n # url = url + \"&sCdMaoPropria=\" + sCdMaoPropria\n # url = url + \"&sCdAvisoRecebimento=\" + sCdAvisoRecebimento\n # url = url + \"&nVlValorDeclarado=\" + str(nVlValorDeclarado)\n\n # print(url)\n\n # result = requests.post(url)\n\n # return 'eitch'\n\n url=\"http://wsf.cdyne.com/WeatherWS/Weather.asmx?WSDL\"\n #headers = {'content-type': 'application/soap+xml'}\n headers = {'content-type': 'text/xml'}\n body = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <SOAP-ENV:Envelope xmlns:ns0=\"http://ws.cdyne.com/WeatherWS/\" xmlns:ns1=\"http://schemas.xmlsoap.org/soap/envelope/\" \n xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\">\n <SOAP-ENV:Header/>\n <ns1:Body><ns0:GetWeatherInformation/></ns1:Body>\n </SOAP-ENV:Envelope>\"\"\"\n\n response = 
requests.post(url,data=body,headers=headers)\n return response.content\n\n\nif __name__ == \"__main__\":\n app.run()","sub_path":"course/tutorial_1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"466875042","text":"#!/usr/bin/env python3\nfrom bisect import bisect_right\n\ns = input()\nN = len(s)\nsi = [[] for _ in range(26)]\nfor i in range(N):\n si[ord(s[i])-ord('a')].append(i+1)\n\nans = 0\nt = input()\nM = len(t)\nfor j in range(M):\n ti = si[ord(t[j])-ord('a')]\n if len(ti)==0:\n print(-1)\n exit()\n d = bisect_right(ti,ans%N)\n if d == len(ti):\n ans += ti[0]+N-ans%N\n else:\n ans += ti[d]-ans%N\nprint(ans)","sub_path":"abc138/e/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"534293157","text":"\nfrom sqlalchemy import Table, Column, Integer, Numeric, String\nfrom sqlalchemy import MetaData\n\n\nclass Solution(object):\n\n def __init__(self):\n metadata = MetaData()\n solution = Table('solution', metadata,\n Column('id', Integer(), primary_key=True),\n Column('keyval', String(25), nullable=False),\n Column('problist', String(200), nullable=False),\n Column('difficulty', Numeric(6, 3)),\n Column('solution', String(200), nullable=False),\n )","sub_path":"Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"522636210","text":"'''\nArray Hop program using greedy approach\n'''\n\ndef arrayHoping(array):\n window = array[0]\n i, maxVal, jumpCount =0, 0, 0\n size = len(array)\n while i<size:\n print(window, end = ' ')\n while i < size-1 and window>0:\n maxVal = max(array[i+1], maxVal)\n window -= 1\n i += 1\n window = maxVal\n i += 1\n jumpCount += 1\n print(\"\\nTotal number of jumps: {0}\".format(jumpCount))\n\n\n#Testing the Function: \narraySet = [[5,6,0,4,2,4,1,0,0,4],[2,1,3,1,0,1]]\nfor array in arraySet:\n print('Original array: ',array)\n arrayHoping(array)\n","sub_path":"arrayhop.py","file_name":"arrayhop.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"194337590","text":"\"\"\"\n535. 
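The array-hop record above walks a shrinking window and tallies jumps as a side effect; the classic greedy scan for the minimum number of jumps is a useful companion sketch (hedged alternative, not the record's algorithm, and it may count differently on some inputs):

# Classic greedy minimum-jump scan over a hop array.
def min_jumps(arr):
    jumps, end, farthest = 0, 0, 0
    for i in range(len(arr) - 1):
        farthest = max(farthest, i + arr[i])
        if i == end:          # must jump before leaving the current window
            jumps += 1
            end = farthest
    return jumps

assert min_jumps([2, 1, 3, 1, 0, 1]) == 2  # 0 -> 2 -> 5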
House Robber III\nhttps://www.lintcode.com/problem/house-robber-iii/\n九章高频题班\n\"\"\"\n\"\"\"\nDefinition of TreeNode:\nclass TreeNode:\n def __init__(self, val):\n self.val = val\n self.left, self.right = None, None\n\"\"\"\n\nclass Solution:\n \"\"\"\n @param root: The root of binary tree.\n @return: The maximum amount of money you can rob tonight\n \"\"\"\n def houseRobber3(self, root):\n # write your code here\n m_not_in_p, m_in_p = self.dfs(root)\n return max(m_not_in_p, m_in_p)\n\n def dfs(self, root):\n if not root:\n return 0, 0\n\n m_not_in_p_left, m_in_p_left = self.dfs(root.left)\n m_not_in_p_right, m_in_p_right = self.dfs(root.right)\n\n m_in_p = m_not_in_p_left + m_not_in_p_right + root.val\n\n m_not_in_p = max(m_not_in_p_left, m_in_p_left) + max(m_not_in_p_right, m_in_p_right)\n\n return m_not_in_p, m_in_p\n","sub_path":"lintcode/535.py","file_name":"535.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"352712718","text":"#Test cleaning\nimport glob\nimport geopandas as gpd\nimport os\nimport numpy as np\nimport pytest\nimport pandas as pd\nimport rasterio\nimport tensorflow as tf\n\nfrom DeepTreeAttention import trees\nfrom DeepTreeAttention.generators import boxes\n\n#random label predictions just for testing\ntest_predictions = \"data/raw/2019_BART_5_320000_4881000_image_small.shp\"\n\n#Use a small rgb crop as a example tile\ntest_sensor_tile = \"data/raw/2019_BART_5_320000_4881000_image_crop.tif\"\n\ntest_sensor_hyperspec = \"data/raw/2019_BART_5_320000_4881000_image_hyperspectral_crop.tif\"\n\n@pytest.fixture()\ndef mod(tmpdir):\n mod = trees.AttentionModel(config=\"conf/tree_config.yml\") \n \n train_dir = tmpdir.mkdir(\"train\")\n predict_dir = tmpdir.mkdir(\"predict\")\n label_file = \"{}/label_file.csv\".format(train_dir)\n \n #create a fake label file\n pd.DataFrame({\"taxonID\":[\"Ben\",\"Jon\"],\"label\":[0,1]}).to_csv(label_file)\n \n config = {}\n train_config = { }\n train_config[\"tfrecords\"] = train_dir\n train_config[\"batch_size\"] = 2\n train_config[\"epochs\"] = 1\n train_config[\"steps\"] = 1\n train_config[\"gpus\"] = 1\n train_config[\"crop_size\"] = 100\n train_config[\"shuffle\"] = True\n train_config[\"weighted_sum\"] = False\n train_config[\"classes\"] = 2\n train_config[\"species_class_file\"] = label_file\n train_config[\"ground_truth_path\"] = label_file \n \n autoencoder_config = {}\n autoencoder_config[\"epochs\"] = 1\n \n #evaluation\n eval_config = { }\n eval_config[\"tfrecords\"] = None\n eval_config[\"steps\"] = 1\n eval_config[\"ground_truth_path\"] = test_predictions\n \n predict_config = { }\n predict_config[\"tfrecords\"] = predict_dir\n \n config[\"train\"] = train_config\n config[\"evaluation\"] = eval_config\n config[\"predict\"] = predict_config\n config[\"autoencoder\"] = autoencoder_config\n \n #Replace config for testing env\n for key, value in config.items():\n for nested_key, nested_value in value.items():\n mod.config[key][nested_key] = nested_value\n \n #Update the inits\n mod.RGB_size = mod.config[\"train\"][\"RGB\"][\"crop_size\"]\n mod.HSI_size = mod.config[\"train\"][\"HSI\"][\"crop_size\"]\n mod.HSI_channels = 369\n mod.RGB_channels = 3\n mod.extend_HSI_box = mod.config[\"train\"][\"HSI\"][\"extend_box\"]\n mod.classes_file = label_file\n mod.sites = 23\n mod.domains = 15\n shp = gpd.read_file(test_predictions)\n shp[\"id\"] = shp.index.values \n mod.train_shp = shp\n mod.test_shp = shp\n \n #Create a model with input 
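A usage sketch for the `houseRobber3` record above, using the `TreeNode` shape described in its comment block (the tree is the common two-level example; expected answer 7 = 3 + 3 + 1):

# Hedged usage sketch for Solution.houseRobber3.
class TreeNode:
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None

root = TreeNode(3)
root.left, root.right = TreeNode(2), TreeNode(3)
root.left.right, root.right.right = TreeNode(3), TreeNode(1)
print(Solution().houseRobber3(root))  # -> 7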
sizes\n mod.create()\n \n return mod\n\n@pytest.fixture()\ndef tfrecords(mod, tmpdir):\n shp = gpd.read_file(test_predictions)\n \n created_records = mod.generate(shapefile=test_predictions, site=0, domain=1, elevation=100,\n HSI_sensor_path=test_sensor_hyperspec,\n RGB_sensor_path=test_sensor_tile,\n train=True,\n chunk_size=2,\n savedir = mod.config[\"train\"][\"tfrecords\"],\n raw_boxes=test_predictions) \n return created_records\n\n\ndef test_autoencoder_model(mod, tfrecords):\n mod.read_data(\"HSI_autoencoder\", validation_split=True)\n train, test = mod.find_outliers()\n \n assert not train.empty ","sub_path":"tests/test_cleaning.py","file_name":"test_cleaning.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"92034317","text":"\n\nlistOfHasil = []\n\n\ndef hasilMergeSort(listSort):\n listOfHasil.append(listSort[:])\n mergeSorting(listSort)\n return listOfHasil\n\n\ndef mergeSorting(listSort):\n # I.S. : listSort is an unsorted list of integers\n # F.S. : listSort is sorted\n temp=[]\n # Time complexity : O(n log n)\n # return array of int[] from step 1 onwards\n left, right = divideList(listSort)\n size = len(listSort)\n if(size == 1):\n return listSort\n else:\n # sort left side\n leftSorted = mergeSorting(left)\n \n # sort right side\n rightSorted = mergeSorting(right)\n \n # combine leftSorted array and rightSorted array into listSorted\n listSorted = mergeSubList(listSort, leftSorted, rightSorted)\n listOfHasil.append(listSorted[:])\n\n return listSorted\n\n\ndef divideList(listSort):\n # returns a tuple of the left and right lists,\n # which are sub lists of the input list\n size = len(listSort)\n left = []\n right = []\n if(size % 2 == 0):\n for i in range(int(size/2)):\n left.append(listSort[i])\n for i in range(int(size/2), size):\n right.append(listSort[i])\n else:\n for i in range(int(size/2)+1):\n left.append(listSort[i])\n for i in range(int(size/2)+1, size):\n right.append(listSort[i])\n\n return left, right\n\n\ndef mergeSubList(listSort, left, right):\n # I.S. : left list and right list are already sorted, listSort is not yet sorted\n # F.S. 
: listSort is sorted\n\n sizeLeft = len(left)\n sizeRight = len(right)\n l = 0 # index for the left list\n r = 0 # index for the right list\n i = 0 # index for the merged list\n while((l < sizeLeft)and(r < sizeRight)):\n if(left[l] <= right[r]):\n listSort[i] = left[l]\n l += 1\n else:\n listSort[i] = right[r]\n r += 1\n i += 1\n while(l < sizeLeft):\n listSort[i] = left[l]\n l += 1\n i += 1\n while(r < sizeRight):\n listSort[i] = right[r]\n r += 1\n i += 1\n return listSort\n","sub_path":"sort-web/backend/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"163449035","text":"from modules import general\r\n\r\n\r\ndef create_place(name: str, description: str):\r\n \"\"\"Create a new {place} object.\r\n Args:\r\n name: The name of the place to display in the game and to address in functions;\r\n description: Short description that would be displayed when player gets to the place.\r\n Return:\r\n {place} dictionary\r\n \"\"\"\r\n place = {'category': 'place',\r\n 'name': name,\r\n 'description': description,\r\n 'connections': {},\r\n 'items': [],\r\n 'objects': []}\r\n\r\n # Print info message:\r\n print(f'Successfully created place: {name}')\r\n\r\n return place\r\n\r\n\r\ndef get_dir_options(dir_options: dict = None):\r\n \"\"\"Iterate over passed dict and return dict with attached reversed directions.\r\n Args:\r\n dir_options: Optional. Dict with one-way directions. If nothing is passed, uses {'South', 'North','East','West'}\r\n Returns:\r\n {directions} dictionary\r\n \"\"\"\r\n if dir_options is None:\r\n dir_options = {'south': 'north', 'east': 'west'}\r\n\r\n directions = dir_options.copy()\r\n\r\n # Iterate over the passed dict and copy reversed directions into the result dict:\r\n for start, end in dir_options.items():\r\n directions[end] = start\r\n\r\n # Print info message:\r\n print(f'Directions were successfully created. Possible options are:')\r\n for start, end in directions.items():\r\n print(f'\\t{start:>20} <---> {end:<20}')\r\n\r\n return directions\r\n\r\n\r\ndef connect_places(start: dict, end: dict, direction: str, dir_options: dict):\r\n \"\"\" Connect two places to each other. 
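A usage sketch for the merge-sort record above: `hasilMergeSort` returns every intermediate snapshot of the list, beginning with the unsorted input and ending fully sorted (note that `listOfHasil` is module-level, so repeated calls keep appending to it):

# Hedged usage sketch for hasilMergeSort's snapshot list.
steps = hasilMergeSort([3, 1, 2])
print(steps[0])   # [3, 1, 2]  (initial snapshot)
print(steps[-1])  # [1, 2, 3]  (final sorted list)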
Simultaneously add opposite directions to both places.\r\n Args:\r\n start: go from {place}\r\n end: go to {place}\r\n direction: \"direction\"\r\n dir_options: dict with corresponding directions\r\n Returns:\r\n {p1}, {p2} place dictionaries with updated directions\r\n \"\"\"\r\n # Check that both start and end are {place} objects:\r\n if not (general.is_place(start) and general.is_place(end)):\r\n print('Error:',\r\n f'\\n\\t{start[\"name\"]} is', 'not' * general.is_place(start), 'a place',\r\n f'\\n\\t{end[\"name\"]} is', 'not' * general.is_place(end), 'a place')\r\n\r\n # Create connection only if the direction is valid:\r\n elif direction not in dir_options:\r\n print(f'There is no such direction as {direction}')\r\n\r\n # Check that start has no previous connection in that direction:\r\n elif start['connections'].get(direction, False):\r\n\r\n # If there is existing connection, print message and do not change it:\r\n print(f'{start[\"name\"]} is already connected in {direction} to {start[\"connections\"].get(direction)[\"name\"]}')\r\n\r\n # Check that end has no previous connection in opposite direction:\r\n elif end['connections'].get(dir_options[direction], False):\r\n\r\n # If there is existing connection, print message and do not change it:\r\n print(\"{0} is already connected in opposite direction to {1}\"\r\n .format(end['name'], end['connections'].get(dir_options[direction])['name']))\r\n\r\n else:\r\n # Create connection for both objects:\r\n start['connections'][direction] = end\r\n end['connections'][dir_options[direction]] = start\r\n\r\n # Print info message:\r\n print(f'Successfully created route: {start[\"name\"]:>20} <---> {end[\"name\"]:<20}')\r\n\r\n return start, end\r\n\r\n\r\ndef remove_connection(p1: dict, direction: str):\r\n \"\"\"Removes connection to specified direction from both {start place} and {target place}.\r\n Args:\r\n p1: go from {place}\r\n direction: \"direction\"\r\n \"\"\"\r\n # Get direction options:\r\n dir_options = get_dir_options()\r\n\r\n # Check that start has connection in specified direction:\r\n if p1['connections'].get(direction, False):\r\n\r\n # Save end name for info message:\r\n p2_name = p1['connections'][direction]['name']\r\n # Remove connections in both directions:\r\n del p1['connections'][direction]['connections'][dir_options[direction]]\r\n del p1['connections'][direction]\r\n\r\n # Print info message:\r\n print(f'Successfully deleted route: {p1[\"name\"]:>20} <---> {p2_name:<20}')\r\n\r\n else:\r\n print(f'{p1[\"name\"]} has no connection on {direction}')\r\n","sub_path":"!Projects/Adventure_game/modules/places.py","file_name":"places.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"357760062","text":"\"\"\"\r\nClasses to represent the default SQL aggregate functions\r\n\"\"\"\r\n\r\nclass AggregateField(object):\r\n \"\"\"An internal field mockup used to identify aggregates in the\r\n data-conversion parts of the database backend.\r\n \"\"\"\r\n def __init__(self, internal_type):\r\n self.internal_type = internal_type\r\n def get_internal_type(self):\r\n return self.internal_type\r\n\r\nordinal_aggregate_field = AggregateField('IntegerField')\r\ncomputed_aggregate_field = AggregateField('FloatField')\r\n\r\nclass Aggregate(object):\r\n \"\"\"\r\n Default SQL Aggregate.\r\n \"\"\"\r\n is_ordinal = False\r\n is_computed = False\r\n sql_template = '%(function)s(%(field)s)'\r\n\r\n def __init__(self, col, source=None, 
is_summary=False, **extra):\r\n \"\"\"Instantiate an SQL aggregate\r\n\r\n * col is a column reference describing the subject field\r\n of the aggregate. It can be an alias, or a tuple describing\r\n a table and column name.\r\n * source is the underlying field or aggregate definition for\r\n the column reference. If the aggregate is not an ordinal or\r\n computed type, this reference is used to determine the coerced\r\n output type of the aggregate.\r\n * extra is a dictionary of additional data to provide for the\r\n aggregate definition\r\n\r\n Also utilizes the class variables:\r\n * sql_function, the name of the SQL function that implements the\r\n aggregate.\r\n * sql_template, a template string that is used to render the\r\n aggregate into SQL.\r\n * is_ordinal, a boolean indicating if the output of this aggregate\r\n is an integer (e.g., a count)\r\n * is_computed, a boolean indicating if this output of this aggregate\r\n is a computed float (e.g., an average), regardless of the input\r\n type.\r\n\r\n \"\"\"\r\n self.col = col\r\n self.source = source\r\n self.is_summary = is_summary\r\n self.extra = extra\r\n\r\n # Follow the chain of aggregate sources back until you find an\r\n # actual field, or an aggregate that forces a particular output\r\n # type. This type of this field will be used to coerce values\r\n # retrieved from the database.\r\n tmp = self\r\n\r\n while tmp and isinstance(tmp, Aggregate):\r\n if getattr(tmp, 'is_ordinal', False):\r\n tmp = ordinal_aggregate_field\r\n elif getattr(tmp, 'is_computed', False):\r\n tmp = computed_aggregate_field\r\n else:\r\n tmp = tmp.source\r\n\r\n self.field = tmp\r\n\r\n def relabel_aliases(self, change_map):\r\n if isinstance(self.col, (list, tuple)):\r\n self.col = (change_map.get(self.col[0], self.col[0]), self.col[1])\r\n\r\n def as_sql(self, qn, connection):\r\n \"Return the aggregate, rendered as SQL.\"\r\n\r\n if hasattr(self.col, 'as_sql'):\r\n field_name = self.col.as_sql(qn, connection)\r\n elif isinstance(self.col, (list, tuple)):\r\n field_name = '.'.join([qn(c) for c in self.col])\r\n else:\r\n field_name = self.col\r\n\r\n params = {\r\n 'function': self.sql_function,\r\n 'field': field_name\r\n }\r\n params.update(self.extra)\r\n\r\n return self.sql_template % params\r\n\r\n\r\nclass Avg(Aggregate):\r\n is_computed = True\r\n sql_function = 'AVG'\r\n\r\nclass Count(Aggregate):\r\n is_ordinal = True\r\n sql_function = 'COUNT'\r\n sql_template = '%(function)s(%(distinct)s%(field)s)'\r\n\r\n def __init__(self, col, distinct=False, **extra):\r\n super(Count, self).__init__(col, distinct=distinct and 'DISTINCT ' or '', **extra)\r\n\r\nclass Max(Aggregate):\r\n sql_function = 'MAX'\r\n\r\nclass Min(Aggregate):\r\n sql_function = 'MIN'\r\n\r\nclass StdDev(Aggregate):\r\n is_computed = True\r\n\r\n def __init__(self, col, sample=False, **extra):\r\n super(StdDev, self).__init__(col, **extra)\r\n self.sql_function = sample and 'STDDEV_SAMP' or 'STDDEV_POP'\r\n\r\nclass Sum(Aggregate):\r\n sql_function = 'SUM'\r\n\r\nclass Variance(Aggregate):\r\n is_computed = True\r\n\r\n def __init__(self, col, sample=False, **extra):\r\n super(Variance, self).__init__(col, **extra)\r\n self.sql_function = sample and 'VAR_SAMP' or 'VAR_POP'\r\n","sub_path":"django/db/models/sql/aggregates.py","file_name":"aggregates.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"612969233","text":"def 
violinify(ax_object,data,positions=None,widths=None,color='black', labeled_median=False, labels_median=[]):\n\n violinwidths = 0.5 if widths == None else widths\n\n\n def stradivari(some_violinplot,some_color):\n for pc in some_violinplot['bodies']:\n pc.set_facecolor(some_color)\n pc.set_alpha(0.25)\n for key in some_violinplot.keys():\n if key not in ['bodies','cmins','cmaxes']:\n pc = some_violinplot[key]\n pc.set_color(some_color)\n pc.set_linewidth(1)\n\n parts = ax_object.violinplot(data,positions=positions,widths=violinwidths,\n showextrema=False,showmeans=False)\n stradivari(parts,color)\n bp_dict = ax_object.boxplot(data,positions=positions,widths=widths,\n showfliers=False,\n medianprops=dict(color=color),\n boxprops=dict(color='None'),\n whiskerprops=dict(color='None'),\n capprops=dict(color='None'))\n\n if labeled_median:\n\n for line, label in zip(bp_dict['medians'], labels_median):\n # get position data for median line\n x_left, _ = line.get_xydata()[0] # left edge of median line\n x_right, y = line.get_xydata()[1] # right edge of median line\n\n x = (x_right + x_left)/2\n # overlay median value\n ax_object.text(x, y, label,\n horizontalalignment='center', verticalalignment='bottom', # draw above, centered\n fontsize=8, rotation=90,\n alpha=0.7)","sub_path":"scripts/plotting/violinify.py","file_name":"violinify.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"581476557","text":"from matplotlib import pyplot as plt\nfrom math import floor\nimport csv, math, statistics\nfrom scipy import stats\nfilenames=['peak','mean','max','min','err','fit']\nylabels=['Hour','Detection count', 'Detection count', 'Detection count', 'Standard error','Sum of parameter covariance']\ntitles=['Peak hour of diurnal shift, averaged for grouped longitudes',\\\n 'Mean detection count, averaged for grouped longitudes',\\\n 'Maximum detection count, averaged for grouped longitudes',\\\n 'Minimum detection count, averaged for grouped longitudes',\\\n 'Variation in hourly detection counts, averaged for grouped longitudes',\\\n 'Sine-wave diurnal shift fit, averaged for grouped longitudes']\n\n#for k in range(2,8):\nhist = {}\ndata = []\n#longs = []\nlons = []\n\nwith open('/home/cwp/EMC/lib/analysis/variation/spacial/data.csv','r') as f:\n reader = list(csv.reader(f, delimiter='\\t'))\n reader = reader[1:]\n for row in reader:\n lons.append(row[1])\n data.append(row[2])\n #data.append(row[k])\n\n\"\"\"\nfor i in range(len(longs)):\n if len(longs[i]) != 0:\n if (longs[i][-1] == 'E') or (longs[i][-1] == 'W'):\n longs[i] = longs[i][:-1]\n\n try:\n float(longs[i])\n hist[float(longs[i])] = float(data[i])\n except:\n print(longs[i], data[i])\n pass\n\"\"\"\n\nfor i in range(len(lons)):\n if len(lons[i]) != 0:\n if (lons[i][-1] == 'E') or (lons[i][-1] == 'W'):\n lons[i] = lons[i][:-1]\n\n try:\n float(lons[i])\n hist[float(lons[i])] = float(data[i])\n except:\n print(lons[i], data[i])\n pass\n\nx = sorted(hist.keys())\ny = [hist[i] for i in x]\n\npeaks = [y[0]]\nstart = x[0]\nfinalX = []\nfinalY = []\nerr=[]\ncount = 1\nlong = [x[0]]\nfor i in range(1,len(x)):\n if abs(x[i]-start) <= 10:\n long.append(x[i])\n peaks.append(y[i])\n count += 1\n else:\n print(len(long))\n print(min(long), max(long))\n mean_lon = sum(long)/count\n mean_peak = sum(peaks)/count\n finalY.append(mean_peak)\n finalX.append(mean_lon)\n\n \"\"\"\n if count >= 2:\n err.append(statistics.stdev(peaks)/math.sqrt(count))\n else:\n \"\"\"\n if 
count != 1:\n err.append(stats.iqr(peaks))\n else:\n err.append(4)\n\n peaks = [y[i]]\n count = 1\n long = [x[i]]\n start = x[i]\n\nfor i in range(len(finalY)):\n finalY[i] = finalY[i]+(finalX[i]/15)\n #err[i] = err[i]/15\n if finalY[i] > 24:\n finalY[i] -= 24\n elif finalY[i] < 0:\n finalY[i] += 24\n\nref = [6 for k in range(-150,151)]\nplt.plot(range(-150,151), ref, 'g')\n#plt.errorbar(finalX,finalY, yerr=err)\nplt.errorbar(finalX, finalY, yerr=err, capsize=4, color=\"b\")\n#plt.scatter(finalX, finalY)\n#plt.scatter(finalX, finalY)\nplt.ylim(0,24)\nplt.xlim(-150,150)\n#plt.title(titles[k-2])\n#plt.title('Daily count skewness, averaged for grouped longitudes')\nplt.xlabel('Longitude (degrees)')\nplt.ylabel(ylabels[0])\n#plt.ylabel(ylabels[k-2])\n#plt.savefig('/home/cwp/EMC/plots/variation/spacial/longitude/'+filenames[k-2]+'.png')\nplt.tight_layout()\n#plt.savefig('/home/cwp/EMC/plots/variation/spacial/longitude/corrected_peak_better.png',dpi=500)\nplt.savefig('/home/cwp/ltx/papers/dishift2/final/figures/corrected.pdf')\n#plt.show()\n\"\"\"\n\nplt.savefig('/home/cwp/EMC/plots/variation/spacial/longitude/skew.png')\n\"\"\"\n","sub_path":"lib/analysis/variation/spacial/plotAttr.py","file_name":"plotAttr.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"517126571","text":"\"\"\"\nSSL Context factories\n\"\"\"\n\n__author__ = 'VMware, Inc.'\n__copyright__ = 'Copyright 2015, 2017 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long\n\nfrom OpenSSL import SSL, crypto\n\nfrom vmware.vapi.lib.log import get_vapi_logger\n\nlogger = get_vapi_logger(__name__)\n\n\nclass ClientContextFactory(object):\n \"\"\"\n Context factory base class. This class should be used to set the\n SSL options\n \"\"\"\n def get_context(self):\n \"\"\"\n Returns the SSL context\n \"\"\"\n raise NotImplementedError\n\n\nclass DefaultClientContextFactory(ClientContextFactory):\n \"\"\"\n Default SSL context class. This chooses some default options for SSL context.\n Clients can retrieve the context.\n\n To modify the context and set some options directly. Create a class like below\n and set the options. Pass this to the get_connector function\n\n class CustomClientContextFactory(DefaultClientContextFactory):\n def get_context(self):\n ctx = DefaultClientContextFactory.get_context(self)\n # modify ctx\n return ctx\n \"\"\"\n def __init__(self, certificate=None, private_key=None, ca_certs=None,\n ca_certs_dir=None):\n \"\"\"\n Initialize DefaultClientContextFactory\n\n :type certificate: :class:`str`\n :param certificate: File path of the certificate\n :type private_key: :class:`str`\n :param private_key: File path of the private key\n :type ca_certs: :class:`str`\n :param ca_certs: File path of ca certificates\n :type ca_certs_dir: :class:`str`\n :param ca_certs_dir: Directory prepared using the c_rehash tool\n included with OpenSSL\n \"\"\"\n\n def callback_fn(conn, cert, errno, depth, result): # pylint: disable=W0613\n \"\"\"\n Callback to handle the cert validation\n\n :type conn: :class:`OpenSSL.SSL.Connection`\n :param conn: OpenSSL connection that triggered the verification\n :type cert: :class:`OpenSSL.crypto.X509`\n :param cert: Certificate that is being validated\n :type errno: :class:`int`\n :param errno: An integer containing the error number (0 in case no\n error) of the error detected. 
Error descriptions here:\n http://www.openssl.org/docs/apps/verify.html\n :type depth: :class:`int`\n :param depth: An integer indicating the depth of the certificate\n being validated. If it is 0 then it means it is the given\n certificate is the one being validated, in other case is one\n of the chain of certificates\n :type result: :class:`int`\n :param result: An integer that indicates whether the validation of\n the certificate currently being validated (the one in the\n second argument) passed or not the validation. A value of 1 is\n a successful validation and 0 an unsuccessful one.\n :rtype: :class:`bool`\n :return: True if the verification passes, False otherwise\n \"\"\"\n logger.debug(\n 'Verifying SSL certificate at depth %s, subject %s, issuer %s',\n depth, repr(cert.get_subject()), repr(cert.get_issuer()))\n\n if errno:\n try:\n fn = crypto.X509_verify_cert_error_string\n errmsg = ':%s' % fn(errno)\n except AttributeError:\n errmsg = ''\n logger.error('verify error %s: %s', errno, errmsg)\n return False\n return True\n\n # Connect to server supporting TLSv1.0, TLSv1.1, TLSv1.2\n self._context = SSL.Context(SSL.SSLv23_METHOD)\n # Disable the insecure SSLv2 and SSLv3 connections\n self._context.set_options(SSL.OP_NO_SSLv2)\n self._context.set_options(SSL.OP_NO_SSLv3)\n self._context.set_verify(SSL.VERIFY_PEER, callback_fn)\n\n if certificate:\n self._context.use_certificate_file(certificate)\n if private_key:\n self._context.use_privatekey_file(private_key)\n if ca_certs or ca_certs_dir:\n try:\n self._context.load_verify_locations(\n ca_certs.encode('utf-8'), ca_certs_dir)\n except TypeError:\n self._context.load_verify_locations(\n ca_certs, ca_certs_dir)\n\n def get_context(self):\n \"\"\"\n Returns the SSL context\n\n :rtype: :class:`OpenSSL.SSL.Context`\n :return: SSL context\n \"\"\"\n return self._context\n\n\nclass UnverifiedClientContextFactory(DefaultClientContextFactory):\n \"\"\"\n Unverified SSL context class. 
This class retrieves an unverified SSL Context\n with other options from the DefaultClientContext\n \"\"\"\n def __init__(self):\n DefaultClientContextFactory.__init__(self)\n self._context.set_verify(SSL.VERIFY_NONE, lambda *x: True)\n","sub_path":"alexa-program.bak/vmware/vapi/lib/ssl.py","file_name":"ssl.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"126766389","text":"from sklearn.datasets import fetch_20newsgroups\r\nfrom pprint import pprint\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nimport numpy as np\r\nimport nltk\r\nfrom collections import defaultdict\r\nimport string\r\nfrom nltk.stem.porter import PorterStemmer\r\n\r\n\r\nnewsgroups = fetch_20newsgroups(categories=['comp.graphics','comp.os.ms-windows.misc','rec.autos','rec.sport.hockey'],\r\n subset='all', shuffle=True, random_state=1)\r\n\t\t\t\t\t\t\t\t\r\npprint(list(newsgroups.target_names))\r\n\r\nprint(\"%d documents\" % len(newsgroups.data))\r\nprint(\"%d categories\" % len(newsgroups.target_names))\r\n\r\n#взято с stackoverflow\r\nstemmer = PorterStemmer()\r\ndef stem_tokens(tokens, stemmer):\r\n stemmed = []\r\n for item in tokens:\r\n stemmed.append(stemmer.stem(item))\r\n return stemmed\r\n\r\ndef tokenize(text):\r\n tokens = nltk.word_tokenize(text)\r\n tokens = [i for i in tokens if i not in string.punctuation]\r\n stems = stem_tokens(tokens, stemmer)\r\n return stems\r\n\r\n\t\r\nvectorizer = TfidfVectorizer(stop_words='english', tokenizer=tokenize)\r\ntfidf_data = vectorizer.fit_transform(newsgroups.data)\r\n\r\nquery = 'computer vision'\r\ntfidf_query = vectorizer.transform([query])\r\n\r\nfeature_names = vectorizer.get_feature_names()\r\nfor word in tfidf_query.nonzero()[1]:\r\n print(feature_names[word], ' - ', tfidf_query[0, word])\r\n\t\r\n\t\r\n\r\n\t\r\ndef query_results(query_string, top_count):\r\n tfidf_query = vectorizer.transform([query_string])\r\n cosine_similarities = defaultdict(float) #словарь всех дистанций\r\n count = 0\r\n for doc in tfidf_data: #для каждого документа в корпусе находим косинусное расстояние с запросом\r\n #пользуемся матричным видом для умножения векторов\r\n #так как TF-IDF нормализует данные, делить на длины векторов не нужно\r\n cosine_similarity = doc*(tfidf_query[0].transpose()) \r\n if not cosine_similarity:\r\n cosine_similarity = 0.0\r\n else:\r\n #при умножении матриц получается матрица размером [1,1], записываем этот элемент\r\n cosine_similarity = cosine_similarity[0,0]\r\n #записываем в словарь\r\n cosine_similarities[newsgroups.data[count]] = cosine_similarity\r\n count += 1\r\n #сортируем словарь по значению и выводим заданное значение документов\r\n for key, value in sorted(cosine_similarities.items(), reverse=True, key=lambda x:x[1])[:top_count]:\r\n print('Similarity value = ', value, '\\n\\n', key )\r\n print('----------------------------------------------------------------------')\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\n\t\t\r\nquery_results(\"hockey champion\", 3)\r\n\r\nquery_results(\"auto speed\", 5)","sub_path":"cw2.py","file_name":"cw2.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"259825712","text":"import asyncio\nimport base64\nimport configparser\nimport redis\nfrom aiohttp import web\nimport aiohttp_session\nimport aiohttp_jinja2\nimport jinja2\nfrom cryptography import fernet\nfrom aiohttp_session.cookie_storage import 
EncryptedCookieStorage\nimport utils.bot_common\n\nroutes = web.RouteTableDef()\nroutes.static(\"/files\", \"files\")\nxml = \"\"\"<?xml version=\"1.0\" ?>\n<cross-domain-policy>\n <allow-access-from domain=\"*\" />\n</cross-domain-policy>\"\"\"\nconfig = configparser.ConfigParser()\nconfig.read(\"web.ini\")\nif config[\"webserver\"][\"allow_reg\"].lower() == \"true\":\n registation = True\nelse:\n registation = False\n\n\ndef get_level(exp):\n expSum = 0\n i = 0\n while expSum <= exp:\n i += 1\n expSum += i * 50\n return i\n\n\n@routes.get(\"/\")\nasync def index(request):\n session = await aiohttp_session.get_session(request)\n context = {}\n if \"token\" not in session:\n context[\"logged_in\"] = False\n else:\n context[\"logged_in\"] = True\n context[\"token\"] = session[\"token\"]\n context[\"update_time\"] = config[\"webserver\"][\"update_time\"]\n return aiohttp_jinja2.render_template(\"index.html\", request,\n context=context)\n\n\n@routes.post(\"/login\")\nasync def login(request):\n session = await aiohttp_session.new_session(request)\n data = await request.post()\n password = data[\"password\"]\n uid = app[\"redis\"].get(f\"auth:{password}\")\n if uid == data[\"login\"]:\n session[\"uid\"] = uid\n session[\"token\"] = password\n raise web.HTTPFound(\"/\")\n\n\n@routes.get(\"/logout\")\nasync def logout(request):\n session = await aiohttp_session.get_session(request)\n if \"token\" in session:\n del session[\"token\"]\n del session[\"uid\"]\n raise web.HTTPFound(\"/\")\n\n\n@routes.get(\"/register\")\nasync def register(request):\n if not registation:\n return web.Response(text=\"Регистрация отключена\")\n uid, password = utils.bot_common.new_account(app[\"redis\"])\n return web.Response(text=f\"Аккаунт создан, ваш логин - {uid}, \"\n f\"пароль - {password}\")\n\n\n@routes.get(\"/prelogin\")\nasync def prelogin(request):\n if \"sid\" not in request.query:\n raise web.HTTPClientError()\n try:\n uid = int(request.query[\"sid\"])\n except ValueError:\n raise web.HTTPClientError()\n exp = int(app[\"redis\"].get(f\"uid:{uid}:exp\"))\n return web.json_response({\"user\": {\"bannerNetworkId\": None, \"reg\": 0,\n \"paymentGroup\": \"\",\n \"preloginModuleIds\": \"\", \"id\": uid,\n \"avatariaLevel\": get_level(exp)}})\n\n\n@routes.post(\"/method/{name}\")\nasync def method(request):\n data = await request.post()\n name = request.match_info[\"name\"]\n if name == \"friends.getAppUsers\":\n return web.json_response({\"response\": []})\n elif name == \"friends.get\":\n return web.json_response({\"response\": {\"count\": 0, \"items\": []}})\n elif name == \"users.get\":\n if data[\"user_ids\"]:\n sid = int(data[\"user_ids\"])\n return web.json_response({\"response\": [{\"id\": sid, \"sex\": 2,\n \"first_name\": \"Павел\",\n \"last_name\": \"Дуров\",\n \"bdate\": \"10.10.1984\"}]})\n return web.json_response({\"response\": []})\n return web.json_response({\"error\": {\"error_code\": 3,\n \"error_msg\": \"Method not found\"}})\n\n\n@routes.post(\"/wall_upload\")\nasync def wall_upload(request):\n return web.json_response({\"server\": 1, \"photo\": [{\"photo\": \"darova\",\n \"sizes\": []}],\n \"hash\": \"darova\"})\n\n\n@routes.post(\"/auth\")\nasync def auth(request):\n data = await request.json()\n return web.json_response({\"jsonrpc\": \"2.0\", \"id\": 1,\n \"result\": data[\"params\"][2][\"auth_key\"]})\n\n\n@routes.get(\"/appconfig.xml\")\nasync def appconfig(request):\n context = {\"address\": config[\"webserver\"][\"web_address\"]}\n response = 
aiohttp_jinja2.render_template(\"appconfig.xml\", request,\n context=context)\n response.content_type = \"application/xml\"\n return response\n\n\n@routes.get(\"/crossdomain.xml\")\nasync def crossdomain(requst):\n return web.Response(text=xml)\n\n\nasync def main():\n global app\n app = web.Application()\n app.add_routes(routes)\n app[\"redis\"] = redis.Redis(decode_responses=True)\n fernet_key = fernet.Fernet.generate_key()\n secret_key = base64.urlsafe_b64decode(fernet_key)\n aiohttp_session.setup(app, EncryptedCookieStorage(secret_key))\n aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(\"templates\"))\n runner = web.AppRunner(app)\n await runner.setup()\n site = web.TCPSite(runner, \"0.0.0.0\", int(config[\"webserver\"][\"web_port\"]))\n await site.start()\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.create_task(main())\n loop.run_forever()\n","sub_path":"avacity-2.0/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":5176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"427000758","text":"from random import randint\nimport sys\nfrom copy import deepcopy\nimport math\n\nargvs = sys.argv\n\n#${leng} ${leng} ${an} ${input} ${out}\n\nnum_steps = 200\n\n#leng = 50\n\nlenp = 1036\nlens = 155\n\nsum_mat = [[0 for i in range(lenp)] for j in range(lens)]\n#mat[len_p][len_s]\n\ndef check_mat(mat, b_mat):#check whether new mat == before mat \n return mat == b_mat\n\ndef decident(mat):\n match = False \n\n while match is False:\n i = randint(0, lenp-1)\n j = randint(0, lens-1)\n if mat[i][j] == 1:\n match = True \n\n return i, j\n\ndef m_c(bmat, original_s, original_p, ith, jth, p):\n mat = deepcopy(bmat)\n marginal_p = [sum(mat[i]) for i in range(lenp)]\n marginal_s = [0 for i in range(lens)]\n for i in range(lens):\n s = 0\n for j in range(lenp):\n s += mat[j][i]\n marginal_s[i] = s\n\n if marginal_p == original_p and marginal_s == original_s:\n i, j = decident(mat)\n mat[i][j] = 0\n p = 0\n return mat, i, j, p\n #return mat, ith, jth, p\n else:\n \"\"\"\n if mat[ith][jth] == 0:\n mat[ith][jth] = 1\n p = 1\n return mat, ith, jth, p\n else:\n \"\"\"\n match = False\n while match is False:\n k = randint(0, lenp-1)\n l = randint(0, lens-1)\n if mat[k][l] == 1 or (k == ith and l == jth):\n match = True\n\n if k == ith and l == jth:\n mat[ith][jth] = 1\n p = 1\n return mat, ith, jth, p\n else:\n if randint(0, 9) < 5:\n if mat[k][jth] == 0:\n mat[k][jth] = 1\n mat[k][l] = 0\n jth = l\n p = 0\n return mat, ith, jth, p\n #return mat, k, h, p\n\n else:\n if mat[ith][l] == 0:\n mat[ith][l] = 1\n mat[k][l] = 0\n ith = k\n p = 0\n return mat, ith, jth, p\n\ndef jaccade(mat):\n jmat = [[0 for i in range(lenp)] for j in range(lenp)]\n for i in range(lenp):\n for j in range(i+1, lenp):\n c = 0\n for k in range(lens): \n if mat[i][k]*mat[j][k] == 1: \n c += 1 \n jmat[i][j] = c\n jmat[j][i] = c\n return jmat\n\n\ndef mcmc(num, d_file, o_file):\n original_data = []\n f = open(d_file, \"r\")\n for line in f:\n l = line.replace('\\n', '').split(' ')\n #print \"l\", l\n original_data.append([int(x) for x in l])\n f.close()\n\n original_p = [sum(original_data[i]) for i in range(lenp)]\n original_s = [0 for i in range(lens)]\n for i in range(lens):\n s = 0\n for j in range(lenp):\n s += original_data[j][i]\n original_s[i] = s\n\n original_jmat = jaccade(original_data)\n count_n = 0\n\n bmat = original_data\n\n cmat = [[0 for i in range(lenp)] for j in range(lenp)]\n cmat_in = [[0 for i in range(lenp)] for j in 
range(lenp)]\n \n p = 1\n mi = -1\n mj = -1\n\n while count_n < num:\n mat, mi, mj, p = m_c(bmat, original_s, original_p, mi, mj, p)\n jmat = jaccade(mat)\n\n for i in range(lenp):\n for j in range(i+1, lenp):\n if jmat[i][j] > original_jmat[i][j]:\n cmat[i][j] += 1\n cmat[j][i] += 1\n if jmat[i][j] >= original_jmat[i][j]:\n cmat_in[i][j] += 1\n cmat_in[j][i] += 1\n count_n += 1\n bmat = deepcopy(mat)\n\n tru = []\n fal = []\n\n f = open(o_file+str(num)+\".txt\", 'w')\n pmat = [[0 for i in range(lenp)] for j in range(lenp)]\n for i in range(lenp):\n ll = []\n for j in range(lenp):\n if i == j:\n c = 1.0\n elif cmat[i][j] == 0:\n c = 0.5/num\n #c = -math.log10(cc)\n else: \n c = cmat[i][j]/float(num)\n #c = -math.log10(cc)\n ll.append(c)\n f.write(' '.join([str(x) for x in ll])+'\\n')\n \n f.close()\n\n f = open(o_file+str(num)+\"in.txt\", 'w')\n pmat = [[0 for i in range(lenp)] for j in range(lenp)]\n for i in range(lenp):\n ll = []\n for j in range(lenp):\n if i == j:\n c = 1.0\n elif cmat_in[i][j] == 0:\n c = 0.5/num\n #c = -math.log10(cc)\n else: \n c = cmat_in[i][j]/float(num)\n #c = -math.log10(cc)\n ll.append(c)\n f.write(' '.join([str(x) for x in ll])+'\\n')\n \n f.close()\n\nmcmc(num_steps, \"data_4.txt\", \"data_4_together_pmat\")\n","sub_path":"Together/mcmc_nonperf_real.py","file_name":"mcmc_nonperf_real.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"298428607","text":"\"\"\"\nBinary Trees:\n\nBinary Search Trees (BSTs):\n\n Balanced:\n Lookup: O(log N)\n Insertion: O(log N)\n Deletion: O(log N)\n\n Number of nodes in tree: 2^height - 1 -> log nodes/number of decisions/steps = height (dropping -1)\n log 100 = 2 because 10^2 = 100\n Divide and conquer\n\n Unbalanced:\n Lookup: O(n)\n Insertion: O(n)\n Deletion: O(n)\n\nPros:\nIf balanced, all operations are better than linear\nOrdered\nFlexible size (because node can be placed anywhere in memory)\nVs arrays:\n Faster lookups than arrays (O(log N) vs O(n) if unsorted)\n Faster inserts/deletes because of shifting of all indices, unless end (or beginning?)\nVs hash tables:\n Ordered (Python's dicts are ordered now)\n Structure of parent-child relationship\n Slower insertion and search (no O(1), constant time)\nOn average, an array or a dictionary will have faster operations\n\nCons:\nNo O(1) operations, since there often is traversal involved\n\"\"\"\n\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\nclass BST:\n def __init__(self):\n self.root = None\n\n def insert(self, value, curr=None):\n ## Recursively\n # if curr == None:\n # curr = self.root\n # if not curr:\n # curr = Node(value)\n # return self\n # if value < curr.value:\n # curr = curr.left\n # elif value > curr.value: # in this case, not inserting it at all if value already present\n # curr = curr.right\n # else:\n # return -1 # required to avoid infinite loops\n # self.insert(value, curr)\n\n ## Iteratively\n new_node = Node(value)\n if not self.root:\n self.root = new_node\n return self\n curr = self.root\n while True:\n if value < curr.value:\n if not curr.left:\n curr.left = new_node\n return self\n curr = curr.left\n elif value > curr.value:\n if not curr.right:\n curr.right = new_node\n return self\n curr = curr.right\n else:\n return -1\n\n def lookup(self, value, curr=None):\n ## Recursive\n # if curr == None:\n # curr = self.root\n # if not curr:\n # return None\n # if curr.value == value:\n # return 
curr\n # if value < curr.value:\n # curr = curr.left\n # elif value > curr.value:\n # curr = curr.right\n # self.lookup(value, curr)\n\n ## Iterative\n if not self.root:\n return None\n curr = self.root\n while curr:\n if value < curr.value:\n curr = curr.left\n elif value > curr.value:\n curr = curr.right\n elif value == curr.value:\n return curr\n return None # or False\n\n def remove(self, value):\n ## Iterative\n if not self.root:\n return -1 # or False\n curr = self.root\n parent = None\n while curr:\n if value < curr.value:\n parent = curr\n curr = curr.left\n elif value > curr.value:\n parent = curr\n curr = curr.right\n elif value == curr.value:\n if not curr.right: # no right child\n if not parent:\n self.root = curr.left\n else:\n if curr.value < parent.value:\n parent.left = curr.left\n elif curr.value > parent.value:\n parent.right = curr.left\n elif curr.right and not curr.right.left: # right child without a left chile\n if not parent:\n self.root = curr.left\n else:\n curr.right.left = curr.left\n if curr.value < parent.value:\n parent.left = curr.right\n elif curr.value > parent.value:\n parent.right = curr.right\n elif curr.right and curr.right.left: # right child with a left child\n # finding right child's left-most child\n leftmost = curr.right.left\n leftmost_parent = curr.right\n while leftmost.left:\n leftmost_parent = leftmost\n leftmost = leftmost.left\n # parent's left subtree is now leftmost's right subtree\n leftmost_parent.left = leftmost.right\n leftmost.left = curr.left\n leftmost.right = curr.right\n if not parent:\n self.root = leftmost\n else:\n if curr.value < parent.value:\n parent.left = leftmost\n elif curr.value > parent.value:\n parent.right = leftmost\n return self\n\n\n def traverse(self, node=None):\n if node == None:\n node = self.root\n if node:\n print(node.value, end=\" \")\n if node.left:\n self.traverse(node.left)\n if node.right:\n self.traverse(node.right)\n\n\nbst = BST()\nlist = [9, 4, 20, 1, 6, 15, 170]\nfor item in list:\n # print(item)\n bst.insert(item)\nresult_node = bst.lookup(170)\nprint(result_node)\nif result_node:\n if result_node.left:\n print(result_node.left.value)\n if result_node.right:\n print(result_node.right.value)\nprint(\" \")\nbst.traverse() # tree = bst.traverse(); print(tree)\nprint(\" \")\nbst.remove(4) # bst.remove(1) # bst.remove(4)\nbst.traverse()\nprint(\" \")\n","sub_path":"data_structures/trees/binary_trees.py","file_name":"binary_trees.py","file_ext":"py","file_size_in_byte":5970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"52016443","text":"from common.op_params import opParams\nimport ast\nimport time\n\n\nclass opTune:\n def __init__(self):\n self.op_params = opParams()\n self.sleep_time = 1.0\n self.start()\n\n def start(self):\n print('Welcome to the opParams command line live tuner!')\n editable = [p for p in self.op_params.get() if p in self.op_params.default_params and self.op_params.default_params[p]['live']]\n while True:\n print('Choose a parameter to tune:')\n print('\\n'.join(['{}. 
{}'.format(idx + 1, p) for idx, p in enumerate(editable)]))\n choice = input('>> ')\n if not choice:\n print('Exiting opTune!')\n break\n choice = ast.literal_eval(choice) - 1\n if choice not in range(len(editable)):\n self.message('Error, not in range!')\n continue\n self.chosen(editable[choice])\n\n def chosen(self, param):\n allowed_types = self.op_params.default_params[param]['allowed_types']\n print('\\nChosen parameter: {}'.format(param))\n print('Current value: {}'.format(self.op_params.get(param)))\n print('\\n- Description: {}'.format(self.op_params.default_params[param]['description']))\n print('- Allowed types: {}\\n'.format(', '.join([str(i).split(\"'\")[1] for i in allowed_types])))\n while True:\n value = input('Enter value: ')\n if value == '':\n self.message('Exiting this parameter...')\n break\n\n status, value = self.parse_input(value)\n if not status:\n self.message('Cannot parse input!')\n continue\n\n if not any([isinstance(value, typ) for typ in allowed_types]):\n self.message('The type of data you entered ({}) is not allowed with this parameter!\\n'.format(str(type(value)).split(\"'\")[1]))\n continue\n self.op_params.put(param, value)\n print('Saved {} with value: {}! (type: {})\\n'.format(param, value, str(type(value)).split(\"'\")[1]))\n\n def message(self, msg):\n print('--------\\n{}\\n--------'.format(msg), flush=True)\n time.sleep(self.sleep_time)\n print()\n\n def parse_input(self, dat):\n dat = dat.replace(\"'\", '\"')\n try:\n dat = ast.literal_eval(dat)\n except:\n try:\n dat = ast.literal_eval('\"{}\"'.format(dat))\n except ValueError:\n return False, dat\n return True, dat\n\n\nopTune()","sub_path":"op_tune.py","file_name":"op_tune.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"94413370","text":"\"\"\"\r\n변수\r\n\"\"\"\r\nage = 3 #integer\r\nname = \"abcd\" #string\r\npie = 3.14 #float\r\nisTrue = True #boolean\r\nnotYet = None #None\r\narr = [] #array\r\nobj = {\"name\":\"abcd\"} #dictionary\r\ntub = (\"aaa,22\") #tuble\r\ncol = {1,2,3,4,5,5,5,5} #set\r\ndata = b\"Hi, I'm kyeong\" #bytes\r\n\r\n\r\n","sub_path":"python--master/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"88568759","text":"from common.currency_pair import CurrencyPair\nfrom common.market_data import MarketData\nfrom exchanges.abstract_exchange import AbstractExchange, AbstractExchangeError\n\n\nclass BittrexError(AbstractExchangeError):\n pass\n\n\nclass Bittrex(AbstractExchange):\n base_url = 'https://bittrex.com/api/v1.1/public'\n\n exception = BittrexError\n\n # Example of API response\n # {\n # \"success\": true,\n # \"message\": \"\",\n # \"result\": {\n # \"Bid\": 2.05670368,\n # \"Ask\": 3.35579531,\n # \"Last\": 3.35579531\n # }\n # }\n\n async def get_market_data(self, pair: CurrencyPair) -> MarketData:\n result = await self.json(\n await self.get(\n '/getticker',\n {'market': f'{pair.base_currency}-{pair.currency}'}\n ),\n lambda x: x.get('success'),\n lambda x: x.get('message')\n )\n\n return self.get_md(\n pair=pair,\n best_bid=result['result']['Bid'],\n best_ask=result['result']['Ask'],\n last_trade=result['result']['Last']\n )\n","sub_path":"exchanges/bittrex.py","file_name":"bittrex.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} 
+{"seq_id":"359599748","text":"from django.shortcuts import render\nfrom .models import Image,Location,Category\nfrom django.http import HttpResponse\n\n# Create your views here.\ndef welcome(request):\n \n photos = Image.objects.all()\n arg = { \"photos\": photos}\n\n return render(request, 'welcome.html',arg)\n\n\ndef photos(request,photo_id):\n try:\n photo = Image.objects.get(id = photo_id)\n print(photo.location)\n except DoesNotExist:\n raise Http404()\n return render(request,\"photo.html\", {\"photo\":photo})\n\n\ndef search_results(request):\n if 'category' in request.GET and request.GET['category']:\n search_term = request.GET.get('category')\n searched_categories = Image.search_category(search_term)\n message = f\"{search_term}\"\n\n return render(request, 'search.html', {\"message\": message, \"categories\": searched_categories})\n\n else:\n message = \"You haven't searched for any term\"\n return render(request, ' search.html', {\"message\": message})\n\n","sub_path":"photos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"292484948","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='City',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('city', models.CharField(max_length=255)),\n ],\n ),\n migrations.CreateModel(\n name='Country',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('country', models.CharField(max_length=255)),\n ],\n ),\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n\n ('city', models.CharField(max_length=255)),\n ('address', models.CharField(max_length=255)),\n ('time', models.DateTimeField()),\n ('description', models.TextField()),\n ('country', models.ForeignKey(to='cities.Country')),\n ('relevant_exchange_group', models.ForeignKey(to='cities.City')),\n ],\n ),\n migrations.CreateModel(\n name='University',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n ('number_of_students', models.IntegerField(default=0)),\n ('city', models.ForeignKey(to='cities.City')),\n ('country', models.ForeignKey(to='cities.Country')),\n ],\n ),\n migrations.AddField(\n model_name='city',\n name='country',\n field=models.ForeignKey(to='cities.Country'),\n ),\n ]\n","sub_path":"cities/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"192654626","text":"from setuptools import setup, find_packages\nfrom pkg_resources import resource_string\n\nVERSION_STR = '0.9.5'\nREADME_STR = resource_string(__name__, 'README.rst')\n\nsetup(name='sqlalchemy-foundationdb',\n version=VERSION_STR,\n description=\"FoundationDB SQL Layer Dialect and ORM Extension for SQLAlchemy\",\n long_description=README_STR,\n url=\"https://github.com/FoundationDB/sql-layer-adapter-sqlalchemy\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: 
Console',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Database :: Front-Ends',\n ],\n keywords='FoundationDB SQLAlchemy',\n author='Mike Bayer',\n author_email='mike@zzzcomputing.com',\n maintainer= 'FoundationDB',\n maintainer_email='distribution@foundationdb.com',\n license='MIT',\n packages=find_packages(exclude=['test']),\n install_requires=['foundationdb_sql >= 0.9dev', 'sqlalchemy >= 0.9.2'],\n include_package_data=True,\n tests_require=['pytest >= 2.5.2', 'mock >= 1.0.1'],\n test_suite=\"pytest.main\",\n zip_safe=True,\n entry_points={\n 'sqlalchemy.dialects': [\n 'foundationdb = sqlalchemy_foundationdb.dialect.psycopg2:FDBPsycopg2Dialect',\n 'foundationdb.psycopg2 = sqlalchemy_foundationdb.dialect.psycopg2:FDBPsycopg2Dialect',\n ]\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"488830541","text":"import os\nimport shutil\nfrom pytest import raises\n\nfrom tests import TestCase\nfrom flask_storage import (\n FileSystemStorage,\n FileSystemStorageFile,\n StorageException\n)\n\n\nclass FileSystemTestCase(TestCase):\n def setup_method(self, method):\n TestCase.setup_method(self, method)\n self.path = os.path.join(\n os.path.dirname(__file__),\n 'uploads',\n 'images'\n )\n self.rel_path = os.path.join('uploads', 'images')\n\n def teardown_method(self, method):\n shutil.rmtree(os.path.join(\n os.path.dirname(__file__),\n 'uploads'\n ), ignore_errors=True)\n\n\nclass TestFileSystemDefaults(FileSystemTestCase):\n def test_if_folder_not_set_uses_application_config_default(self):\n self.app.config['UPLOADS_FOLDER'] = self.path\n storage = FileSystemStorage()\n assert self.rel_path in storage.folder_name\n\n\nclass TestFileSystemCreateFolder(FileSystemTestCase):\n def teardown_method(self, method):\n FileSystemTestCase.teardown_method(self, method)\n try:\n os.remove(os.path.join(\n os.path.dirname(__file__),\n 'uploads')\n )\n except OSError:\n pass\n\n def test_creates_folder_on_success(self):\n storage = FileSystemStorage(os.path.dirname(__file__))\n assert not os.path.exists(self.path)\n storage.create_folder(self.path)\n assert os.path.exists(self.path)\n\n def test_raises_exception_on_folder_conflict(self):\n storage = FileSystemStorage(os.path.dirname(__file__))\n storage.create_folder(self.path)\n with raises(StorageException):\n storage.create_folder(self.path)\n\n def test_raises_exception_on_file_conflict(self):\n storage = FileSystemStorage(os.path.dirname(__file__))\n storage.save('uploads', 'some text')\n with raises(StorageException):\n storage.create_folder('uploads')\n\n def test_conflict_exception_contains_proper_status_code(self):\n storage = FileSystemStorage(os.path.dirname(__file__))\n storage.save('uploads', 'some text')\n try:\n storage.create_folder('uploads')\n assert False\n except StorageException as e:\n assert e.status_code == 409\n assert e.message\n\n\nclass TestFileSystemDeleteFolder(FileSystemTestCase):\n def test_deletes_folder_on_success(self):\n storage = FileSystemStorage(os.path.dirname(__file__))\n storage.create_folder(self.path)\n storage.delete_folder(self.path)\n assert not os.path.exists(self.path)\n\n def 
test_raises_exception_if_folder_does_not_exist(self):\n storage = FileSystemStorage(os.path.dirname(__file__))\n with raises(StorageException):\n storage.delete_folder(self.path)\n\n\nclass TestFileSystemListFolders(FileSystemTestCase):\n def test_returns_list_of_folders_on_success(self):\n storage = FileSystemStorage(os.path.dirname(__file__))\n storage.create_folder(self.path)\n assert 'uploads' in storage.list_folders()\n\n\nclass TestFileSystemListFiles(FileSystemTestCase):\n def test_returns_list_of_files_on_success(self):\n storage = FileSystemStorage(os.path.dirname(__file__))\n assert os.path.basename(__file__) in storage.list_files()\n\n\nclass TestFileSystemOpen(FileSystemTestCase):\n def setup_method(self, method):\n FileSystemTestCase.setup_method(self, method)\n self.storage = FileSystemStorage(os.path.dirname(__file__))\n self.file = 'some_file.txt'\n\n def teardown_method(self, method):\n FileSystemTestCase.teardown_method(self, method)\n try:\n self.storage.delete(self.file)\n except StorageException:\n pass\n\n def test_raises_exception_for_unknown_file(self):\n with raises(StorageException):\n self.storage.open('some_unknown_file', 'rb')\n\n def test_returns_file_object_on_success(self):\n storage = FileSystemStorage(os.path.dirname(__file__))\n storage.save(self.file, 'something')\n file_ = storage.open(self.file, 'rb')\n assert isinstance(file_, FileSystemStorageFile)\n\n\nclass TestFileSystemDelete(FileSystemTestCase):\n def test_raises_exception_for_unknown_file(self):\n storage = FileSystemStorage(os.path.dirname(__file__))\n with raises(StorageException):\n storage.delete('some_unknown_file')\n","sub_path":"tests/test_filesystem.py","file_name":"test_filesystem.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"63740739","text":"from django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.decorators import login_required\n\nfrom .forms import LoginForm, UserRegistrationForm, UserEditForm, ProfileEditForm\nfrom .models import Profile\n\n@login_required(login_url='/account/login/')\ndef profile(request):\n return render(request, 'account/profile.html')\n\ndef register(request):\n if request.user.is_authenticated:\n return redirect('account:cabinet')\n else:\n if request.method == 'POST':\n user_form = UserRegistrationForm(request.POST)\n if user_form.is_valid():\n new_user = user_form.save(commit=True)\n new_user.set_password(user_form.cleaned_data['password'])\n Profile.objects.create(user=new_user)\n new_user.save()\n return render(request,'account/register_done.html',{'new_user': new_user})\n else:\n user_form = UserRegistrationForm()\n return render(request,'account/register.html',{'user_form': user_form})\n\n@login_required(login_url='/account/login/')\ndef edit(request):\n if request.method == 'POST':\n user_form = UserEditForm(instance=request.user,data=request.POST)\n profile_form = ProfileEditForm(instance=request.user.profile,data=request.POST,files=request.FILES)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n else:\n user_form = UserEditForm(instance=request.user)\n profile_form = ProfileEditForm(instance=request.user.profile)\n return render(request,'account/edit.html', {'user_form': user_form,'profile_form': 
profile_form})\n","sub_path":"account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"304286758","text":"#!/usr/bin/env python3\n\n#Description of this file\n\nimport logging\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')\n# logging.basicConfig(filename='log.txt', level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s') #This is to output to log.txt file\n# logging.disable(logging.CRITICAL) #Uncomment to disable all logging\n\nimport random\n\ndef average(sequence):\n total = 0\n for num in sequence:\n total += num\n average = total / len(sequence)\n logging.debug(average)\n return average\n\ndef median(sequence):\n if len(sequence) % 2 == 1: # index of median is (x + 1) / 2 - 1 or x/2 - 1/2 = (x-1)/2\n med_index = int((len(sequence) - 1)/2)\n logging.debug(med_index)\n return sequence[med_index]\n elif len(sequence) % 2 == 0: #Ex: (0, 1, 2, 3, 4, 5) Len of 6 (6+2)/2 - 1 or (6)/2 gives right median index\n middle_nums = (sequence[len(sequence)//2 - 1], sequence[len(sequence)//2])\n logging.debug(middle_nums)\n return average(middle_nums)\n\ndef duplicates(sequence):\n dup_list = []\n for item in sequence:\n if sequence.count(item) > 1:\n if item not in dup_list:\n dup_list += [item]\n logging.debug(dup_list)\n return dup_list\n\ndef print_calcs(sequence):\n ave = average(sequence)\n med = median(sequence)\n minimum = min(sequence)\n maximum = max(sequence)\n dups = duplicates(sequence)\n print('Average = {} Median = {} Min = {} Max = {} Dups = {}'.format(ave, med, minimum, maximum, dups))\n\n\n\ndef main():\n fixed_tuple = (0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50)\n\n rand_list = []\n for i in range(len(fixed_tuple)):\n rand_list.append(random.randint(min(fixed_tuple), max(fixed_tuple)))\n rand_list.sort()\n\n print('TUPLE DATA:', fixed_tuple)\n print_calcs(fixed_tuple)\n print()\n print('RANDOM LIST:', rand_list)\n print_calcs(rand_list)\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"Wk8_Number_Crunching_Program.py","file_name":"Wk8_Number_Crunching_Program.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"216938971","text":"def convert(tempC):\n tempF = (tempC * 1.8) + 32 # Calculate temperature in Farenheit\n return tempF # Return the temperature in Farenheit\n\ndef table():\n print(\" F\", \" C\", sep=\"\\t\\t\") # Print F and C at the top of the page once seperated by two tabs\n for i in range(-30, 41, 10): # Take temperatures between -30 and 40 (including) with jumps of 10\n tempC = i # Assign counter value to variable\n tempF = convert(tempC) # Assign the return of function convert to tempF\n print(tempF, tempC, sep=\"\\t\") # Print both temperatures seperated by one tab\n return\n\ntable() # Execute table() function\n","sub_path":"Les5/prE5_1.py","file_name":"prE5_1.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"489386641","text":"import os\nimport sys\nimport wx\n\nimport logger\n\nAPP = None\nLOG = logger.Logger()\nVERSION = \"0.1.0\"\nIS_WINDOWS = os.sep == '\\\\'\nWX_VERSION = wx.VERSION_STRING\n\npyversion = '.'.join(str(v) for v in sys.version_info[:3])\nSYSTEM_INFO = \"Started antelope %s using python version %s with wx version %s in %s.\" % \\\n (VERSION, pyversion, 
WX_VERSION, sys.platform)\nABOUT_RIDE = '''<h3>antelope -- Test Data Editor</h3>\n<p>antelope %s running on Python %s.</p>\n<p>antelope is a test data editor.\nFor more information, see project pages at\n<a href=\"\"></a>.</p>\n<p>Some of the icons are from <a href=\"http://www.famfamfam.com/lab/icons/silk/\">Silk Icons</a>.</p>\n''' % (VERSION, pyversion)\n","sub_path":"context/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"129226858","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nHomework 5 Part 2\r\nCreated on Fri Feb 28 20:03:33 2020\r\n\r\n@author: Emily Springer\r\n\"\"\"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport emcee\r\nimport math\r\nimport corner\r\nfrom scipy.optimize import minimize\r\n\r\n# creates a simulation of probability of getting each point\r\ndef getData(nPoints):\r\n angles = np.random.rand(nPoints) * np.pi #- np.pi/2\r\n B = 1.\r\n A = 1.\r\n return np.tan(angles) * B + A\r\n\r\ndef getPosterior(params, data):\r\n logB = 0\r\n if (params[1] > 0):\r\n logB = math.log(params[1])\r\n else:\r\n return -np.inf\r\n likelihood = np.ones(len(data)) * logB - np.ones(len(data)) * \\\r\n math.log(math.pi) - np.log(params[1]**2 + (data - params[0]) ** 2)\r\n \r\n return np.sum(likelihood)\r\n\r\nnPoints = 1000\r\n\r\ndata = []\r\n\r\n\r\nn = 1000\r\nA = np.linspace(0, 5, nPoints)\r\nB = np.linspace(0, 5, nPoints)\r\n\r\ndata = getData(n)\r\n\r\n\r\nx_true = 1\r\ny_true = 1\r\nf_true = 0.5\r\nx = np.sort(10 * np.random.rand(n))\r\nyerr = 0.1 + 0.5 * np.random.rand(n)\r\ny = x_true * x + y_true\r\n\r\n \r\nndim = 2\r\nnwalkers = 32 #number of chains \r\nchainLength = 1000\r\np0 = np.random.rand(nwalkers, ndim) * 5\r\nsampler = emcee.EnsembleSampler(nwalkers, ndim, getPosterior, args = [data])\r\nstate = sampler.run_mcmc(p0, 100)\r\nsampler.reset()\r\n\r\nsampler.run_mcmc(state, chainLength)\r\n\r\nsamples = sampler.get_chain(flat=True)\r\n\r\nalpha = np.ones(len(samples))\r\nbeta = np.ones(len(samples))\r\n\r\nfor i in range(len(samples)):\r\n string = np.array2string(samples[i])\r\n split = string[1:-1].split()\r\n alpha[i] = float(split[0])\r\n beta[i] = float(split[1])\r\n \r\nprint(\"MEDIANS\")\r\nprint(np.median(alpha))\r\nprint(np.median(beta))\r\n\r\nmaximumCoords = np.where(samples == np.amax(samples))\r\n\r\nfig = corner.corner(sampler.flatchain, labels=[r\"$\\alpha$\", r\"$\\beta$\"], bins=100)\r\n\r\n\r\n \r\n\r\n\r\n","sub_path":"hw5/hw5p2.py","file_name":"hw5p2.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"200654524","text":"#!/usr/bin/python\n# Copyright: Ansible Project\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: elb_application_lb_info\nshort_description: Gather information about application ELBs in AWS\ndescription:\n - Gather information about application ELBs in AWS\n - This module was called C(elb_application_lb_facts) before Ansible 2.9. 
The usage did not change.\nversion_added: \"2.4\"\nrequirements: [ boto3 ]\nauthor: Rob White (@wimnat)\noptions:\n load_balancer_arns:\n description:\n - The Amazon Resource Names (ARN) of the load balancers. You can specify up to 20 load balancers in a single call.\n required: false\n names:\n description:\n - The names of the load balancers.\n required: false\n\nextends_documentation_fragment:\n - aws\n - ec2\n'''\n\nEXAMPLES = '''\n# Note: These examples do not set authentication details, see the AWS Guide for details.\n\n# Gather information about all target groups\n- elb_application_lb_info:\n\n# Gather information about the target group attached to a particular ELB\n- elb_application_lb_info:\n load_balancer_arns:\n - \"arn:aws:elasticloadbalancing:ap-southeast-2:001122334455:loadbalancer/app/my-elb/aabbccddeeff\"\n\n# Gather information about a target groups named 'tg1' and 'tg2'\n- elb_application_lb_info:\n names:\n - elb1\n - elb2\n\n# Gather information about specific ALB\n- elb_application_lb_info:\n names: \"alb-name\"\n region: \"aws-region\"\n register: alb_info\n- debug:\n var: alb_info\n'''\n\nRETURN = '''\nload_balancers:\n description: a list of load balancers\n returned: always\n type: complex\n contains:\n access_logs_s3_bucket:\n description: The name of the S3 bucket for the access logs.\n returned: when status is present\n type: str\n sample: mys3bucket\n access_logs_s3_enabled:\n description: Indicates whether access logs stored in Amazon S3 are enabled.\n returned: when status is present\n type: str\n sample: true\n access_logs_s3_prefix:\n description: The prefix for the location in the S3 bucket.\n returned: when status is present\n type: str\n sample: /my/logs\n availability_zones:\n description: The Availability Zones for the load balancer.\n returned: when status is present\n type: list\n sample: \"[{'subnet_id': 'subnet-aabbccddff', 'zone_name': 'ap-southeast-2a'}]\"\n canonical_hosted_zone_id:\n description: The ID of the Amazon Route 53 hosted zone associated with the load balancer.\n returned: when status is present\n type: str\n sample: ABCDEF12345678\n created_time:\n description: The date and time the load balancer was created.\n returned: when status is present\n type: str\n sample: \"2015-02-12T02:14:02+00:00\"\n deletion_protection_enabled:\n description: Indicates whether deletion protection is enabled.\n returned: when status is present\n type: str\n sample: true\n dns_name:\n description: The public DNS name of the load balancer.\n returned: when status is present\n type: str\n sample: internal-my-elb-123456789.ap-southeast-2.elb.amazonaws.com\n idle_timeout_timeout_seconds:\n description: The idle timeout value, in seconds.\n returned: when status is present\n type: str\n sample: 60\n ip_address_type:\n description: The type of IP addresses used by the subnets for the load balancer.\n returned: when status is present\n type: str\n sample: ipv4\n load_balancer_arn:\n description: The Amazon Resource Name (ARN) of the load balancer.\n returned: when status is present\n type: str\n sample: arn:aws:elasticloadbalancing:ap-southeast-2:0123456789:loadbalancer/app/my-elb/001122334455\n load_balancer_name:\n description: The name of the load balancer.\n returned: when status is present\n type: str\n sample: my-elb\n scheme:\n description: Internet-facing or internal load balancer.\n returned: when status is present\n type: str\n sample: internal\n security_groups:\n description: The IDs of the security groups for the load balancer.\n returned: when 
status is present\n type: list\n sample: ['sg-0011223344']\n state:\n description: The state of the load balancer.\n returned: when status is present\n type: dict\n sample: \"{'code': 'active'}\"\n tags:\n description: The tags attached to the load balancer.\n returned: when status is present\n type: dict\n sample: \"{\n 'Tag': 'Example'\n }\"\n type:\n description: The type of load balancer.\n returned: when status is present\n type: str\n sample: application\n vpc_id:\n description: The ID of the VPC for the load balancer.\n returned: when status is present\n type: str\n sample: vpc-0011223344\n'''\n\nimport traceback\n\ntry:\n import boto3\n from botocore.exceptions import ClientError, NoCredentialsError\n HAS_BOTO3 = True\nexcept ImportError:\n HAS_BOTO3 = False\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.ec2 import (boto3_conn, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict,\n ec2_argument_spec, get_aws_connection_info)\n\n\ndef get_elb_listeners(connection, module, elb_arn):\n\n try:\n return connection.describe_listeners(LoadBalancerArn=elb_arn)['Listeners']\n except ClientError as e:\n module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))\n\n\ndef get_listener_rules(connection, module, listener_arn):\n\n try:\n return connection.describe_rules(ListenerArn=listener_arn)['Rules']\n except ClientError as e:\n module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))\n\n\ndef get_load_balancer_attributes(connection, module, load_balancer_arn):\n\n try:\n load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes'])\n except ClientError as e:\n module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))\n\n # Replace '.' with '_' in attribute key names to make it more Ansibley\n for k, v in list(load_balancer_attributes.items()):\n load_balancer_attributes[k.replace('.', '_')] = v\n del load_balancer_attributes[k]\n\n return load_balancer_attributes\n\n\ndef get_load_balancer_tags(connection, module, load_balancer_arn):\n\n try:\n return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags'])\n except ClientError as e:\n module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))\n\n\ndef list_load_balancers(connection, module):\n\n load_balancer_arns = module.params.get(\"load_balancer_arns\")\n names = module.params.get(\"names\")\n\n try:\n load_balancer_paginator = connection.get_paginator('describe_load_balancers')\n if not load_balancer_arns and not names:\n load_balancers = load_balancer_paginator.paginate().build_full_result()\n if load_balancer_arns:\n load_balancers = load_balancer_paginator.paginate(LoadBalancerArns=load_balancer_arns).build_full_result()\n if names:\n load_balancers = load_balancer_paginator.paginate(Names=names).build_full_result()\n except ClientError as e:\n if e.response['Error']['Code'] == 'LoadBalancerNotFound':\n module.exit_json(load_balancers=[])\n else:\n module.fail_json(msg=e.message, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))\n except NoCredentialsError as e:\n module.fail_json(msg=\"AWS authentication problem. 
\" + e.message, exception=traceback.format_exc())\n\n for load_balancer in load_balancers['LoadBalancers']:\n # Get the attributes for each elb\n load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn']))\n\n # Get the listeners for each elb\n load_balancer['listeners'] = get_elb_listeners(connection, module, load_balancer['LoadBalancerArn'])\n\n # For each listener, get listener rules\n for listener in load_balancer['listeners']:\n listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn'])\n\n # Turn the boto3 result in to ansible_friendly_snaked_names\n snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers['LoadBalancers']]\n\n # Get tags for each load balancer\n for snaked_load_balancer in snaked_load_balancers:\n snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn'])\n\n module.exit_json(load_balancers=snaked_load_balancers)\n\n\ndef main():\n\n argument_spec = ec2_argument_spec()\n argument_spec.update(\n dict(\n load_balancer_arns=dict(type='list'),\n names=dict(type='list')\n )\n )\n\n module = AnsibleModule(argument_spec=argument_spec,\n mutually_exclusive=[['load_balancer_arns', 'names']],\n supports_check_mode=True\n )\n if module._name == 'elb_application_lb_facts':\n module.deprecate(\"The 'elb_application_lb_facts' module has been renamed to 'elb_application_lb_info'\", version='2.13')\n\n if not HAS_BOTO3:\n module.fail_json(msg='boto3 required for this module')\n\n region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)\n\n if region:\n connection = boto3_conn(module, conn_type='client', resource='elbv2', region=region, endpoint=ec2_url, **aws_connect_params)\n else:\n module.fail_json(msg=\"region must be specified\")\n\n list_load_balancers(connection, module)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"env/lib/python3.9/site-packages/ansible/modules/cloud/amazon/elb_application_lb_info.py","file_name":"elb_application_lb_info.py","file_ext":"py","file_size_in_byte":10825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"154367987","text":"from multiprocessing import Process\nfrom binance_listing import BinanceListing\nfrom coinsniper import CoinSniper\nfrom paratica import Paratica\n\n\ndef run_in_parallel(*fns):\n proc = []\n for fn in fns:\n p = Process(target=fn)\n p.start()\n proc.append(p)\n for p in proc:\n p.join()\n\nbinance = BinanceListing()\nsniper = CoinSniper()\nparatica = Paratica()\n\nif __name__ == \"__main__\":\n run_in_parallel(binance.run, sniper.run, paratica.run)","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"624770154","text":"from com.creatorfromhell.cakebot.CakeBot import CakeBot\nfrom com.creatorfromhell.cakebot.eater.Eater import Eater\nimport discord\nimport asyncio\n\nclient = discord.Client()\n\nbot = CakeBot()\n\ndefault_rank = 'Member'\nserver_join_message = True\n\nlimit = 2500\n\n\n@client.event\nasync def on_ready():\n print('Logged in as ' + client.user.name)\n\n\n@client.event\nasync def on_message(message):\n print('<' + message.author.name + ':' + message.channel.server.name + '->' + message.channel.name + '>' + message.content)\n\n if message.author.id not in bot.eaters:\n bot.eaters[message.author.id] = 
Eater(message.author.id)\n\n    if await bot.handle_message(bot, client, message):\n        return\n\n    if len(client.messages) >= limit:\n        bot.clean_messages(client)\n\n\n@client.event\nasync def on_member_join(member):\n    server = member.server\n    fmt = member.mention + ' welcome! Let\\'s just fix your rank..'\n    await client.send_message(server, fmt.format(member, server))\n    await client.add_roles(member, get_role(server, default_rank))\n\n\n@client.event\nasync def on_server_join(server):\n    if server_join_message:\n        for channel in server.channels:\n            await client.send_message(channel, \"Happy Birthday!\")\n\n\ndef get_role(server, name):\n    return next((r for r in server.roles if r.name == name), None)\n\nclient.run('your bot\\'s application token')","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"100441044","text":"from collections import defaultdict\n\n\ndef solve(input_s: str):\n    ans = 0\n    n = len(input_s)\n    mod = 2019\n    dp = defaultdict(int)\n    cur = 0\n    for i in range(n, -1, -1):\n        if i == n:\n            dp[0] += 1\n        else:\n            # cur += ((10 ** (n - i - 1)) * int(input_s[i])) % mod  this is extremely slow when n gets large\n            cur += pow(10, n - i - 1, 2019) * int(input_s[i]) % mod\n            cur %= mod\n            dp[cur] += 1\n    for v in dp.values():\n        if v > 1:\n            ans += v * (v - 1) // 2\n    return ans\n\n\nif __name__ == '__main__':\n    S = input()\n    print(solve(S))","sub_path":"Python_codes/p02702/s793114766.py","file_name":"s793114766.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"72266583","text":"import scrapy\nimport time\nfrom scrapy import Selector\nfrom scrapys.items import MaoyanItem\nfrom selenium import webdriver\n\nclass MaoyanSpider(scrapy.Spider):\n    name = 'maoyan'\n    allowed_domains = ['maoyan.com']\n    start_urls = ['https://maoyan.com/films?showType=3']\n\n    def __init__(self):\n        try:\n            browser = webdriver.Chrome()\n            browser.get('https://passport.meituan.com/account/unitivelogin')\n            time.sleep(1)\n\n            # enter the account and password\n            browser.find_element_by_xpath('//*[@id=\"login-email\"]').send_keys('13552656607')\n            browser.find_element_by_xpath('//*[@id=\"login-password\"]').send_keys('')\n            time.sleep(1)\n\n            # click the login button\n            browser.find_element_by_xpath('//*[@id=\"J-normal-form\"]/div[5]/input[5]').click()\n\n            cookie = browser.get_cookies()\n            self.headers(cookie)\n        except Exception as e :\n            print(\"----获取header头失败,可能会抓取失败!----\")\n            print(e)\n        finally:\n            browser.close()\n\n    def start_requests(self):\n        url = 'https://maoyan.com/films?showType=3'\n        yield scrapy.Request(url=url, callback=self.parse)\n\n    def parse(self, response):\n        movies = Selector(response=response).xpath('//*[@class=\"movie-item film-channel\"]')\n        for movie in movies :\n            link = \"https://maoyan.com\" + movie.xpath('./a/@href').extract_first()\n            yield scrapy.Request(url=link,callback=self.parse2)\n\n    def parse2(self ,response):\n        item = MaoyanItem()\n        time = Selector(response=response).xpath('//div[3]/div/div[2]/div[1]/ul/li[3]/text()').extract_first()\n        name = Selector(response=response).xpath('//div[3]/div/div[2]/div[1]/h1/text()').extract_first()\n        list_t = \"\"\n        tyeps = Selector(response=response).xpath('//div[3]/div/div[2]/div[1]/ul/li[1]/a')\n        for type in tyeps :\n            list_t = list_t + type.xpath('./text()').extract_first()\n\n        item['time'] = time\n        item['name'] = name\n        item['type'] = list_t.strip()\n        yield 
item\n","sub_path":"week02/scrapys/scrapys/spiders/maoyan.py","file_name":"maoyan.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"256696919","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: tcarroll\n\"\"\"\n\nfrom ruffus import *\nfrom cgatcore import pipeline as P\nimport os\nimport sys\nimport pandas as pd\n\n@transform(\"*.fastq.gz\", suffix('.fastq.gz'),'_fastqc.html')\ndef fastqc(infile, outfile):\n statement = '''fastqc %(infile)s --nogroup -t 6'''\n P.run(statement, job_queue = 'batchq', job_threads = 6)\n\n@merge(fastqc, 'multiqc_report.html')\ndef multiqc(infiles, outfile):\n path = os.path.split(os.path.abspath(infiles[0]))[0]\n statement = '''multiqc %(path)s 2> %(outfile)s.log'''\n P.run(statement, job_queue = 'batchq')\n \n@transform('*.fastq.gz', suffix('.fastq.gz'),'.sam')\ndef mappy(infile,outfile):\n statement='''hisat2 -x /t1-data/databank/igenomes/Homo_sapiens/UCSC/hg38/Sequence/HISAT2/genome\n -U %(infile)s -S %(outfile)s -p 12'''\n P.run(statement, job_queue = 'batchq', job_threads = 12)\n \n@transform(mappy, suffix('.sam'),'.bam')\ndef toBam(infile, outfile):\n statement = '''samtools sort -@ 6 -o %(outfile)s %(infile)s'''\n P.run(statement, job_queue = 'batchq', job_threads = 6)\n \n@transform(toBam, suffix('.bam'),'.bai')\ndef index(infile, outfile):\n statement = '''samtools index %(infile)s'''\n P.run(statement, job_queue = 'batchq')\n \n#@transform(toBam, suffix('.bam'),'.yml')\n#def picard(infile, outfile):\n# statement = '''picard CollectAlignmentSummaryMetrics \n# R=/t1-data/databank/igenomes/Mus_musculus/UCSC/mm10/Sequence/WholeGenomeFasta/genome.fa \n# INPUT=%(infile)s \n# OUTPUT=%(outfile)s'''\n# P.run(statement, job_queue = 'batchq')\n\n@transform(toBam, suffix('.bam'),'.flagstat')\ndef flagstat(infile, outfile):\n statement = '''samtools flagstat %(infile)s > %(outfile)s'''\n P.run(statement, job_queue = 'batchq')\n\n@transform(toBam, suffix('.bam'), '.counts')\ndef featureCounts(infile, outfile):\n statement = '''featureCounts -T12 -a /t1-data/databank/igenomes/Homo_sapiens/UCSC/hg38/Annotation/Genes/genes.gtf \n -o %(outfile)s %(infile)s'''\n P.run(statement, job_queue = 'batchq', job_threads = 12)\n \n@merge(featureCounts, 'Count_Table.txt')\ndef Table(infiles,outfile):\n count = 0\n for x in infiles:\n if count == 0:\n Temp_Table = pd.read_csv(x, skiprows = 1, sep = '\\t')\n else:\n Temp_Table2 = pd.read_csv(x, skiprows = 1, sep = '\\t')\n Temp_Table2 = Temp_Table2.iloc[:,[6]]\n Temp_Table = pd.concat([Temp_Table,Temp_Table2], axis = 1)\n count += 1\n Temp_Table.to_csv(outfile, sep = '\\t', index = False)\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n P.main(argv)\n\nif __name__ == \"__main__\":\n sys.exit(P.main(sys.argv))\n","sub_path":"pipelines/rnaseq_pipeline_humanSE.py","file_name":"rnaseq_pipeline_humanSE.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"548760803","text":"import pyedflib\r\nimport numpy as np\r\nf = pyedflib.EdfReader(\"EDF_DB/SC4002E0-PSG.edf\")\r\nn = f.signals_in_file\r\nsignal_labels = f.getSignalLabels()\r\nsigbufs = f.readSignal(0)\r\n#%%\r\n\r\nf2 = pyedflib.EdfReader(\"EDF_DB/SC4002EC-Hypnogram.edf\")\r\nn2 = f2.signals_in_file\r\na=f2.readAnnotations()\r\n#\r\n#%%\r\nf2._close()\r\ndel f2\r\n\r\nf._close()\r\ndel f\r\n#%%\r\n#input single channel signal\r\ndef 
createWindowsBySamples(SingleChannelSignal, WSizeSamples):\r\n \r\n WSizeSamples=int(WSizeSamples)\r\n WindowsInSignal=int(SingleChannelSignal.size/WSizeSamples)\r\n ArrayOfWindows = np.zeros((WindowsInSignal, WSizeSamples))\r\n for i in np.arange(WindowsInSignal):\r\n ArrayOfWindows[i,:]=SingleChannelSignal[(i*WSizeSamples):((i+1)*WSizeSamples)]\r\n \r\n return ArrayOfWindows\r\n\r\n#100Hz\r\nT=0.01\r\nWindowTime=30\r\nWindowSamples=30/T\r\n\r\nSCNSignal=sigbufs\r\n\r\nWindows=createWindowsBySamples(SCNSignal,WindowSamples)\r\n\r\n#154 lenght \r\n#1 duracion\r\n#2 duracion intervalo\r\nSignalAnnotations=a\r\nSingleChannelSignal=SCNSignal\r\nWSizeSamples=WindowSamples\r\n#%%\r\n\r\ndef labelSignal(SingleChannelSignal, SignalAnnotations, T, WSizeSamples):\r\n \r\n #ArrayOfWindows=createWindowsBySamples(SingleChannelSignal)\r\n NumberOfWindows=int(SingleChannelSignal.size/WSizeSamples)\r\n LAnnotation=SignalAnnotations[0].size\r\n SignalIntervalLabels=SignalAnnotations[0]/T\r\n SignalLabels=SignalAnnotations[2]\r\n LabelArray=np.empty([NumberOfWindows], dtype='U25')\r\n start=0\r\n \r\n for i in np.arange(LAnnotation-1):\r\n LabelLenght=SignalIntervalLabels[i+1]-SignalIntervalLabels[i]\r\n CurrentLabel=SignalLabels[i]\r\n WindowsInInterval=int(LabelLenght/WSizeSamples)\r\n for j in np.arange(WindowsInInterval):\r\n LabelArray[j+start]=CurrentLabel\r\n start+=WindowsInInterval\r\n \r\n return LabelArray\r\n\r\ndef binaryLabel(LabelArray):\r\n \r\n BinaryArray = np.zeros((LabelArray.size))\r\n for i in np.arange(LabelArray.size):\r\n if LabelArray[i][-1]=='W':\r\n BinaryArray[i]=1\r\n return BinaryArray\r\n\r\n \r\n \r\nLabelArray=labelSignal(SCNSignal, a, T, WindowSamples)\r\nBinaryLabels=binaryLabel(LabelArray)\r\nDataBase=np.concatenate((Windows, BinaryLabels.reshape((1, BinaryLabels.size)).T), axis=1)\r\n\r\n\r\n\r\n\r\n#%%\r\nimport csv\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.patches as mpatches\r\nimport sklearn as skl \r\nfrom sklearn import model_selection\r\n\r\nTrain, Test=model_selection.train_test_split(DataBase, test_size=0.5,\r\n stratify=DataBase[:,3000],\r\n random_state=21)\r\n\r\n \r\nsign_train = Train[:,0:3000];\r\nclass_train = Train[:,3000];\r\n\r\nsign_test = Test[:,0:3000];\r\nclass_test = Test[:,3000];\r\n#%%\r\n\r\nimport numpy as np\r\nimport scipy as sp\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn import preprocessing\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.metrics import confusion_matrix\r\n#Se procede a extraer caracteristicas de las imf calculadas\r\n\r\n#funciones de extraccion de caracteristicas entregadas\r\n \r\ndef crosses(my_array):\r\n return ((my_array[:,:-1] * my_array[:,1:]) < 0).sum(axis=1)\r\n\r\ndef slope_sign_changes(my_array):\r\n w = np.shape(my_array)[1];\r\n d1 = my_array[:,1:w-2] - my_array[:,2:w-1]\r\n d2 = my_array[:,2:w-1] - my_array[:,3:w]\r\n return np.double( np.sum((d1*d2) < 0, 1) );\r\n\r\ndef extr_feat(s):\r\n h = np.shape(s)[0]\r\n w = np.shape(s)[1];\r\n feat = np.zeros((h, 10))\r\n feat[:,0] = np.sum(np.abs(s),1); # iemg\r\n feat[:,1] = crosses(s); #zc\r\n feat[:,2] = slope_sign_changes(s); #ssc\r\n feat[:,3] = np.sum(np.abs(s[:,1:w-1] - s[:,2:w]),1); # wl\r\n feat[:,4] = np.sum(np.abs(s[:,1:w-1] - s[:,2:w] > 0.1),1); # wamp\r\n feat[:,5] = np.var(s,1);\r\n feat[:,6] = sp.stats.skew(s,1);\r\n feat[:,7] = sp.stats.kurtosis(s,1);\r\n feat[:,8] = np.median(s,1);\r\n feat[:,9] = np.std(s,1);\r\n return feat;\r\n\r\n#se extraen las caracteristicas 
para Train y Validation\r\ncaract_train = (extr_feat(sign_train[:,0:3000]));\r\n\r\ncaract_test = (extr_feat(sign_test[:,0:3000]));\r\n\r\n\r\n\r\n\r\n\r\n#%%\r\n\r\n#features of each set are normalized using the parameters of the train set\r\nscaler = preprocessing.StandardScaler().fit(caract_train);\r\ncaract_train = scaler.transform(caract_train);\r\ncaract_test = scaler.transform(caract_test);\r\n\r\n#%%\r\n\r\n#function to compute performance metrics\r\ndef TVFP(Confusion):\r\n    #create an array of zeros to hold the TVP and TFP of each class\r\n    TVFP=np.zeros((Confusion.shape[0],2))\r\n    #sum over the diagonal elements\r\n    suma=0\r\n    #iterate over the classes of the confusion matrix to obtain the TVP and TFP of\r\n    #each one\r\n    for i in range(0, Confusion.shape[0]):\r\n        #TVP is computed by dividing each class's diagonal entry by the total\r\n        #number of samples of that class\r\n        TVP= Confusion[i,i]/np.sum(Confusion[i,:])\r\n        #TFP is computed by summing the values predicted as the class and subtracting the diagonal element,\r\n        #then dividing by all the data except the class's row\r\n        TFP=(np.sum(Confusion[:,i])-Confusion[i,i])/(np.sum(Confusion)-np.sum(Confusion[i,:]))\r\n        TVFP[i,:]=[TVP,TFP]\r\n        #add up the diagonals\r\n        suma=suma+Confusion[i,i]\r\n    \r\n    #finally, the average TVP and TFP of the classifier is appended \r\n    PromTVP=np.sum(TVFP[:,0])/TVFP.shape[0]\r\n    PromTFP=np.sum(TVFP[:,1])/TVFP.shape[0]\r\n    Prom=np.array([[PromTVP,PromTFP]])\r\n    TVFP=np.concatenate((TVFP,Prom), axis=0)\r\n    #return the array\r\n    Acc=suma/np.sum(Confusion)\r\n    return TVFP, Acc\r\n\r\n\r\n#%%\r\n#train with an MLP\r\nclf = MLPClassifier(solver='adam', alpha=1e-5, tol=1e-5, hidden_layer_sizes=(300,50), max_iter = 10000, random_state=1);\r\nclf.fit(caract_train, class_train);\r\n\r\n#classify the validation set\r\npred = clf.predict(caract_test);\r\n\r\n#compute performance metrics\r\nconf = confusion_matrix( class_test, pred);\r\nRates, Acc= TVFP(conf)\r\nprint('MLP')\r\nprint('Acc= ', Acc*100 );\r\n#print('TVP= ', Rates[6,0]*100 );\r\n#print('TFP= ', Rates[6,1]*100 );\r\nprint( conf );\r\n#%%\r\n\r\n#the previous process is repeated, but for a Random Forest classifier\r\nclf = RandomForestClassifier(n_estimators=50,max_leaf_nodes=100,n_jobs=-1,random_state=0)\r\nclf.fit(caract_train, class_train);\r\npred = clf.predict(caract_test);\r\n\r\nconf = confusion_matrix( class_test, pred);\r\nRates, Acc= TVFP(conf)\r\nprint('RF')\r\nprint('Acc= ', Acc*100 );\r\n#print('TVP= ', Rates[6,0]*100 );\r\n#print('TFP= ', Rates[6,1]*100 );\r\nprint( conf );\r\n    \r\n#7950000=22.0833333hrs\r\n#sigbufs2 = np.zeros((n2, f2.getNSamples()))\r\n#T=0.01\r\n#import main_load_edf\r\n#\r\n#psg_dir = 'SC4001E0-PSG.edf'\r\n#ann_dir = 'SC4001EC-Hypnogram.edf'\r\n#\r\n#main_load_edf(psg_dir, ann_dir, \"file\")\r\n#%%\r\n\r\nimport glob\r\n\r\nx=glob.glob(\"EDF_DB/*PSG.edf\")\r\n","sub_path":"EEG/EEGv2.py","file_name":"EEGv2.py","file_ext":"py","file_size_in_byte":6851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"67368422","text":"# coding=utf-8\n\"\"\"\nModule that manages the menu items of the \"File\" item\n\"\"\"\nimport gtk\n\n\ndef file_chooser_dialog(self):\n\t\"\"\"\n\tCreate the dialog for choosing the text file to open and display\n\tin the main window\n\t\"\"\"\n\tself.text = \"\"\n\tdialog = gtk.FileChooserDialog(\"Open Dialog\",\n\t\tNone,\n\t\tgtk.FILE_CHOOSER_ACTION_OPEN,\n\t\t(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,\n\t\t gtk.STOCK_OPEN, gtk.RESPONSE_OK))\n\tdialog.set_default_response(gtk.RESPONSE_OK)\n\tdialog.set_icon_from_file(\"logo.png\")\n\n\t# Filter section\n\tfile_filter = gtk.FileFilter()\n\tfile_filter.set_name(\"Tutti i file .txt\")\n\tfile_filter.add_pattern(\"*.txt\")\n\tdialog.set_filter(file_filter)\n\t# After the file has been selected\n\tresponse = dialog.run()\n\tif response == gtk.RESPONSE_OK:\n\t\tself.configuration.set_input_file(dialog.get_filename())\n\t\t# Open the file to read its text so it can be displayed\n\t\ttext_file = open(dialog.get_filename(), 'r')\n\t\tself.text = text_file.read()\n\tdialog.destroy()\n\tif response == gtk.RESPONSE_OK:\n\t\tself.fill_txt(self.text)\n\n\ndef close_text(self):\n\t\"\"\"\n\tHandle closing the currently displayed text file\n\t\"\"\"\n\tself.vbox.remove(self.scrolled_window)\n\tself.scrolled_window = gtk.ScrolledWindow()\n\tself.scrolled_window.set_border_width(10)\n\tself.scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n\tself.vbox.pack_start(self.scrolled_window, True, True, 0)\n\tself.open_item.set_sensitive(True)\n\tself.close_item.set_sensitive(False)\n\tself.operat_item.set_sensitive(False)\n\n\t# Remove the text file from the current configuration, i.e. from the\n\t# 'Configuration' object\n\tself.configuration.set_input_file(None)\n\n\ndef quit_program(self):\n\t\"\"\"\n\tHandle closing the application\n\t\"\"\"\n\tself.stop_name_server()\n\tgtk.main_quit()","sub_path":"src/handlers/file_handlers.py","file_name":"file_handlers.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"411979773","text":"from lect_5_hw_1 import Person\n\nclass Employee(Person):\n    def __init__(self, full_name, year, position, experience, salary):\n        Person.__init__(self, full_name, year)\n        self.position = position\n        self.experience = experience\n        self.salary = salary\n\n    def full_information(self):\n        if self.experience <= 3:\n            junior = \"Junior\" + \" \" + self.position\n            return junior\n        if 3 < self.experience <= 6:\n            middle = \"Middle\" + \" \" + self.position\n            return middle\n        else:\n            senior = \"Senior\" + \" \" + self.position\n            return senior\n\n    def increase_salary(self, increase_sal):\n        new_salary = self.salary + increase_sal\n        return new_salary\n\nif __name__ == \"__main__\":\n    a = Employee(\"Alex Ustinov\", 1986, \"Developer\", 6, 1000)\n    print(a.full_information())\n    print(a.increase_salary(200))","sub_path":"lect_5_hw_1_2.py","file_name":"lect_5_hw_1_2.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"55195330","text":"import xml.etree.cElementTree as et\nfrom Building import Building\n\nclass To_XML(Building):\n    def __init__(self, id, no_of_floor, addr):\n        Building.__init__(self, id, no_of_floor, addr)\n    \n    def to_XML(self):\n        tree = et.parse('test.xml')\n        data = tree.getroot()\n        \n        et.SubElement(data, 'building', id=self.id, no_of_floor=self.no_of_floor, addr = self.addr).text=''\n        \n        tree.write('test.xml')\n\nid = input(\"Enter building ID: \")\nno_of_floor = input(\"Enter building floors: \")\naddr = input(\"Enter building address: \")\n\ntest = To_XML(id, no_of_floor, addr)\ntest.to_XML()","sub_path":"Net 
Centric/Lab05/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"190922738","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import division, unicode_literals, print_function\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nfrom zipfile import ZipFile\nfrom itertools import combinations\nimport os\n\nfrom Crypto.PublicKey import RSA\nfrom sage.all import gcd, Zmod\n\n\ndef write_key(path, key, mod):\n with open(path, 'w') as output_file:\n print('Key (%s):\\n' % path, key, end='\\n\\n')\n output_file.write(key)\n os.chmod(path, mod)\n\n\ndef get_keys_pair():\n keys = []\n keys_archieve = ZipFile('keys')\n for filename in keys_archieve.infolist():\n with keys_archieve.open(filename) as key_file:\n keys.append(RSA.importKey(key_file.read()))\n\n for key1, key2 in combinations(keys, 2): # перебираем все пары ключей\n keys_gcd = gcd(key1.n, key2.n)\n if keys_gcd != 1:\n return (key1, keys_gcd)\n\n\ndef find_private_key(key, p):\n ring = Zmod((p - 1) * (key.n // p - 1)) # кольцо фи\n compl = int(ring(key.e) ** -1) # обратный в нем к e\n\n key = RSA.construct((key.n, key.e, compl)) # пару ключей\n return key\n\n\ndef main():\n key_data = get_keys_pair()\n key = find_private_key(*key_data)\n\n write_key('output/key.pub', key.publickey().exportKey(), 0o644)\n write_key('output/key', key.exportKey(), 0o600)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"04/2/evil.py","file_name":"evil.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"292720250","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\nimport datetime\n\nmainURL = \"https://deepindex.org/\"\n\ntopic = []\nsize=[]\nprojects=[]\ngroups =[]\n\npage = urlopen(mainURL)\nsoup = bs(page)\ntopics = soup.find_all('h3')\nfor topic in topics:\n\tproject = topic.find('p').text\n\tprojects.append(project)\n\tgroups.append(topic)\n\ndeep = pd.DataFrame({'Category': groups,\n\t\t\t\t\t 'Projects': projects\n\t\t\t\t\t\t})\n\ndeep['Status'] = str(datetime.date.today()) # new column: when the query was done\n\n\n#--------------------------------------------------------------------------------------\n\ntopics = []\ntopics = soup.find_all(\"div\", class_=\"col-md-4\")\nfor topic in topics:\n print(topic.find('p').text)\n\n\n","sub_path":"AI_projects.py","file_name":"AI_projects.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"454789551","text":"from django.db import models\nfrom django.utils import timezone\n\nimport datetime\n\n\nclass RequisicionManager(models.Manager):\n \"\"\"\n Manager para las requisiciones\n \"\"\"\n\n def aprobadas_jefe_departamento(self):\n \"\"\"\n Retorna un QuerySet con las requisiciones aprobadas por jefes\n \"\"\"\n from .models import Historial\n return (self.filter(\n historial__estado=Historial.APROBADA, historial__empleado__jefe_departamento=True\n ).exclude(estado=self.model.ANULADA) | self.rechazadas_jefe_administrativo()).distinct()\n\n def rechazadas_jefe_administrativo(self):\n \"\"\"\n Retorna un Queryset con las requisiciones Rechazadas por el jefe administrativo\n \"\"\"\n from .models import Historial\n return self.annotate(\n num_historial=models.Count('historial')\n ).filter(\n 
historial__empleado__areas__departamento__nombre__icontains='administra',\n historial__estado=Historial.RECHAZADA,\n num_historial__gt=2\n )\n\n def ingresadas_compras(self):\n \"\"\"\n Retorna un QuerySet con las requisiciones que acaban de entrar a compras\n \"\"\"\n return self.filter(\n historial__empleado__jefe_departamento=True\n ).exclude(\n estado__in=[self.model.ANULADA, self.model.TERMINADA],\n ).exclude(\n historial__empleado__areas__nombre__icontains='compra'\n ).order_by(\n '-fecha_ingreso', 'prioridad'\n ).distinct()[:5]\n\n def ingresadas_administrativo(self):\n \"\"\"\n Retorna un QuerySet con las requisiciones ingresadas en jefe administativo\n \"\"\"\n return self.annotate(\n num_historial=models.Count('historial')\n ).filter(\n num_historial=2\n ).exclude(\n estado__in=[self.model.ANULADA, self.model.TERMINADA]\n )\n\n def ingresadas_financiero(self):\n \"\"\"\n Retorna un Queryset con las requisiciones ingresadas en jefe financiero\n \"\"\"\n exclude = list(\n self.ingresadas_administrativo().values_list('id', flat=True)\n ) + list(\n self.ingresadas_compras().values_list('id', flat=True)\n )\n query = (self.filter(\n estado=self.model.PROCESO,\n ) | self.filter(\n estado=self.model.PROCESO,\n historial__empleado__usuario__user_permissions__codename='organizacional.es_presidente'\n ) | self.annotate(\n num_historial=models.Count('historial')\n ).filter(\n estado=self.model.PROCESO,\n historial__empleado__usuario__is_superuser=True,\n num_historial=4\n ).exclude(\n id__in=exclude\n )).distinct()\n return [q for q in query if q.get_rastreo() == self.model.DATA_SET['financiero']]\n\n def ultimo_mes(self, *args, **kwargs):\n \"\"\"\n Retorna un QuerySet con las requisiciones ingresadas el ultimo mes\n \"\"\"\n hoy = timezone.now().today()\n\n return self.filter(\n historial__empleado__jefe_departamento=True,\n fecha_ingreso__range=(\n datetime.date(year=hoy.year, month=hoy.month, day=1),\n hoy + datetime.timedelta(days=1)\n )\n ).filter(*args, **kwargs).distinct()\n\n def finalizadas_mes(self, *args, **kwargs):\n \"\"\"\n Retorna un QuerySet con las requisiciones finalizadas en el mes\n \"\"\"\n hoy = timezone.now()\n\n return self.filter(\n fecha_termina__range=(\n datetime.date(year=hoy.year, month=hoy.month, day=1),\n hoy + datetime.timedelta(days=1)\n ),\n estado=self.model.TERMINADA\n ).filter(*args, **kwargs)\n\n def aprobadas_compras(self):\n \"\"\"\n Retorna un QuerySet con las requisiciones aprobadas por usuario de compras\n \"\"\"\n\n return self.annotate(\n num_historial=models.Count('historial')\n ).exclude(num_historial__lt=2, estado=self.model.ANULADA)\n\n def aprobadas_jefe_administrativo(self):\n \"\"\"\n Retorna un QuerySet con las requisiciones aprobadas por un jefe administrativo\n \"\"\"\n # se sacan las requisiciones que tengan mas de 3 historiales y se excluyen las que esten en presidencia\n return self.annotate(\n num_historial=models.Count('historial')\n ).exclude(\n num_historial__lt=3).exclude(\n estado=self.model.ANULADA).exclude(\n id__in=self.en_presidencia().values_list('id', flat=True) # presidencia es excluido\n )\n\n def aprobadas_jefe_financiero(self):\n \"\"\"\n Retorna un Queryset con las requisiciones a las cuales un jefe financiero ha\n puesto una fecha de pago\n \"\"\"\n # return self.aprobadas_jefe_administrativo().exclude(fecha_pago=None)\n from .models import DetalleRequisicion\n return self.filter(\n presupuesto_aprobado=self.model.SI,\n detallerequisicion__forma_pago__in=[DetalleRequisicion.EFECTIVO, DetalleRequisicion.DEBITO]\n 
).exclude(estado=self.model.ANULADA).exclude(fecha_pago=None)\n\n def en_presidencia(self):\n \"\"\"\n Retorna un Queryset con las requisiciones que han superado un tope maximo definido\n por los parametros iniciales, siempre y cuando haya sido aprobada por un usuario jefe\n administrativo\n \"\"\"\n from .models import Parametros\n\n # se sacan las requisiciones que vallan a presidencia por superar cierto monto\n query_for_total = self.annotate(\n total_valores=models.Sum('detallerequisicion__total_aprobado')\n ).filter(total_valores__gte=Parametros.objects.tope()).values_list('id', flat=True)\n\n # se sacan las requisiciones que tengan 3 historiales\n query_for_historial = self.annotate(\n num_historial=models.Count('historial')\n ).filter(num_historial=3).values_list('id', flat=True)\n\n # se hace la intercepcion de los id, es decir las requisiciones que cumplan con las dos condiciones\n query = set(query_for_total) & set(query_for_historial)\n\n return self.filter(id__in=query)\n\n\nclass ParametrosManager(models.Manager):\n \"\"\"\n Manager para la clase de parametros\n \"\"\"\n\n def dias(self):\n \"\"\"\n Devuelve el ultimo valor que tenga el campo de dias\n \"\"\"\n return self.last().dias_habiles\n\n def tope(self):\n \"\"\"\n Devuelve el ultimo valor que tenga en el campo de tope de monto\n \"\"\"\n return self.last().tope_monto\n\n\nclass DetalleRequisicionManager(models.Manager):\n \"\"\"\n Clase de manager para el modelo de DetalleRequisicion\n \"\"\"\n\n def salida_credito_mes(self):\n \"\"\"\n Retorna la cantidad de dinero que ha salido por items en credito\n \"\"\"\n hoy = timezone.now().date()\n\n return self.filter(\n requisicion__historial__fecha__range=(\n datetime.date(year=hoy.year, month=hoy.month, day=1), hoy + datetime.timedelta(days=1)\n ),\n forma_pago=self.model.CREDITO,\n cumplida=True\n ).aggregate(models.Sum('total_aprobado'))\n\n def salida_efectivo_mes(self):\n \"\"\"\n Retorna la cantidad de dinero que ha salido por items en efectivo\n \"\"\"\n hoy = timezone.now().date()\n\n return self.filter(\n requisicion__historial__fecha__range=(\n datetime.date(year=hoy.year, month=hoy.month, day=1), hoy + datetime.timedelta(days=1)\n ),\n forma_pago__in=[self.model.EFECTIVO, self.model.DEBITO],\n cumplida=True\n ).aggregate(models.Sum('total_aprobado'))\n","sub_path":"compras/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":7753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"5372377","text":"\"\"\" Log configuration with detailed output.\"\"\"\n\nimport logging\nimport logging.config\n\nclass FilenameLinenoFilter(logging.Filter):\n \"\"\" Logging filter to write a colon-separated filename and line number.\"\"\"\n def filter(self, record):\n record.filename_lineno = '{}:{}'.format(record.filename, record.lineno)\n return True\n\n\nLOG_SETTINGS = {\n 'version': 1,\n 'filters': {\n 'filename_lineno_filter': {\n '()': FilenameLinenoFilter,\n },\n },\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)s %(filename_lineno)-10s %(levelname)5s | %(message)s',\n\n # For full pathname, in case of unknown source of message:\n #'format': '%(asctime)s %(threadName)-10s %(pathname)s $(lineno)s \\\n # %(levelname)5s | %(msg)s',\n },\n },\n 'handlers': {\n 'default': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'filters': ['filename_lineno_filter'],\n 'formatter': 'standard',\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['default'],\n 'level': 'DEBUG',\n 'propagate': True\n 
},\n },\n}\nlogging.config.dictConfig(\n LOG_SETTINGS,\n)\n\n# Quiet down the uninteresting loggers.\nmodules = (\n 'requests', 'apscheduler',\n)\nfor module in modules:\n try:\n logging.getLogger(module).setLevel(logging.CRITICAL)\n except Exception:\n pass\n","sub_path":"log_config.py","file_name":"log_config.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"37104965","text":"import logging\n\nfrom discord import Member, PermissionOverwrite, utils\nfrom discord.ext import commands\nfrom more_itertools import unique_everseen\n\nfrom bot.bot import Bot\nfrom bot.constants import Roles\nfrom bot.decorators import with_role\n\nlog = logging.getLogger(__name__)\n\n\nclass CodeJams(commands.Cog):\n \"\"\"Manages the code-jam related parts of our server.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @commands.command()\n @with_role(Roles.admins)\n async def createteam(self, ctx: commands.Context, team_name: str, members: commands.Greedy[Member]) -> None:\n \"\"\"\n Create team channels (voice and text) in the Code Jams category, assign roles, and add overwrites for the team.\n\n The first user passed will always be the team leader.\n \"\"\"\n # Ignore duplicate members\n members = list(unique_everseen(members))\n\n # We had a little issue during Code Jam 4 here, the greedy converter did it's job\n # and ignored anything which wasn't a valid argument which left us with teams of\n # two members or at some times even 1 member. This fixes that by checking that there\n # are always 3 members in the members list.\n if len(members) < 3:\n await ctx.send(\n \":no_entry_sign: One of your arguments was invalid\\n\"\n f\"There must be a minimum of 3 valid members in your team. 
Found: {len(members)}\"\n \" members\"\n )\n return\n\n code_jam_category = utils.get(ctx.guild.categories, name=\"Code Jam\")\n\n if code_jam_category is None:\n log.info(\"Code Jam category not found, creating it.\")\n\n category_overwrites = {\n ctx.guild.default_role: PermissionOverwrite(read_messages=False),\n ctx.guild.me: PermissionOverwrite(read_messages=True)\n }\n\n code_jam_category = await ctx.guild.create_category_channel(\n \"Code Jam\",\n overwrites=category_overwrites,\n reason=\"It's code jam time!\"\n )\n\n # First member is always the team leader\n team_channel_overwrites = {\n members[0]: PermissionOverwrite(\n manage_messages=True,\n read_messages=True,\n manage_webhooks=True,\n connect=True\n ),\n ctx.guild.default_role: PermissionOverwrite(read_messages=False, connect=False),\n ctx.guild.get_role(Roles.verified): PermissionOverwrite(\n read_messages=False,\n connect=False\n )\n }\n\n # Rest of members should just have read_messages\n for member in members[1:]:\n team_channel_overwrites[member] = PermissionOverwrite(\n read_messages=True,\n connect=True\n )\n\n # Create a text channel for the team\n team_channel = await ctx.guild.create_text_channel(\n team_name,\n overwrites=team_channel_overwrites,\n category=code_jam_category\n )\n\n # Create a voice channel for the team\n team_voice_name = \" \".join(team_name.split(\"-\")).title()\n\n await ctx.guild.create_voice_channel(\n team_voice_name,\n overwrites=team_channel_overwrites,\n category=code_jam_category\n )\n\n # Assign team leader role\n await members[0].add_roles(ctx.guild.get_role(Roles.team_leaders))\n\n # Assign rest of roles\n jammer_role = ctx.guild.get_role(Roles.jammers)\n for member in members:\n await member.add_roles(jammer_role)\n\n await ctx.send(\n f\":ok_hand: Team created: {team_channel.mention}\\n\"\n f\"**Team Leader:** {members[0].mention}\\n\"\n f\"**Team Members:** {' '.join(member.mention for member in members[1:])}\"\n )\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Load the CodeJams cog.\"\"\"\n bot.add_cog(CodeJams(bot))\n","sub_path":"bot/cogs/jams.py","file_name":"jams.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"92070741","text":"from __future__ import print_function\r\n\r\nimport json\r\nimport os\r\nimport h5py\r\nimport cPickle\r\n\r\n'''\r\nget_all_images():\r\n from downloaded COCO dataset to pickle files storing \r\n split_train.pkl\r\n split_valid.pkl\r\n split_test.pkl\r\n {\r\n 'image_ids': [0, ...]\r\n 'file_names': ['data/images/COCO_train2014_000000000000.jpg', ...]\r\n }\r\n\r\n\r\n'''\r\n\r\nDATAROOT = 'data/images/'\r\nfolder = {\r\n 'train': 'train2014/',\r\n 'valid': 'val2014/',\r\n 'test': 'test2014/'\r\n}\r\nprefix = {\r\n 'train': 'COCO_train2014_000000',\r\n 'valid': 'COCO_val2014_000000',\r\n 'test': 'COCO_test2014_000000',\r\n}\r\ndataset_output_file = {\r\n 'train': 'split_train.pkl',\r\n 'valid': 'split_valid.pkl',\r\n 'test': 'split_test.pkl'\r\n}\r\n\r\n\r\ndef get_images(split):\r\n img_ids = []\r\n filenames = []\r\n id2file = {}\r\n directory = os.path.join(DATAROOT, folder[split])\r\n for parent, dirnames, filenames in os.walk(directory):\r\n for filename in filenames:\r\n if os.path.isfile(os.path.join(directory, filename)):\r\n img_id = int(filename.split('.')[0][-6:])\r\n # print(filename, ':', img_id)\r\n img_ids.append(img_id)\r\n filenames.append(os.path.join(directory, filename))\r\n id2file[img_id] = os.path.join(directory, filename)\r\n 
print('> find %d images for split %s.' % (len(img_ids), split))\r\n to_save = {\r\n 'image_ids': img_ids,\r\n 'file_names': filenames\r\n }\r\n cPickle.dump(to_save, open(os.path.join(DATAROOT, dataset_output_file[split]), 'w'))\r\n return id2file\r\n\r\n\r\ndef get_all_images():\r\n id2file = get_images('train')\r\n id2file.update(get_images('valid'))\r\n id2file.update(get_images('test'))\r\n print(len(id2file))\r\n cPickle.dump(id2file, open(os.path.join(DATAROOT, 'id2file.pkl'), 'w'))\r\n\r\n\r\nif __name__ == '__main__':\r\n get_all_images()\r\n","sub_path":"image_feature.py","file_name":"image_feature.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"354041005","text":"# Class ClassifierList\n\n# Author: Eric Broda, Broda Group Software Inc.\n# Date: November 17, 2015\n\nfrom common.System import Logger\n\nimport numpy as np\n\nfrom importlib import import_module\n\nclass ClassifierList():\n\n packageName = 'com.brodagroup.machinelearning.classifierlist'\n\n logger = None\n\n # Initializer\n def __init__(self):\n self.logger = Logger(self.packageName).getLogger()\n\n self.classifiers = []\n self.addClassifier('xgb', 'XGBoost Classifier', 'classifier.XGB', 'XGB')\n self.addClassifier('nnn', 'NOLEARN Lasagne neural network', 'classifier.NNnolearn', 'NNnolearn')\n self.addClassifier('nns', 'SCIKIT neuralnetwork', 'classifier.NNscikit', 'NNscikit')\n self.addClassifier('for', 'SCIKIT Random Forest Classifier', 'sklearn.ensemble', 'RandomForestClassifier')\n self.addClassifier('ext', 'SCIKIT Extra Trees Classifier', 'sklearn.ensemble', 'ExtraTreesClassifier')\n self.addClassifier('svc', 'SCIKIT SVC', 'sklearn.svm', 'SVC')\n self.addClassifier('nsv', 'SCIKIT NU SVC', 'sklearn.svm', 'NuSVC')\n self.addClassifier('knn', 'SCIKIT Nearest Neighbour Classifier', 'sklearn.neighbors', 'KNeighborsClassifier')\n self.addClassifier('dtr', 'SCIKIT Decision Tree', 'sklearn.tree', 'DecisionTreeClassifier')\n self.addClassifier('log', 'SCIKIT Logistic Regression', 'sklearn.linear_model', 'LogisticRegression')\n self.addClassifier('pct', 'SCIKIT Perceptron', 'sklearn.linear_model', 'Perceptron')\n self.addClassifier('sgd', 'SCIKIT SGD Classifier', 'sklearn.linear_model', 'SGDClassifier')\n\n return\n\n def loadClass(self, moduleName, className, parameters):\n self.logger.debug('Loading module: {0}, class: {1}'.format(moduleName, className))\n self.logger.debug('Using load parameters: {0}'.format(parameters))\n\n try:\n module_ = import_module(moduleName)\n try:\n class_ = getattr(module_, className)\n instance = class_(**parameters)\n except AttributeError:\n raise RuntimeError('Class does not exist: {0}'.format(className))\n except ImportError:\n raise RuntimeError('Module does not exist: {0}'.format(moduleName))\n return instance\n\n def addClassifier(self, code, name, moduleName, className):\n x = [code, name, moduleName, className]\n self.classifiers.append(x)\n return\n\n def load(self, code, parameters):\n\n item = next((x for x in self.classifiers if x[0] == code), None)\n\n if item == None:\n raise RuntimeError('Classifier code not found: {0}'.format(code))\n\n code = item[0]\n name = item[1]\n moduleName = item[2]\n className = item[3]\n\n classifier = self.loadClass(moduleName, className, parameters)\n\n return classifier\n\n def list(self):\n x = np.array(self.classifiers)\n a = x[:,0]\n 
return(a.tolist())\n","sub_path":"classifier/ClassifierList.py","file_name":"ClassifierList.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"343628340","text":"from typing import Any, Dict, Set, Union\n\nfrom ludwig.constants import DECODER, ENCODER, INPUT_FEATURES, PREPROCESSING, TYPE\nfrom ludwig.features.feature_registries import input_type_registry, output_type_registry\nfrom ludwig.utils.misc_utils import get_from_registry\n\n\ndef get_feature_type_parameter_values_from_section(\n config: Dict[str, Any], features_section: str, feature_type: str, parameter_name: str\n) -> Set:\n \"\"\"Returns the set of all parameter values used for the given features_section, feature_type, and\n parameter_name.\"\"\"\n parameter_values = set()\n for feature in config[features_section]:\n if feature[TYPE] == feature_type:\n if parameter_name in feature:\n parameter_values.add(feature[parameter_name])\n elif parameter_name in feature[ENCODER]:\n parameter_values.add(feature[ENCODER][parameter_name])\n elif parameter_name in feature[DECODER]:\n parameter_values.add(feature[DECODER][parameter_name])\n return parameter_values\n\n\ndef get_defaults_section_for_feature_type(\n feature_type: str,\n config_defaults: Dict[str, Dict[str, Any]],\n config_defaults_section: str,\n) -> Union[Dict[str, Any], Dict]:\n \"\"\"Returns a dictionary of all default parameter values specified in the global defaults section for the\n config_defaults_section of the feature_type.\"\"\"\n\n if feature_type not in config_defaults:\n return {}\n\n if config_defaults_section not in config_defaults[feature_type]:\n return {}\n\n return config_defaults[feature_type][config_defaults_section]\n\n\ndef merge_config_preprocessing_with_feature_specific_defaults(\n config_preprocessing: Dict[str, Any], config_defaults: Dict[str, Dict[str, Any]]\n) -> Dict[str, Any]:\n \"\"\"Returns a new dictionary that merges preprocessing section of config with type-specific preprocessing\n parameters from config defaults.\"\"\"\n preprocessing_params = {}\n preprocessing_params.update(config_preprocessing)\n for feature_type in config_defaults:\n preprocessing_params[feature_type] = config_defaults[feature_type].get(PREPROCESSING, {})\n return preprocessing_params\n\n\ndef get_default_encoder_or_decoder(feature: Dict[str, Any], config_feature_group: str) -> str:\n \"\"\"Returns the default encoder or decoder for a feature.\"\"\"\n if config_feature_group == INPUT_FEATURES:\n feature_schema = get_from_registry(feature.get(TYPE), input_type_registry).get_schema_cls()\n return feature_schema().encoder.type\n feature_schema = get_from_registry(feature.get(TYPE), output_type_registry).get_schema_cls()\n return feature_schema().decoder.type\n","sub_path":"ludwig/utils/config_utils.py","file_name":"config_utils.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"28248981","text":"import matplotlib.pyplot as plt\nfrom matplotlib.ticker import AutoMinorLocator\nimport numpy as np\n\nx=np.linspace(100,200,num=1000)\ny=85.5*np.sin(x/5.)/x*1e9\ny2=x**2/20000.*1e9\ny3=-y/y2*1e9\ny4=1/y2*1e18\ny5=np.cos(x/7.)*y2\ny6=y5*np.sin(x/8.)\n\nfig, ax = plt.subplots()\nplt.minorticks_on()\n\nplt.plot(x,y)\nplt.plot(x,y2)\nplt.plot(x,y3)\nplt.plot(x,y4)\nplt.plot(x,y5)\nplt.plot(x,y6)\n\n\n\n\n#labelpad\nplt.xlabel(\"Time 
($\\sigma_{22}$)\",labelpad=3)\nplt.ylabel(\"WWW\",labelpad=3)\n#plt.yticks((-.951e9,0.,.951e9,1.902e9))\n#plt.ylim(-.951e9,1.902e9)\n#plt.yticks((-1e9,0.,1e9,2e9))\n#plt.xticks((100,140,180),minor=True)\n\nax.xaxis.set_minor_locator(AutoMinorLocator(4))\nax.yaxis.set_minor_locator(AutoMinorLocator(2))\n\nplt.savefig(\"tesfig.pdf\")","sub_path":"data-apr15/process/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"622869262","text":"class Solution:\n \"\"\"\n Name : Shahreen Shahjahan Psyche\n Time : O(1) \n Space: O(1) \n\n\n Passed all Test cases in LC : Yes\n \"\"\"\n \n def __init__(self):\n \n \n \n self.first_20 = [\"\", \"One\", \"Two\", \"Three\", \"Four\", \"Five\", \"Six\", \n \"Seven\", \"Eight\", \"Nine\", \"Ten\", \"Eleven\", \"Twelve\",\n \"Thirteen\",\"Fourteen\", \"Fifteen\", \"Sixteen\", \n \"Seventeen\", \"Eighteen\", \"Nineteen\"]\n self.hundreds = [\"\",\"\",\"Twenty\",\"Thirty\", \"Forty\", \"Fifty\", \"Sixty\", \"Seventy\", \"Eighty\", \"Ninety\"]\n self.thousands = [\"\", \"Thousand\", \"Million\", \"Billion\"]\n \n \n \n def processThreeDigits(self, num):\n \n if num == 0:\n return \"\"\n elif num < 20 :\n return self.first_20[num] + \" \"\n elif num < 100:\n return self.hundreds[int(num/10)] + \" \" + self.processThreeDigits(int(num%10))\n else:\n return self.first_20[int(num/100)] + \" Hundred \" + self.processThreeDigits(int(num%100)) \n \n def numberToWords(self, num: int) -> str:\n \n if num == 0:\n return \"Zero\"\n \n i = 0\n res = \"\"\n while num > 0:\n process = num%1000\n if process != 0:\n res = self.processThreeDigits(process) + self.thousands[i] + \" \" + res\n num = int(num/1000)\n i += 1\n \n return res.strip()\n \n","sub_path":"Problem1.py","file_name":"Problem1.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"301922686","text":"import numpy as np\nimport numpy.linalg as linalg\nimport matplotlib.pyplot as plt\nfrom cykdtree import PyKDTree\n\n\ndef get_point_line_intersection(p, origin, direction):\n p = np.array(p, dtype='float64')\n origin = np.array(origin, dtype='float64')\n direction = np.array(direction, dtype='float64')\n projection = (origin - p) - np.dot((origin - p), direction) * direction\n intersection_point = projection + p\n b = intersection_point - origin\n for i in range(3):\n if direction[i] != 0:\n return b[i] / direction[i]\n\n\ndef point_within_bounds(left_edge, right_edge, point):\n for i in range(3):\n if left_edge[i] > point[i] or point[i] > right_edge[i]:\n return False\n for i in range(3):\n if np.isclose(left_edge[i], point[i], rtol=1e-09) or \\\n np.isclose(left_edge[i], point[i], rtol=1e-09):\n return False\n return True\n\n\ndef line_sphere_intersect(center, radius, origin, direction):\n direction = np.divide(direction, np.linalg.norm(direction))\n starting_point_vector = np.array([origin[0], origin[1], origin[2]])\n center_vector = np.array([center[0], center[1], center[2]])\n v = starting_point_vector - center_vector\n determinant = (np.dot(v, direction) ** 2) - (v @ v - radius ** 2)\n return determinant >= 0\n\n\ndef line_box_intersect(left_edge, right_edge, origin, direction):\n # https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection\n direction = np.array(direction, dtype='float64')\n origin = np.array(origin, 
dtype='float64')\n ox = origin[0]\n oy = origin[1]\n oz = origin[2]\n x0 = left_edge[0]\n x1 = right_edge[0]\n y0 = left_edge[1]\n y1 = right_edge[1]\n z0 = left_edge[2]\n z1 = right_edge[2]\n # sign = direction < 0\n invdir = np.empty(3, dtype='float64')\n invdir[np.isclose(direction, 0, rtol=1e-09)] = np.inf\n invdir[np.logical_not(np.isclose(direction, 0, rtol=1e-09))] = \\\n np.divide(1., direction[np.logical_not(np.isclose(direction, 0, rtol=1e-09))])\n\n tmin = (x0 - ox) * invdir[0]\n tmax = (x1 - ox) * invdir[0]\n\n if(tmin > tmax):\n tmin, tmax = tmax, tmin\n\n tymin = (y0 - oy) * invdir[1]\n tymax = (y1 - oy) * invdir[1]\n\n if tymin > tymax:\n tymin, tymax = tymax, tymin\n\n if tmin > tymax or tymin > tmax:\n return None\n\n if tymin > tmin:\n tmin = tymin\n\n if tymax < tmax:\n tmax = tymax\n\n tzmin = (z0 - oz) * invdir[2]\n tzmax = (z1 - oz) * invdir[2]\n\n if tzmin > tzmax:\n tzmin, tzmax = tzmax, tzmin\n\n if tmin > tzmax or tzmin > tmax:\n return None\n\n if tzmin > tmin:\n tmin = tzmin\n\n if tzmax < tmax:\n tmax = tzmax\n\n return np.array([tmin, tmax], dtype='float64')\n\n\ndef kdtreeRayTrace(positions,\n radius,\n left_edge,\n right_edge,\n resolution,\n origin):\n left_edge = np.array(left_edge, dtype='float64')\n right_edge = np.array(right_edge, dtype='float64')\n width = right_edge - left_edge\n dx = width[0]/resolution[0]\n dy = width[1]/resolution[1]\n tree = PyKDTree(positions,\n left_edge=np.array(left_edge),\n right_edge=np.array(right_edge),\n leafsize=30\n )\n for i in range(resolution[0]):\n for j in range(resolution[1]):\n origin = np.empty(3, dtype='float64')\n origin[0] = j * dx + dx / 2\n origin[1] = i * dy + dy / 2\n origin[2] = 2\n return get_intersecting_particles(tree, positions, radius, origin, [0, 0, -1])\n\n\ndef get_intersecting_particles(tree, positions, radiuses, origin, direction):\n epsilon = 0.0001\n direction = np.array(direction, dtype='float64')\n origin = np.array(origin, dtype='float64')\n intersecting_particles = []\n # Find the point where the ray intersects the entire domain\n bounded_region_left = tree.left_edge\n bounded_region_right = tree.right_edge\n\n # Check if origin is inside domain\n if point_within_bounds(bounded_region_left, bounded_region_right, origin):\n intersection_point = origin\n else:\n ray_box_intersection = line_box_intersect(bounded_region_left,\n bounded_region_right,\n origin,\n direction)\n\n # If the intersection occurs at a negative t, the ray is pointing away\n # Return nothing\n if ray_box_intersection is None or ray_box_intersection[0] < 0:\n return []\n intersection_point = (ray_box_intersection[0] + epsilon) * direction + origin\n\n curr_node = tree.get(np.array(intersection_point))\n\n while True:\n # check particle intersections\n node_particle_positions = positions[tree.idx[curr_node.slice]]\n node_particle_radiuses = radiuses[tree.idx[curr_node.slice]]\n\n # Check leaf node for particle intersections\n for i in range(len(node_particle_positions)):\n if line_sphere_intersect(node_particle_positions[i],\n node_particle_radiuses[i],\n origin,\n direction):\n intersecting_particles.append(node_particle_positions[i])\n\n # Search for next leaf node\n ray_box_intersection = line_box_intersect(curr_node.left_edge,\n curr_node.right_edge,\n origin,\n direction)\n intersection_point = (ray_box_intersection[1] + epsilon) * \\\n direction + origin\n\n if not point_within_bounds(bounded_region_left, bounded_region_right, intersection_point):\n # sort by z-coordinate\n intersecting_particles.sort(key=lambda x: 
get_point_line_intersection(x, origin, direction))\n return intersecting_particles\n\n curr_node = tree.get(np.array(intersection_point))\n\n\ndef test():\n resolution = [256] * 2\n kdtreeRayTrace(np.random.random((int(1e6), 3)),\n np.random.random(int(1e6)),\n [0., 0., 0.], [1., 1., 1.],\n resolution, [0.5, 0.5, 2.])\n\n\ndef test_get_line():\n print(get_point_line_intersection([1, 1, 9.328752967925], [0, 0, 0], [0, 0, 1]))\n\n\ndef test_ray_box():\n left_edge = [0.49990015,\n 0.49983722,\n 0.94301857]\n right_edge = [0.52996934,\n 0.52093917,\n 0.96627477]\n print(line_box_intersect(left_edge, right_edge,\n np.array([0.5, 0.5, 2]),\n np.array([0, 0, -1])))\n\n\nif __name__ == \"__main__\":\n test()","sub_path":"NaiveRayTracer/KDRayTracer.py","file_name":"KDRayTracer.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"471301456","text":"import unittest\nimport tkinter as tk\nfrom BaseTest import *\n\nclass TestsPanelTests(BaseTest):\n def setUp(self):\n BaseTest.setUp(self)\n self.app = MainPanel(self.root, self)\n self.panel = self.app.tests_panel\n\n def test_AddTestGroup(self):\n assert(len(self.panel.test_groups) == 0)\n for i in range(10):\n self.panel.add_test_group()\n attr = self.panel.test_groups[-1].attr\n assert(attr.name.get() == \"Group \" + str(i + 1))\n\n assert(len(self.panel.test_groups) == 10)\n\n def test_ClearPanel(self):\n for i in range(10):\n self.panel.add_test_group()\n\n assert(len(self.panel.test_groups) == 10)\n self.panel.clear_panel()\n assert(len(self.panel.test_groups) == 0)\n\n def test_Serialize(self):\n assert(self.panel.serialize() == [])\n self.panel.add_test_group()\n group = self.panel.test_groups[0]\n assert(self.panel.serialize() == [group.serialize()])\n\n # This ends up testing TestGroup more\n def test_LoadAndCloseOtherGroups(self):\n s = [{'name': 'TestGroup1', 'is_public': False, 'valgrind': True, 'tests': []},\n {'name': 'TestGroup2', 'is_public': True, 'vaglrind': False, 'tests': []}]\n self.panel.load(s)\n assert(len(self.panel.test_groups) == 2)\n\n group1 = self.panel.test_groups[0]\n group2 = self.panel.test_groups[1]\n\n assert(group1.attr.name.get() == 'TestGroup1' and \\\n group2.attr.name.get() == 'TestGroup2')\n assert(not group1.attr.is_public and group1.attr.run_valgrind)\n assert(group2.attr.is_public and not group2.attr.run_valgrind)\n assert(group1.attr.tests == group2.attr.tests == [])\n assert(group2.is_open and not group1.is_open)\n\n self.panel.close_other_groups(group1)\n assert(not group1.is_open and not group2.is_open)","sub_path":"Tests/TestsPanelTests.py","file_name":"TestsPanelTests.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"90034167","text":"import requests\nimport os\nfrom twilio.rest import Client\n\nOWM_ENDPOINT = \"https://api.openweathermap.org/data/2.5/onecall\"\n\n# Your Account Sid and Auth Token from twilio.com/console\n# and set the environment variables. 
See http://twil.io/secure\naccount_sid = os.environ.get(\"SID\")\nauth_token = os.environ.get(\"AUTH_TOKEN\")\n\n# https://www.ventusky.com - precipitation - data to test code find area with heavy rain\n# https://www.latlong.net/ - find lat lon of place where it is currently raining\n\n# FUKUOKA JP LAT LON\n# lat = 33.590355\n# lon = 130.401718\n\n# LONDON UK LAT LON\n# \"lat\": 51.507351,\n# \"lon\": -0.127758,\n\nweather_params = {\n \"lat\": 51.507351,\n \"lon\": -0.127758,\n \"appid\": os.environ.get(\"API_KEY\"),\n \"exclude\": \"current,minutely,daily\"\n}\n\nresponse = requests.get(OWM_ENDPOINT, params=weather_params)\nresponse.raise_for_status()\nweather_data = response.json()\n\n# slice list\nhourly_weather_data = weather_data[\"hourly\"][:12]\n\n# loop solution\nwill_rain = False\nhourly_weather_data = hourly_weather_data\nmessage_to_send = \"\"\nfor hour in range(0, 12):\n weather_id = int(hourly_weather_data[hour][\"weather\"][0][\"id\"])\n if weather_id < 700:\n message_to_send += f\"Bring umbrella - rain @ {hour + 8} Hours\\n\"\n will_rain = True\n\nif will_rain:\n client = Client(account_sid, auth_token)\n\n message = client.messages \\\n .create(\n body=message_to_send,\n from_=\"\",\n to=\"\"\n )\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"643460623","text":"import torch\nfrom torch import flatten\nimport torch.nn.functional as F\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.nn import CrossEntropyLoss\nfrom src.train_base import *\n\n\n# training type 설정해서 -> Trainer 가져옴\ndef get_trainer(config,args, device, data_loader, log_writer, type):\n return Trainer(config, args, device, data_loader, log_writer, type=type)\n\n\ndef get_optimizer(model, config, args_optim):\n if args_optim == \"adam\":\n return torch.optim.Adam(model.parameters(), lr=1e-05, weight_decay=config.train.weight_decay)\n\n if args_optim == \"adadelta\":\n return torch.optim.Adadelta(model.parameters(), lr=1, rho=1e-06, weight_decay=config.train.weight_decay)\n\ndef get_lr_scheduler(optimizer):\n #return torch.optim.lr_scheduler.LambdaLR(optimizer=optimizer, lr_lambda = lambda epoch: 0.95**epoch, last_epoch=-1, verbose=False)\n return torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer=optimizer, T_0=10, T_mult=1, eta_min=0.001, last_epoch=-1)\n\nclass Trainer:\n def __init__(self,config, args, device, data_loader, log_writer, type):\n self.config = config\n self.args = args\n self.pretrained_weights_path = config.data_info[args.dataset].weights\n \n self.device = device\n self.data_loader = data_loader\n self.log_writer = log_writer\n self.type = type\n\n self.loss_function = CrossEntropyLoss()\n self.global_step = 0\n\n def init_optimizer(self, optimizer):\n self.optimizer = optimizer\n\n def init_lr_schedule(self, scheduler):\n self.scheduler = scheduler\n\n def train_epoch(self, model, epoch, global_step=None):\n if self.type==\"train\":\n model.train()\n else:\n model.eval()\n\n model.to(self.device)\n loss_save = list()\n\n for data in tqdm(self.data_loader, desc='Epoch: {}'.format(epoch)):\n input_data = data[0].to(device=torch.device(self.device))\n label = data[1].long()\n label = label.to(device=torch.device(self.device))\n y = model.forward(input_data) # (batch_size, n_classes)\n loss = self.loss_function(y, label)\n\n if self.type ==\"train\":\n self.optim_process(model, loss)\n self.global_step += 1\n 
self.write_tb(loss, global_step)\n else:\n loss_save.append(loss.item())\n\n self.evaluator = self.evaluate(y, label)\n\n if self.type != 'train':\n loss = sum(loss_save)/len(loss_save)\n self.write_tb(loss, global_step)\n return loss\n\n def optim_process(self, model, loss):\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), self.config.train.clip)\n self.optimizer.step()\n self.scheduler.step()\n\n def write_tb(self, loss, global_step):\n if self.type =='train':\n lr = self.optimizer.param_groups[0][\"lr\"]\n self.log_writer.add_scalar(\"train/loss\", loss, self.global_step)\n self.log_writer.add_scalar(\"train/lr\", lr, self.global_step)\n \n else:\n self.log_writer.add_scalar(\"valid/loss\", loss, global_step)\n\n def evaluate(self, y, label):\n a = torch.argmax(F.log_softmax(y, dim=1), dim=1)\n accuracy = len(a[a == label])/len(a)*100\n return accuracy\n \n\n\ndef get_data_loader(config=None, dataset=None,data_type=\"train\", shuffle=True,workers=10, drop_last=True):\n batch_size = config.train.batch_size\n dataset = Make_Dataset(config.path_preprocessed, dataset, data_type)\n data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=workers, drop_last=drop_last, collate_fn = padded_seq)\n #data_loader = DataLoader(dataset, batch_size = batch_size, shuffle=shuffle, num_workers=workers, drop_last=drop_last)\n return data_loader\n\ndef padded_seq(samples):\n def padd(samples):\n length = [len(s) for s in samples]\n max_length = max(length)\n batch = torch.zeros(len(samples),max_length).to(torch.long)\n for idx, sample in enumerate(samples):\n batch[idx, :length[idx]] = torch.LongTensor(sample)\n return batch\n data = []\n label = []\n for sample in samples:\n data.append(sample[\"data\"])\n label.append(sample[\"label\"])\n data = padd(data)\n data = torch.LongTensor(data)\n label = torch.Tensor(label)\n return data, label\n\nclass Make_Dataset(Dataset):\n def __init__(self, file_path, dataset, data_type):\n data = torch.load(file_path)\n self.dataset = data[dataset]\n \n if data_type ==\"train\":\n self.data = self.dataset[\"train\"]\n self.data_label = self.dataset[\"train_label\"]\n assert len(self.data) == len(self.data_label)\n elif data_type ==\"test\":\n self.data = self.dataset[\"test\"]\n self.data_label = self.dataset[\"test_label\"]\n assert len(self.data) == len(self.data_label)\n else:\n self.data = self.dataset[\"dev\"]\n self.data_label = self.dataset[\"dev_label\"]\n assert len(self.data) == len(self.data_label)\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n \n ret_dict = dict()\n ret_dict[\"data\"] = self.data[idx]\n ret_dict[\"label\"] = self.data_label[idx]\n\n return ret_dict\n\n\n# import torch\n# dataset = torch.load(\"/data/user15/workspace/CNN/data/preprocessed/preprocessed.pkl\")\n# dataset[\"MR\"][\"train\"][0]","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"117032306","text":"\"\"\"\n\n删除链表中等于给定值 val 的所有节点。\n\n示例:\n\n输入: 1->2->6->3->4->5->6, val = 6\n输出: 1->2->3->4->5\n\n\"\"\"\n\n\n# definition a singly-linked list\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\n# 递归的方式\nclass Solution:\n def removeElements(self, head, val):\n if head:\n head.next = self.removeElements(head.next ,val)\n return head.next if head and head.val 
== val else head\n\n\nclass Solution2(object):\n def removeElements(self, head, val):\n \"\"\"\n :type head: ListNode\n :type val: int\n :rtype: ListNode\n \"\"\"\n p = ListNode(-1)\n # 因为要删除的可能是链表的第一个元素,所以用一个h节点来做处理\n # 最后只要返回h的下一个节点即可\n p.next, h = head, p\n # 注意遍历的条件是p.next不为空\n while p.next:\n # 如果p的下一个节点的值==val\n # P就指向下下一个,这就删掉了指定的节点\n if p.next.val == val:\n p.next = p.next.next\n # 注意这里的continue\n # 因为循环最后还有一个P=p.next,所以要跳过\n continue\n # 不用continue用else的方式也是可以的\n p = p.next\n\n return h.next\n\n","sub_path":"链表/203-移除链表元素.py","file_name":"203-移除链表元素.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"585091925","text":"from keras.models import load_model\nfrom keras.models import model_from_json\nimport sys\nimport cv2\n\nans=['可愛','性感','氣質','陽光']\n\nmodel = load_model('model.h5')\nprint(\"Loaded model from disk\")\n\npath=sys.argv[2]\n\ndef get_result(ImgArray):\n ImgArray = cv2.resize(ImgArray,(64,64))\n ImgArray = ImgArray.astype('float32')\n ImgArray = ImgArray/255\n ImgArray = ImgArray.reshape(1, 64, 64, 3)\n predicted_classes = model.predict_classes(ImgArray)\n return predicted_classes\n\nimage=cv2.imread(path)\nresult=get_result(image)\nprint(ans[result[0]-1])","sub_path":"detect_picture.py","file_name":"detect_picture.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"117316816","text":"#!/usr/bin/env python\n\n\"\"\" Tests for sfjson\nYou must set the sf_jid and sf_token environment variables to run these tests.\n\"\"\"\n\nimport os\nimport unittest\nfrom sfjson import Superfeedr\nfrom xml.etree import cElementTree as Element\nfrom sleekxmpp.xmlstream import StanzaBase\n\nimport logging\nlogging.basicConfig(level=logging.INFO,\n format='[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s')\n\n\nclass SuperfeedrJSONTest(unittest.TestCase):\n\n jid = None\n token = None\n sf_client = None\n\n @classmethod\n def setUpClass(cls):\n cls.jid = os.environ.get('sf_jid')\n cls.token = os.environ.get('sf_token')\n cls.sf_client = Superfeedr(cls.jid, cls.token)\n\n @classmethod\n def tearDownClass(cls):\n cls.sf_client.disconnect()\n\n def test_connected(self):\n assert self.sf_client.success\n\n def test_subscribe(self):\n url = 'http://superfeedr.com/track?include=iacquire'\n\n result = self.sf_client.subscribe([url])\n\n assert result[0]['subscription']['feed']['url'] == url\n\n def test_list(self):\n url = 'http://superfeedr.com/track?include=iacquire'\n\n self.sf_client.subscribe([url])\n\n feeds = self.sf_client.list()\n\n assert 'http://superfeedr.com/track?include=iacquire' in feeds\n\n def test_unsubscribe(self):\n assert self.sf_client.unsubscribe('http://superfeedr.com/track?include=iacquire')\n assert self.sf_client.unsubscribe('http://www.iacquire.com/feed/')\n\n def test_message_parse(self):\n\n expected_title = 'iPad Air : une grosse autonomie, mais pas la plus grosse'\n\n with open('sfjson_msg.xml', 'rb') as f:\n xml = Element.fromstring(f.read())\n stanza = StanzaBase(xml=xml)\n result = self.sf_client.superfeedr_msg(stanza)\n\n assert result['status']['feed'] == 'http://superfeedr.com/track?include=apple'\n assert result['items'][0]['title'] == expected_title\n\nif __name__ == '__main__':\n 
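    # NOTE: running this module directly executes the live-API test suite; sf_jid and sf_token must be set in the environment (see module docstring)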
unittest.main()\n","sub_path":"sfjson_test.py","file_name":"sfjson_test.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"424144006","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n####################################################################################################\n# Name: CV Process Testing\n# Author: Jorden Allcock\n#\n# Description: File is responsible for:\n#\n# - Mesuring Size of Files prior to running python script\n#\t- Initiating the CV Process operation to strip characters and thus reduce file size\n#\t- Calculate difference of file sizes before and after processing\n#\t- Output file size, positive value shows a reduction in file size\n####################################################################################################\n\n###################################################################################\n# Required imports\n###################################################################################\n\nimport os # To enable file browsing of local directory\nimport sys\nimport datetime\t\t\t\t# Required to show start/end datetime of processing\nsys.path.append('..') # Required to import scripts from parent folder\nimport collections\nimport cv_process\n\n###################################################################################\n# Getting initial CV File Size\n###################################################################################\n\nlogfile = open('/home/jallcock/environments/Final-Project/testing/cv_reduction_testing_log.txt', 'a')\n\nlogfile.write('Starting processing at ' + str(datetime.datetime.now()) + '\\n\\n')\n\ncv_directory = '/home/jallcock/environments/Final-Project/stored_cvs'\ncv_size_difference = {}\n\nif os.listdir(cv_directory) == []:\n logfile.write('Directory empty - unable to process CVs')\nelse:\n for file in os.listdir(cv_directory):\n if os.path.getsize(os.path.join(cv_directory, file)) > 0:\n cv_size_difference[file] = os.path.getsize(os.path.join(cv_directory, file))\n logfile.write('File size of [' + file + '] before processing - ' + str(os.path.getsize(os.path.join(cv_directory, file))) + '\\n')\n\n logfile.write('\\nInitial CV Size Computed')\n\n logfile.write('\\n\\n')\n\n###################################################################################\n# Initiating CV Process script\n###################################################################################\n\n logfile.write('Running CV Process script\\n')\n\n cv_process.main(cv_directory)\n\n logfile.write('Completed processing of CVs\\n')\n\n###################################################################################\n# Checking contents of CV Directory after CV Download\n###################################################################################\n\n#if os.listdir(cv_directory) == []:\n for file in os.listdir(cv_directory):\n if os.path.getsize(os.path.join(cv_directory, file)) > 0:\n cv_size_difference[file] = cv_size_difference[file] - os.path.getsize(os.path.join(cv_directory, file))\n logfile.write('File size of [' + file + '] after processing - ' + str(os.path.getsize(os.path.join(cv_directory, file))) + '\\n')\n else:\n logfile.write('************************' + file + ' IS ZERO SIZED************************' + '\\n')\n#else:\n# logfile.write('Files not written to directory\\n')\n\n###################################################################################\n# Outputting Difference in File 
Size\n###################################################################################\n\nfor key in cv_size_difference:\n logfile.write('Difference in file size for [' + key + '] is ' + str(cv_size_difference[key]) + '\\n')\n\nlogfile.write('Finished processing at ' + str(datetime.datetime.now()) + '\\n')\n\nlogfile.close()\n","sub_path":"testing/cv_reduction_testing.py","file_name":"cv_reduction_testing.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"5375275","text":"import numpy as np\nimport tensorflow as tf\n\nfrom mlscanner.splitchar.char_trainer import FEATURES, model_file\n\n\nclass CharInterpreter:\n def __init__(self):\n self.model = tf.keras.models.load_model(model_file)\n\n def predict(self, x):\n predictions = self.model.predict(x, 32)\n # print('prediction', predictions)\n letters = \"\"\n scores = []\n for prediction in predictions:\n n = np.argmax(prediction)\n letters += FEATURES[n]\n scores.append(FEATURES[n] + \": \" + str(prediction[n]))\n print(scores)\n return letters\n","sub_path":"mlscanner/splitchar/char_interpreter.py","file_name":"char_interpreter.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"617006409","text":"\"\"\"\nUnit test execute as:\npython $CINGROOT/python/cing/Scripts/test/test_DoScriptOnEntryList.py\n\"\"\"\nfrom cing import cingDirScripts\nfrom cing import cingDirTmp\nfrom cing.Libs.NTutils import * #@UnusedWildImport\nfrom cing.Scripts.doScriptOnEntryList import doFunctionOnEntryList\nfrom cing.Scripts.doScriptOnEntryList import doScriptOnEntryList\nfrom cing.Scripts.validateEntry import ARCHIVE_TYPE_BY_CH23\nfrom cing.Scripts.validateEntry import PROJECT_TYPE_PDB\nfrom unittest import TestCase\nimport unittest\n\ndef sleepy(sleepTime, bogusArgumentList = [] ):\n nTdebug(\"Will sleep for %s ignoring bogusArgumentList: %s\" % (sleepTime, str(bogusArgumentList)))\n time.sleep(float(sleepTime))\n# end def\n\nclass AllChecks(TestCase):\n\n def test_DoScriptOnEntryList(self):\n\n cingDirTmpTest = os.path.join( cingDirTmp, getCallerName() )\n mkdirs( cingDirTmpTest )\n self.failIf(os.chdir(cingDirTmpTest), msg =\n \"Failed to change to test directory for files: \" + cingDirTmpTest)\n entryListFileName = \"entry_list_todo.csv\"\n entry_list_todo = [ 0,1,2,3,4,5,6,7,8,9 ]\n writeTextToFile(entryListFileName, toCsv(entry_list_todo))\n\n pythonScriptFileName = os.path.join(cingDirScripts, 'doNothing.py')\n extraArgList = ('.', '.', '.', '.', ARCHIVE_TYPE_BY_CH23, PROJECT_TYPE_PDB)\n\n self.assertFalse(\n doScriptOnEntryList(pythonScriptFileName,\n entryListFileName,\n '.',\n processes_max = 8,\n delay_between_submitting_jobs = 5,\n max_time_to_wait = 20,\n start_entry_id = 0,\n max_entries_todo = 1,\n extraArgList = extraArgList,\n shuffleBeforeSelecting = True ))\n # end def\n \n def test_DoFunctionOnEntryList(self):\n cingDirTmpTest = os.path.join( cingDirTmp, getCallerName() )\n mkdirs( cingDirTmpTest )\n self.failIf(os.chdir(cingDirTmpTest), msg =\n \"Failed to change to test directory for files: \" + cingDirTmpTest)\n entryListFileName = 'entryListFileName.csv'\n writeTextToFile(entryListFileName, '\\n'.join('0.1 0.2'.split()))\n doFunctionOnEntryList(sleepy, entryListFileName) \n # end def\n\nif __name__ == \"__main__\":\n cing.verbosity = verbosityDebug\n 
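    # Debug verbosity makes the nTdebug output from the worker scripts visible while the two test cases above run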
unittest.main()\n","sub_path":"python/cing/Scripts/test/test_DoScriptOnEntryList.py","file_name":"test_DoScriptOnEntryList.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"387871907","text":"import pytest\nimport task_5_4\nimport sys\nsys.path.append('..')\n\nfrom common_functions import check_class_exists, check_attr_or_method\n\n\ndef test_class_created():\n '''Проверяем, что класс создан'''\n check_class_exists(task_5_4, 'IPv4Network')\n\n\ndef test_attributes_created():\n '''\n Проверяем, что у объекта есть атрибуты:\n address, mask, broadcast, allocated, unassigned\n '''\n net = task_5_4.IPv4Network('100.7.1.0/26')\n check_attr_or_method(net, attr='network')\n check_attr_or_method(net, attr='broadcast')\n check_attr_or_method(net, attr='allocated')\n check_attr_or_method(net, attr='unassigned')\n\ndef test_method_created():\n '''\n Проверяем, что у объекта есть метод:\n allocate\n '''\n net = task_5_4.IPv4Network('100.7.1.0/26')\n check_attr_or_method(net, method='allocate')\n\n\ndef test_returned_types():\n '''Проверяем работу объекта'''\n net = task_5_4.IPv4Network('100.7.1.0/26')\n assert net.allocated == [], \"По умолчанию allocated должен содержать пустой список\"\n assert isinstance(net.unassigned, list), \"unassigned должен содержать список IP-адресов\"\n assert type(net.hosts) == list, \"Метод hosts должен возвращать список\"\n\n\ndef test_address_allocation():\n '''Проверяем работу объекта'''\n net = task_5_4.IPv4Network('100.7.1.0/26')\n assert len(net.hosts) == 62, \"В данной сети должно быть 62 хоста\"\n assert net.broadcast == '100.7.1.63', \"Broadcast адрес для этой сети 100.7.1.63\"\n\n net.allocate('100.7.1.45')\n net.allocate('100.7.1.15')\n net.allocate('100.7.1.60')\n\n assert len(net.hosts) == 62, \"Метод hosts должен возвращать все хосты\"\n assert len(net.allocated) == 3, \"Переменная allocated должна содержать 3 хоста\"\n assert len(net.unassigned) == 59, \"Метод unassigned должен возвращать на 3 хоста меньше\"\n\n\n","sub_path":"exercises/05_data_classes/tests/test_task_5_4.py","file_name":"test_task_5_4.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"461802240","text":"\nfrom flask import render_template\nfrom flask import request\nfrom flask import abort, redirect, url_for\n\nimport markdown\nfrom mdserver import utils\n\nimport os\n\n#flask \n\nfrom flask import Flask\napp = Flask(__name__)\napp.debug = True\n\n@app.route(\"/\")\n#localhost:5000 is home page\ndef home():\n return render_template('layout.html' ,body = utils.md_to_html('mdfiles/home.md'), doc ='test')\n\n\n@app.route(\"/register\", methods=['POST', 'GET'])\n# register button clicked\ndef register():\n\tif request.method == 'POST':\n\t\tname = request.form['txtName'] + '\\n'\n\t\temail = request.form['txtEmail'] + '\\n'\n\t\tphone = request.form['txtPhone'] + '\\n'\n\t\tcomments = request.form['txtComment']\n\t\twith open('mdfiles/backend','w+') as f:\n\t\t\tf.write(name)\n\t\t\tf.write(email)\n\t\t\tf.write(phone)\n\t\t\tf.write(comments)\n\t\treturn redirect('/register')\n\telse:\n\t\twith open('mdfiles/backend','r') as f:\n\t\t\tname = f.readline()\n\t\t\temail = f.readline()\n\t\t\tphone = f.readline()\n\t\t\tcomment = f.read()\n\t\treturn render_template('register.html', name=name, email=email, phone=phone, comments=comment)\n\n@app.route(\"/about\")\n# user select about page\ndef 
about():\n\treturn render_template('layout.html' ,body = utils.md_to_html('mdfiles/about.md'))\n\n@app.route(\"/test\")\n# testing\ndef test():\n\twith open('bin/templates/register.html') as f:\n\t\tcontent = f.read()\n\treturn render_template('layout.html' ,body = content)\n\n\n\n@app.route(\"/<doc>\")\n#localhost:5000/test\ndef folder_serve(doc):\n\tpath = os.path.join('mdfiles',doc)\n\tfile_path = os.path.join(path,'index.md')\n\treturn render_template('layout.html', body = utils.md_to_html(file_path),doc=doc)\n\n@app.route('/hello/<name>')\n#localhost:5000/hello/test\ndef hello(name=None):\n return render_template('hello.html', name=name)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"mdserver/mdserver/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"632853803","text":"import math\nimport statistics\nimport warnings\n\nimport numpy as np\nfrom hmmlearn.hmm import GaussianHMM\nfrom sklearn.model_selection import KFold\nfrom asl_utils import combine_sequences\n\n\nclass ModelSelector(object):\n '''\n base class for model selection (strategy design pattern)\n '''\n\n def __init__(self, all_word_sequences: dict, all_word_Xlengths: dict, this_word: str,\n n_constant=3,\n min_n_components=2, max_n_components=10,\n random_state=14, verbose=False):\n self.words = all_word_sequences\n self.hwords = all_word_Xlengths\n self.sequences = all_word_sequences[this_word]\n self.X, self.lengths = all_word_Xlengths[this_word]\n self.this_word = this_word\n self.n_constant = n_constant\n self.min_n_components = min_n_components\n self.max_n_components = max_n_components\n self.random_state = random_state\n self.verbose = verbose\n\n def select(self):\n raise NotImplementedError\n\n def base_model(self, num_states):\n # with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n # warnings.filterwarnings(\"ignore\", category=RuntimeWarning)\n try:\n hmm_model = GaussianHMM(n_components=num_states, covariance_type=\"diag\", n_iter=1000,\n random_state=self.random_state, verbose=False).fit(self.X, self.lengths)\n if self.verbose:\n print(\"model created for {} with {} states\".format(self.this_word, num_states))\n return hmm_model\n except:\n if self.verbose:\n print(\"failure on {} with {} states\".format(self.this_word, num_states))\n return None\n\n\nclass SelectorConstant(ModelSelector):\n \"\"\" select the model with value self.n_constant\n\n \"\"\"\n\n def select(self):\n \"\"\" select based on n_constant value\n\n :return: GaussianHMM object\n \"\"\"\n best_num_components = self.n_constant\n return self.base_model(best_num_components)\n\n\nclass SelectorBIC(ModelSelector):\n \"\"\" select the model with the lowest Bayesian Information Criterion(BIC) score\n\n http://www2.imm.dtu.dk/courses/02433/doc/ch6_slides.pdf\n Bayesian information criteria: BIC = -2 * logL + p * logN\n \"\"\"\n\n def select(self):\n \"\"\" select the best model for self.this_word based on\n BIC score for n between self.min_n_components and self.max_n_components\n\n :return: GaussianHMM object\n \"\"\"\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n bic_scores = []\n \"\"\"\n N is the number of data points, f is the number of features:\n\n N, f = self.X.shape\n\n Having m as the num_components, The free parameters p are a sum of:\n\n The free transition probability parameters, which is the size of the transmat matrix less one row because they add 
up to 1 and therefore the final row is deterministic, so m*(m-1)\n The free starting probabilities, which is the size of startprob minus 1 because it adds to 1.0 and last one can be calculated so m-1\n The number of means, which is m*f\n Number of covariances which is the size of the covars matrix, which for \"diag\" is m*f\n All of the above is equal to:\n\n p = m^2 +2mf-1\n\n Finally, the BIC equation is:\n\n BIC = -2 * logL + p * logN\n \"\"\"\n try:\n n_components = range(self.min_n_components, self.max_n_components + 1)\n for num_states in n_components:\n model = self.base_model(num_states)\n log_l = model.score(self.X, self.lengths)\n p = num_states ** 2 + 2 * num_states * model.n_features - 1\n bic_score = -2 * log_l + p * math.log(num_states)\n bic_scores.append(bic_score)\n except Exception as e:\n pass\n\n states = n_components[np.argmin(bic_scores)] if bic_scores else self.n_constant\n return self.base_model(states)\n\n\nclass SelectorDIC(ModelSelector):\n ''' \n Abbr.\n - DIC - Discriminative Information Criterion\n\n Equation.\n - DIC = log(P(X(i)) - 1/(M-1)SUM(log(P(X(all but i))\n\n select best model based on Discriminative Information Criterion\n\n Biem, Alain. \"A model selection criterion for classification: Application to hmm topology optimization.\"\n Document Analysis and Recognition, 2003. Proceedings. Seventh International Conference on. IEEE, 2003.\n http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.58.6208&rep=rep1&type=pdf\n https://pdfs.semanticscholar.org/ed3d/7c4a5f607201f3848d4c02dd9ba17c791fc2.pdf\n DIC = log(P(X(i)) - 1/(M-1)SUM(log(P(X(all but i))\n '''\n\n def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n models = []\n dic_scores = []\n other_words = []\n\n for word in self.words:\n if word != self.this_word:\n other_words.append(self.hwords[word])\n try:\n for num_states in range(self.min_n_components, self.max_n_components + 1):\n model = self.base_model(num_states)\n word_log_p = model.score(self.X, self.lengths)\n models.append([word_log_p, model])\n\n except Exception as e:\n pass\n\n for model in models:\n word_log_p, hmm_model = model\n \n # equal to 1/(M-1)SUM(log(P(X(all but i))\n anti_log_p = np.mean([hmm_model.score(word[0], word[1]) for word in other_words])\n \n dic_score = word_log_p - anti_log_p \n dic_scores.append([dic_score, hmm_model])\n\n best_dic = max(dic_scores, key = lambda x: x[0])[1] if dic_scores else None\n return best_dic\n\n\nclass SelectorCV(ModelSelector):\n ''' select best model based on average log Likelihood of cross-validation folds\n\n '''\n\n def select(self):\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n cv_scores = []\n kf = KFold(n_splits = 3, shuffle = False, random_state = None)\n\n for num_states in range(self.min_n_components, self.max_n_components + 1):\n log_ls = []\n try:\n if len(self.sequences) > 2:\n for train_index, test_index in kf.split(self.sequences):\n\n self.X, self.lengths = combine_sequences(train_index, self.sequences)\n X_test, lengths_test = combine_sequences(test_index, self.sequences)\n\n hmm_model = self.base_model(num_states)\n log_l = hmm_model.score(X_test, lengths_test)\n else:\n hmm_model = self.base_model(num_states)\n log_l = hmm_model.score(self.X, self.lengths)\n\n log_ls.append(log_l)\n cv_scores.append([np.mean(log_ls), hmm_model])\n\n except Exception as e:\n pass\n\n best_cv = max(cv_scores, key = lambda x: x[0])[1] if cv_scores else None\n return 
best_cv\n","sub_path":"my_model_selectors.py","file_name":"my_model_selectors.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"398709880","text":"###############################################################################\r\n# Author: Wasi Ahmad\r\n# Project: Deep Semantic Similarity Model\r\n# Date Created: 7/18/2017\r\n#\r\n# File Description: This script provides a definition of the corpus, each\r\n# example in the corpus and the dictionary.\r\n###############################################################################\r\n\r\nimport os, helper, json\r\nfrom collections import Counter\r\n\r\n\r\nclass Dictionary(object):\r\n def __init__(self):\r\n self.word2idx = {}\r\n self.idx2word = []\r\n\r\n def build_dict(self, sample, max_words):\r\n word_count = Counter()\r\n for query in sample.data:\r\n word_count.update(query.query_terms)\r\n for doc in query.rel_docs:\r\n word_count.update(doc.title)\r\n\r\n most_common = word_count.most_common(max_words) if max_words > 0 else word_count.most_common()\r\n for (index, w) in enumerate(most_common):\r\n self.idx2word.append(w[0])\r\n self.word2idx[w[0]] = len(self.idx2word) - 1\r\n\r\n def __len__(self):\r\n return len(self.idx2word)\r\n\r\n\r\nclass Document(object):\r\n def __init__(self):\r\n self.title = []\r\n self.is_clicked = False\r\n\r\n def add_content(self, text, tokenize):\r\n content_terms = helper.tokenize(text, tokenize)\r\n for i in range(len(content_terms)):\r\n term = '#' + content_terms[i] + '#'\r\n for j in range(0, len(term) - 2):\r\n self.title.append(term[j:j + 3])\r\n\r\n def set_clicked(self):\r\n self.is_clicked = True\r\n\r\n\r\nclass Query(object):\r\n def __init__(self):\r\n self.query_terms = []\r\n self.rel_docs = []\r\n\r\n def add_text(self, text, tokenize):\r\n content_terms = helper.tokenize(text, tokenize)\r\n for i in range(len(content_terms)):\r\n term = '#' + content_terms[i] + '#'\r\n for j in range(0, len(term) - 2):\r\n self.query_terms.append(term[j:j + 3])\r\n\r\n def add_document(self, document):\r\n self.rel_docs.append(document)\r\n\r\n\r\nclass Corpus(object):\r\n def __init__(self, _tokenize):\r\n self.tokenize = _tokenize\r\n self.data = []\r\n\r\n def parse(self, in_file, max_example):\r\n \"\"\"Parses the content of a file.\"\"\"\r\n assert os.path.exists(in_file)\r\n\r\n with open(in_file, 'r') as f:\r\n for line in f:\r\n session = json.loads(line)\r\n assert len(session['query']) == len(session['clicks'])\r\n for qidx in range(len(session['query'])):\r\n query = Query()\r\n query.add_text(session['query'][qidx][0], self.tokenize)\r\n for i in range(len(session['clicks'][qidx])):\r\n doc = Document()\r\n doc_title = session['clicks'][qidx][i][1]\r\n doc_label = session['clicks'][qidx][i][2]\r\n doc.add_content(doc_title, self.tokenize)\r\n if int(doc_label) == 1:\r\n doc.set_clicked()\r\n query.add_document(doc)\r\n self.data.append(query)\r\n if len(self.data) == max_example:\r\n return\r\n","sub_path":"ranking_baselines/DSSM/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"393826407","text":"\nimport os,glob,pathlib\nfrom scratch.sam.util import *\nimport itertools\nfrom sklearn.linear_model import LinearRegression\n\n# Shamelessly stolen from stack overflow\ndef sorted_k_partitions(seq, k):\n \"\"\"Returns a list of all unique k-partitions of `seq`.\n\n 
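    For example (an illustration added for clarity, matching the ordering
    rules described below):

        >>> sorted_k_partitions([1, 2, 3], 2)
        [[(1,), (2, 3)], [(2,), (1, 3)], [(3,), (1, 2)]]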
Each partition is a list of parts, and each part is a tuple.\n\n The parts in each individual partition will be sorted in shortlex\n order (i.e., by length first, then lexicographically).\n\n The overall list of partitions will then be sorted by the length\n of their first part, the length of their second part, ...,\n the length of their last part, and then lexicographically.\n \"\"\"\n n = len(seq)\n groups = [] # a list of lists, currently empty\n\n def generate_partitions(i):\n if i >= n:\n yield list(map(tuple, groups))\n else:\n if n - i > k - len(groups):\n for group in groups:\n group.append(seq[i])\n yield from generate_partitions(i + 1)\n group.pop()\n\n if len(groups) < k:\n groups.append([seq[i]])\n yield from generate_partitions(i + 1)\n groups.pop()\n\n result = generate_partitions(0)\n\n # Sort the parts in each partition in shortlex order\n result = [sorted(ps, key = lambda p: (len(p), p)) for ps in result]\n # Sort partitions by the length of each part, then lexicographically.\n result = sorted(result, key = lambda ps: (*map(len, ps), ps))\n\n return result\n\n\ndef get_mse(reg, feat, energies):\n pred = reg.predict(feat)\n mse = np.mean((energies - pred)**2)\n\n return mse\n\n# idx is a list of n_merge (k) sets of indices (each set is a new feature)\ndef gen_feat(feat_vec, indices, sum_vec=None):\n\n if sum_vec is None:\n sum_vec = [np.ones(len(idx)) for idx in indices]\n \n n_merge = len(indices)\n\n new_feat = np.zeros((feat_vec.shape[0], n_merge))\n\n for i, (idx, s) in enumerate(zip(indices, sum_vec)):\n new_feat[:,i] = np.dot(feat_vec[:,idx], s)\n\n\n return new_feat\n\n\ndef gen_sum_vec(indices):\n sum_parts = [[] for s in indices]\n\n for i, idx in enumerate(indices):\n n_merge = len(idx)\n vec = np.ones(n_merge)\n if n_merge == 1:\n sum_parts[i].append(np.ones(1))\n for j in range(1, n_merge//2+1):\n sub_indices = list(itertools.combinations(np.arange(n_merge), j))\n\n for sub_idx in sub_indices:\n this_vec = vec.copy()\n this_vec[np.array(sub_idx)] = -1\n sum_parts[i].append(this_vec)\n\n sum_vec = np.zeros(len(indices), dtype=object)\n\n n_parts = [len(s) for s in sum_parts]\n\n\n\n#state = State(np.array([],dtype=int), 2, 2)\n#state.plot(do_annotate=True)\n#state.plot_edges()\n# noo=5\n# noe=14\nds = np.load(\"sam_data_p_06_q_06.dat.npz\", allow_pickle=True)\nstates = ds['states']\n\nn_dat = states.shape[0]\n\nenergies = ds['energies']\nerr_energies = ds['err_energies']\n\nfcs = ds['fcs']\nerr_fcs = ds['err_fcs']\n\nfeat_vec = np.zeros((n_dat, 18))\nfor i, state in enumerate(states):\n\n feat_vec[i] = state.k_o, state.n_oo, state.n_oe, *state.n_ooo, *state.n_ooe, *state.n_oee, *state.n_ccc, *state.n_cce, *state.n_cee\n\nperf_m3, err, reg_m3 = fit_leave_one(feat_vec[:,:3], energies)\n\nperf_m12, err, reg_m12 = fit_leave_one(feat_vec[:,:-3], energies)\n\nnames = np.array(['ko', 'noo', 'noe', \n 'nooo_c', 'nooo_b', 'nooo_e', 'nooe_c', 'nooe_b', 'nooe_e', 'noee_c', 'noee_b', 'noee_e',\n 'nccc_c', 'nccc_b', 'nccc_e', 'ncce_c', 'ncce_b', 'ncce_e', 'ncee_c', 'ncee_b', 'ncee_e'])\nreg = linear_model.LinearRegression()\n\n\nk_vals = np.arange(1,feat_vec.shape[1]+1)\n\nbest_perfs = np.zeros_like(k_vals).astype(float)\nbest_perfs_cv = np.zeros_like(best_perfs)\nbest_indices = np.zeros(k_vals.size, dtype=object)\n\nfor i, k in enumerate(k_vals):\n parts = sorted_k_partitions(np.arange(feat_vec.shape[1]), k)\n\n print(\"\\nk={}\".format(k))\n print(\" {} partitions\".format(len(parts)))\n\n best_perf = np.inf\n best_ind = None\n best_feat = None\n reg = 
linear_model.LinearRegression()\n\n for j, indices in enumerate(parts):\n if j % 10000 == 0:\n print(\"\\r {}\".format(j))\n new_feat = gen_feat(feat_vec, indices)\n reg.fit(new_feat, energies)\n\n mse = get_mse(reg, new_feat, energies)\n\n if mse < best_perf:\n best_perf = mse\n best_ind = indices\n best_feat = new_feat.copy()\n\n best_perfs[i] = best_perf\n best_indices[i] = best_ind\n\n perf, err, r = fit_leave_one(best_feat, energies)\n best_perfs_cv[i] = perf.mean()\n\n print(\" best perf: {:.2f} (tot: {:.2f})\".format(perf.mean(), best_perf))\n\nbar_idx = 3*k_vals\n\nplt.close('all')\nplt.bar(bar_idx, np.sqrt(best_perfs), width=1, label='total RMSE')\nplt.bar(bar_idx+1, np.sqrt(best_perfs_cv), width=1, label='CVd RMSE')\n\nxmin, xmax = plt.xlim()\nplt.plot([xmin, xmax], [np.sqrt(perf_m3.mean()), np.sqrt(perf_m3.mean())], 'r--', label='M3')\n\nplt.plot([xmin, xmax], [np.sqrt(perf_m12.mean()), np.sqrt(perf_m12.mean())], 'k--', label='M12')\n\nax = plt.gca()\nax.set_xticks(bar_idx+0.5)\nax.set_xticklabels(k_vals)\nax.set_ylim(1.9,4)\nplt.xlim(xmin, xmax)\nplt.legend()\n\n","sub_path":"scratch/sam/gen_figs/old_merge_coefs.py","file_name":"old_merge_coefs.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"126209517","text":"# zernike_tests.py\n\"\"\"\nTest suite for zernike.py\n\"\"\"\n\nfrom nose.tools import *\nfrom pyotf.zernike import *\nimport numpy as np\n\n\ndef test_degrees_input():\n \"\"\"Make sure an error is returned if n and m aren't seperated by two\"\"\"\n assert_raises(ValueError, degrees2noll, 1, 2)\n\n\ndef test_noll_input():\n \"\"\"Make sure an error is raised if noll isn't a positive integer\"\"\"\n assert_raises(ValueError, noll2degrees, 0)\n assert_raises(ValueError, noll2degrees, -1)\n\n\ndef test_integer_input():\n \"\"\"make sure degrees2noll and noll2degrees only accept integer inputs\"\"\"\n assert_raises(ValueError, noll2degrees, 2.5)\n assert_raises(ValueError, noll2degrees, 1.0)\n assert_raises(ValueError, degrees2noll, 1.0, 3.0)\n assert_raises(ValueError, degrees2noll, 1.5, 3.5)\n\n\ndef test_indices():\n \"\"\"Make sure that noll2degrees and degrees2noll are opposites of each\n other\"\"\"\n test_noll = np.random.randint(1, 36, 10)\n test_n, test_m = noll2degrees(test_noll)\n test_noll2 = degrees2noll(test_n, test_m)\n assert_true((test_noll == test_noll2).all(), \"{} != {}\".format(test_noll, test_noll2))\n\n\ndef test_n_lt_m():\n \"\"\"n must always be greater than or equal to m\"\"\"\n assert_raises(ValueError, zernike, 0.5, 0.0, 4, 5)\n\n\ndef test_forward_mapping():\n \"\"\"Make sure that the mapping from degrees to Noll's indices is correct\"\"\"\n # from https://en.wikipedia.org/wiki/Zernike_polynomials\n degrees = np.array(\n ((0, 0), (1, 1), (1, -1), (2, 0), (2, -2), (2, 2), (3, -1), (3, 1), (3, -3), (3, 3))\n )\n j = np.array((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))\n n, m = degrees.T\n assert_true((degrees2noll(n, m) == j).all())\n\n\ndef test_reverse_mapping():\n \"\"\"Make sure that the mapping from Noll's indices to degrees is correct\"\"\"\n # from https://en.wikipedia.org/wiki/Zernike_polynomials\n degrees = np.array(\n ((0, 0), (1, 1), (1, -1), (2, 0), (2, -2), (2, 2), (3, -1), (3, 1), (3, -3), (3, 3))\n )\n j = np.array((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))\n n, m = degrees.T\n n_test, m_test = noll2degrees(j)\n assert_true((m_test == m).all(), \"{} != {}\".format(m_test, m))\n assert_true((n_test == n).all(), \"{} != {}\".format(n_test, n))\n\n\ndef 
test_r_theta_dims():\n \"\"\"Make sure that a ValueError is raised if the dims are greater than 2\"\"\"\n r = np.ones((3, 3, 3))\n assert_raises(ValueError, zernike, r, r, 10)\n\n\ndef test_zernike_return_shape():\n \"\"\"Make sure that the return shape matches input shape\"\"\"\n x = np.linspace(-1, 1, 512)\n xx, yy = np.meshgrid(x, x)\n r, theta = cart2pol(yy, xx)\n zern = zernike(r, theta, 10)\n assert_equal(zern.shape, r.shape)\n\n\ndef test_zernike_errors():\n \"\"\"Make sure zernike doesn't accept bad input.\"\"\"\n noll = np.ones((2, 2, 2))\n # check noll dims\n assert_raises(ValueError, zernike, 0, 0, noll)\n # check that n and m must have dimension of 1\n assert_raises(ValueError, zernike, 0, 0, noll, noll)\n # check that r can't be negative\n assert_raises(ValueError, zernike, -1, 0, 0, 1)\n # check that r only has 2 dims\n assert_raises(ValueError, zernike, np.ones((10, 10, 2)), 0, 0, 1)\n\n\ndef test_zernike_zero():\n \"\"\"Make sure same result is obtained for integer and float\"\"\"\n n, m = choose_random_nm()\n r = 0.5\n theta = np.random.rand() * 2 * np.pi - np.pi\n assert_true(\n np.isfinite(zernike(r, theta, n, m)).all(),\n \"r, theta, n, m = {}, {}, {}, {}\".format(r, theta, n, m),\n )\n\n\ndef test_zernike_edges():\n \"\"\"Make sure same result is obtained at 0 and 0.0 and 1 and 1.0\"\"\"\n n, m = choose_random_nm()\n theta = np.random.rand() * 2 * np.pi - np.pi\n assert_equal(\n zernike(1.0, theta, n, m),\n zernike(1, theta, n, m),\n \"theta, n, m = {}, {}, {}\".format(theta, n, m),\n )\n assert_equal(\n zernike(0.0, theta, n, m),\n zernike(0, theta, n, m),\n \"theta, n, m = {}, {}, {}\".format(theta, n, m),\n )\n\n\ndef test_odd_nm():\n \"\"\"Make sure that n and m seperated by odd numbers gives zeros\"\"\"\n n, m = choose_random_nm(True)\n theta = np.random.rand(100) * 2 * np.pi - np.pi\n # we'll check outside the normal range too, when r\n r = np.random.rand(100) * 2\n assert_true(\n (zernike(r, theta, n, m) == 0).all(), \"theta, n, m = {}, {}, {}\".format(theta, n, m)\n )\n\n\ndef choose_random_nm(odd=False):\n \"\"\"Small utility function to choose random n and m, optional argument specifies\n whether n and m are seperated by a factor of 2 or not\"\"\"\n m = np.nan\n n = np.nan\n # make sure m and n are seperated by a factor of 2 otherwise\n # we'll get all zeros\n while (m - n + odd) % 2:\n # choose random positive n\n n = np.random.randint(100)\n if n:\n # if n is greater than zero choose random m such that\n # n >= m\n m = np.random.randint(-n, n + 1)\n else:\n m = 0\n assert n >= abs(m), \"Somethings very wrong {} not >= {}\".format(n, m)\n assert not (m - n + odd) % 2, \"m = {}, n = {}\".format(m, n)\n return n, m\n","sub_path":"tests/zernike_tests.py","file_name":"zernike_tests.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"260587367","text":"import pymysql\n\nfrom Common import ConfigManage\n\nconfig = ConfigManage.get_default_configdic()\n\n\nclass MySql:\n def __init__(self, database):\n self.host = config['mysql.config']['host']\n self.port = config['mysql.config']['port']\n self.user = config['mysql.config']['user']\n self.password = config['mysql.config']['pwd']\n self.database = database\n\n def _GetConnect(self):\n\n '''得到连接信息,返回conn.cursor()'''\n self.conn = pymysql.connect(host=self.host, port=self.port, user=self.user, password=self.password,\n database=self.database,\n charset=\"utf8\")\n cur = self.conn.cursor(cursor=pymysql.cursors.DictCursor) 
# define the cursor (DictCursor returns rows as dicts)\n        # cur = self.conn.cursor()  # plain cursor (SQL Server style)\n        if not cur:\n            raise NameError('Failed to connect to the database')\n        else:\n            return cur\n\n    def ExecQuery(self, sql):\n        '''\n        Query helper: takes a SQL statement and returns the query result.\n        The result is a list of rows: each list element is one record, and each\n        element of a row holds one field of that record.\n        '''\n        cur = self._GetConnect()\n        cur.execute(sql)  # run the SQL query\n        reslist = cur.fetchall()  # fetch all results\n        self.conn.close()  # close the connection\n        return reslist\n\n    def ExecNotQuery(self, sql):\n        '''\n        Execute a non-query statement.\n        '''\n        cur = self._GetConnect()\n        cur.execute(sql)\n        self.conn.commit()  # commit is required for update/delete/insert\n        self.conn.close()\n\n    # def CheckDB(self, testDB):\n    #     db = config['mysql.config']['database']\n    #     for value in db.values():\n    #         print(value)\n    #         if value != testDB:\n    #             return False\n    #     return True\n\n\nif __name__ == '__main__':\n    host = '59.111.124.211'\n    port = 33161\n    user = 'root'\n    pwd = 'zhMysql6567'\n    db = 'ychgoods'\n    sql = \"select *from Gds_GoodsClass where id='a9e100e9-1293-cc24-f107-3f46496d8a8a';\"\n    a = MySql(db)\n    result = a.ExecQuery(sql)\n    print(result)\n    print(result[0]['ID'])\n","sub_path":"Utils/MysqlHelper.py","file_name":"MysqlHelper.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"13000726","text":"import numpy as np\n\nX_raw = np.array([2013,2014,2015,2016,2017])\ny_raw = np.array([12000, 14000,15000, 16500, 17500])\n\n\nX = (X_raw - X_raw.min()) / (X_raw.max() - X_raw.min())\ny = (y_raw - y_raw.min()) / ( y_raw.max()- y_raw.min())\n\na, b = 0,0\n\nnum_epoch = 10000\n\nlearning_rate = 1e-3\n\nfor e in range(num_epoch):\n    y_pred = a * X + b\n\n    grad_a, grad_b = ( y_pred - y).dot(X), (y_pred - y).sum()\n\n    a, b = a - learning_rate * grad_a, b - learning_rate * grad_b\n\nprint(a,b)","sub_path":"eager_tutorial/04_numpy_lr.py","file_name":"04_numpy_lr.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"197866392","text":"from operator import itemgetter\nimport sys\n\ncurr_year = None\ncurr_tempo = 0.0\n#total_count = 0\nyear = None\ncurr_count = 1\ntempo = 0\n\nfor line in sys.stdin:\n\n    # strip the line emitted by the mapper\n    index = line.strip()\n    #total_count += 1\n    value = index.split('\\n')\n    value = value[0].split('\\t')\n    (year, tempo), count = value, 1\n\n    # skip malformed records\n    try:\n        tempo = float(tempo)\n        count = int(count)\n    except ValueError:\n        continue\n\n    if curr_year == year:\n        curr_tempo += tempo\n        curr_count += count\n    else:\n        if curr_year:\n            # output result for the previous year\n            print ('%s\\t%s' % (curr_year, curr_tempo/curr_count))\n            #total_count += count\n        curr_count = count\n        curr_year = year\n        curr_tempo = tempo\n\n\nif curr_year == year:\n    print ('%s\\t%s' % (curr_year, curr_tempo/curr_count))\n#print total_count\n","sub_path":"reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"462841672","text":"\"\"\"\nGiven a binary tree, return the inorder traversal of its nodes' values.\n\nExample:\n\nInput: [1,null,2,3]\n   1\n    \\\n     2\n    /\n   3\n\nOutput: [1,3,2]\nFollow up: Recursive solution is trivial, could you do it iteratively?\n\"\"\"\n\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\nclass Solution:\n    def inorderTraversal(self, root):\n        \"\"\"\n        Runtime: 40 ms, faster than 36.46% of 
Python3 online submissions for Binary Tree Inorder Traversal.\n Memory Usage: 13.7 MB, less than 6.56% of Python3 online submissions for Binary Tree Inorder Traversal.\n \"\"\"\n res = []\n def recursive(node):\n if node != None:\n recursive(node.left)\n res.append(node.val)\n recursive(node.right)\n recursive(root)\n return res\n\n def inorderIterative(self,root):\n \"\"\"\n use stack to do it iteratively\n Runtime: 28 ms, faster than 98.67% of Python3 online submissions for Binary Tree Inorder Traversal.\n Memory Usage: 13.8 MB, less than 6.56% of Python3 online submissions for Binary Tree Inorder Traversal.\n \"\"\"\n def iterate(node,node_stack):\n while node != None:\n node_stack.append(node)\n node = node.left\n\n node_stack = []\n res = []\n iterate(root,node_stack)\n while node_stack:\n current = node_stack.pop()\n res.append(current.val)\n iterate(current.right,node_stack)\n return res\n","sub_path":"Problems/0094-BinaryTreeInorderTraversal.py","file_name":"0094-BinaryTreeInorderTraversal.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"258809286","text":"import argparse\nimport cv2\n\n\ndef get_args(output_path, buffer_size_value, fps_value=20):\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-o\", \"--output\",\n default=output_path,\n help=\"path to output directory\")\n ap.add_argument(\"-f\", \"--fps\", type=int,\n default=fps_value,\n help=\"FPS of output video\")\n ap.add_argument(\"-c\", \"--codec\", type=str,\n default=\"MJPG\",\n help=\"codec of output video\")\n ap.add_argument(\"-b\", \"--buffer-size\", type=int,\n default=buffer_size_value,\n help=\"buffer size of video clip writer\")\n args = vars(ap.parse_args())\n return args\n\n\ndef resize(image, width=None, height=None, inter=cv2.INTER_AREA):\n # grab the image size\n (h, w) = image.shape[:2]\n\n # if both the width and height are None, then return the original image\n if width is None and height is None:\n return image\n\n # check to see if the width is None\n if width is None:\n # calculate the ratio of the height and construct the dimensions\n r = height / float(h)\n dim = (int(w * r), height)\n\n # otherwise, the height is None\n else:\n # calculate the ratio of the width and construct the dimensions\n r = width / float(w)\n dim = (width, int(h * r))\n\n # resize the image\n resized = cv2.resize(image, dim, interpolation=inter)\n\n return resized\n\n\ndef grab_contours(cnts):\n # if the length the contours tuple returned by cv2.findContours is '2'\n # then we are using either OpenCV v2.4\n if len(cnts) == 2:\n cnts = cnts[0]\n\n # if the length of the contours tuple is '3' then we are using either OpenCV v3\n elif len(cnts) == 3:\n cnts = cnts[1]\n\n # otherwise OpenCV has changed their cv2.findContours return\n # signature yet again and I have no idea WTH is going on\n else:\n raise Exception((\"Contours tuple must have length 2 or 3, \"\n \"otherwise OpenCV changed their cv2.findContours return \"\n \"signature yet again. 
Refer to OpenCV's documentation \"\n \"in that case\"))\n\n # return the actual contours array\n return cnts\n","sub_path":"search/scene_detection/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"483541695","text":"\"\"\"Sphinx configuration file for TSSW package\"\"\"\n\nfrom documenteer.conf.pipelinespkg import *\n\n\nproject = \"{{cookiecutter.csc_repo_name}}\"\nhtml_theme_options[\"logotext\"] = project\nhtml_title = project\nhtml_short_title = project\n\nintersphinx_mapping[\"ts_xml\"] = (\"https://ts-xml.lsst.io\", None)\n","sub_path":"{{cookiecutter.doc}}/conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"281074538","text":"from __future__ import absolute_import\nimport struct\n\n\ndef load_holder_canvas(canvas, geom, **kw):\n if geom:\n if isinstance(geom, str):\n\n holes = [(x, y, r, str(c + 1))\n for c, (x, y, r) in iter_geom(geom)]\n else:\n holes = geom\n\n canvas.load_scene(holes, **kw)\n\n\ndef iter_geom(geom, fmt='>fff', width=12):\n f = lambda x: struct.unpack(fmt, geom[x:x + width])\n return ((i, f(gi)) for i, gi in enumerate(range(0, len(geom), width)))\n\n\ndef make_geom(xyr, fmt='>fff'):\n return b''.join((struct.pack(fmt, *args)) for args in xyr).decode('utf-8')\n","sub_path":"pychron/canvas/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"402481309","text":"import logging\nimport simplejson\nfrom utils import get_active_celery_tasks\nfrom application import socketio, redis_store, celery\n\n_task1 = None\n_task2 = None\n\n_current_state = {\"status\": \"undefined\"}\n\ncelery_tasks = {}\n\n\ndef wait_updating_date_task():\n global _current_state\n\n pubsub = redis_store.pubsub()\n pubsub.subscribe(\"updates\")\n while True:\n socketio.sleep(20)\n\n message = pubsub.get_message(ignore_subscribe_messages=True)\n\n if not message:\n continue\n\n try:\n message_data = simplejson.loads(str(message.get('data'), 'UTF-8'))\n except simplejson.JSONDecodeError as e:\n continue\n\n _current_state = message_data\n socketio.emit('updated_data', message_data,\n namespace='/updates', broadcast=True)\n\n\ndef inspect_active_tasks():\n while True:\n tasks = get_active_celery_tasks()\n\n socketio.emit('tasks_state', {'tasks': tasks},\n namespace='/updates', broadcast=True)\n\n socketio.sleep(30)\n\n\n@socketio.on('connect', namespace='/updates')\ndef on_connect_to_updates():\n global _task1, _task2, _current_state\n\n if _task1 is None:\n _task1 = socketio.start_background_task(target=wait_updating_date_task)\n\n if _task2 is None:\n _task2 = socketio.start_background_task(target=inspect_active_tasks)\n\n socketio.emit('connected', {\"scheduler_status\": _current_state, \"tasks\": get_active_celery_tasks()},\n namespace='/updates')\n\n logging.info(\"client connected\")\n\n\n@socketio.on('disconnect', namespace='/updates')\ndef on_connect_to_updates():\n logging.info(\"client disconnected\")\n\n\n@socketio.on('add_task', namespace='/updates')\ndef on_add_task(data):\n since_date = data.get('since_date')\n to_date = data.get('to_date')\n\n celery.send_task('tasks.manually_update_data',\n kwargs={'since_date': since_date, 'to_date': to_date, 'status': None})\n\n\n@socketio.on('remove_task', 
namespace='/updates')\ndef on_remove_task(data):\n \"\"\"\"\"\"\n celery.control.revoke(data.get('task_id'), terminate=True)\n\n","sub_path":"application/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"186249089","text":"\n\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n#root of project\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\n# Application definition\n\nINSTALLED_APPS = (\n #django app\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n #third party apps\n 'crispy_forms',\n 'django_filters',\n 'registration',\n #my apps\n 'carts',\n 'newsletter',\n 'orders',\n 'products',\n)\n\n'''\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [os.path.join(BASE_DIR, \"templates\"), os.path.join(BASE_DIR, \"products\",\"templates\")],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n'''\n\nADMINS = (\n\t(\"Justin\", \"codingforentrepreneurs@gmail.com\"),\n\n\t)\n\n#ALLOWED_HOSTS = ['cfedeploy.webfactional.com', 'trydjango.com', 'www.trydjango.com']\nALLOWED_HOSTS = []\n#purchasing domain name http://name.com\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\n#Braintree Payments Details\nBRAINTREE_PUBLIC = \"qn3p5n7njksw47r3\"\nBRAINTREE_PRIVATE = \"d14ac944794c0df1c81991ecf49221ff\"\nBRAINTREE_MERCHANT_ID = \"n84nynknvzz3j3sz\"\nBRAINTREE_ENVIRONEMNT = \"Sandbox\"\n\nfrom django.conf import settings\nDATABASES = settings.DATABASES\nimport dj_database_url\n# Parse database configuration from $DATABASE_URL\nDATABASES['default'] = dj_database_url.config()\n\n# Honor the 'X-Forwarded-Proto' header for request.is_secure()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\n\n","sub_path":"ecommerce2/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"106552293","text":"import encryptor\n\ne = encryptor.encryptor()\n\nrefid, key = e.encrypt_file('Lac.jpg')\n\nkey = e.decrypt_file(refid, 'Lac.jpg.enc')\n\nenc_data = e.encrypt_data( 'how', key)\n\ndata = e.decrypt_data(enc_data, key)\n\nprint(data)\n","sub_path":"test_encryptor.py","file_name":"test_encryptor.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"78649692","text":"from pygai.base import AbstractBoard\nimport pygame\nfrom itertools import product\nfrom copy import deepcopy\n\nclass Piece:\n def __init__(self, value=0, size=(97, 97)):\n self.value = value\n self.surface = -1\n self.rect = -1\n self.index = []\n self.position = pygame.Vector2(-1, -1)\n self.size = size\n\n if value > 0:\n self.color = 'white'\n self.image_postfix = \"-white.png\"\n else:\n self.color = 'black'\n self.image_postfix = \"-black.png\"\n return\n \n def set_image(self, image_path=''):\n self.surface = 
pygame.transform.scale(pygame.image.load(image_path), self.size)\n self.rect = self.surface.get_rect()\n return\n\n def set_position(self, pos):\n \"\"\"\n pos::Coord2D (Column, Row) or (x, y) or (j, i)\n \"\"\"\n\n self.index = [int(pos[1]), int(pos[0])]\n self.position = pos\n self.rect = pygame.Rect(self.position, self.size)\n return\n \n def is_clicked(self, event):\n if event.button == 1: # is left button clicked\n if self.rect.collidepoint(event.pos): # is mouse over button\n return True\n\n def draw(self, win):\n self.rect = pygame.Rect(self.position * 90, self.size)\n win.blit(self.surface, self.rect)\n\n def get_available_moves(self, board):\n return\n \nclass Pawn(Piece):\n def __init__(self, value=1):\n super().__init__(value)\n self.set_image(\"./asset/chess-pawn\" + self.image_postfix)\n self.never_moved = True\n self.wscoreboard = [\n [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],\n [5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0],\n [1.0,1.0,2.0,3.0,3.0,2.0,1.0,1.0],\n [0.5,0.5,1.0,2.5,2.5,1.0,0.5,0.5],\n [0.0,0.0,0.0,2.0,2.0,0.0,0.0,0.0],\n [0.5,-0.5,-1.0,0.0,0.0,-1.0,-0.5,0.5],\n [0.5,1.0,1.0,-2.0,-2.0,1.0,1.0,0.5],\n [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]\n ]\n self.bscoreboard = [\n [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],\n [0.5,1.0,1.0,-2.0,-2.0,1.0,1.0,0.5],\n [0.5,-0.5,-1.0,0.0,0.0,-1.0,-0.5,0.5],\n [0.0,0.0,0.0,2.0,2.0,0.0,0.0,0.0],\n [0.5,0.5,1.0,2.5,2.5,1.0,0.5,0.5],\n [1.0,1.0,2.0,3.0,3.0,2.0,1.0,1.0],\n [5.0,5.0,5.0,5.0,5.0,5.0,5.0,5.0],\n [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]\n ]\n \n def get_available_moves(self, board):\n \"\"\"returns a list of available moves\"\"\"\n\n if self.color == 'black':\n direction = 1\n else:\n direction = -1\n\n available_moves = []\n \n i, j = int(self.position[1]), int(self.position[0])\n try:\n if board[i + 1 * direction][j] == 0:\n available_moves.append(pygame.Vector2(self.position[1] + 1 * direction, self.position[0]))\n except Exception as e:\n \n pass\n\n # takes move\n try:\n if board[i + 1 * direction][j - 1] != 0 and board[i + 1 * direction][j - 1].color != self.color:\n available_moves.append(pygame.Vector2(self.position[1] + 1 * direction, self.position[0] - 1))\n if board[i + 1 * direction][j + 1] != 0 and board[i + 1 * direction][j + 1].color != self.color:\n available_moves.append(pygame.Vector2(self.position[1] + 1 * direction, self.position[0] + 1))\n except Exception as e:\n \n pass\n try:\n if self.never_moved and board[self.index[0] + 1 * direction][self.index[1]] == 0 and board[self.index[0] + 2 * direction][self.index[1]] == 0:\n available_moves.append(pygame.Vector2(self.position[1] + 2 * direction, self.position[0]))\n except:\n pass\n return available_moves \n\nclass King(Piece):\n def __init__(self, value=40):\n super().__init__(value)\n self.set_image(\"./asset/chess-king\" + self.image_postfix)\n self.wscoreboard = [\n [-3.0,-4.0,-4.0,-5.0,-5.0,-4.0,-4.0,-3.0],\n [-3.0,-4.0,-4.0,-5.0,-5.0,-4.0,-4.0,-3.0],\n [-3.0,-4.0,-4.0,-5.0,-5.0,-4.0,-4.0,-3.0],\n [-3.0,-4.0,-4.0,-5.0,-5.0,-4.0,-4.0,-3.0],\n [-2.0,-3.0,-3.0,-4.0,-4.0,-3.0,-3.0,-2.0],\n [-1.0,-2.0,-2.0,-2.0,-2.0,-2.0,-2.0,-1.0],\n [2.0,2.0,0.0,0.0,0.0,0.0,2.0,2.0],\n [2.0,3.0,1.0,0.0,0.0,1.0,3.0,2.0]\n ]\n self.bscoreboard = [\n [2.0,3.0,1.0,0.0,0.0,1.0,3.0,2.0],\n [2.0,2.0,0.0,0.0,0.0,0.0,2.0,2.0],\n [-1.0,-2.0,-2.0,-2.0,-2.0,-2.0,-2.0,-1.0],\n [-2.0,-3.0,-3.0,-4.0,-4.0,-3.0,-3.0,-2.0],\n [-3.0,-4.0,-4.0,-5.0,-5.0,-4.0,-4.0,-3.0],\n [-3.0,-4.0,-4.0,-5.0,-5.0,-4.0,-4.0,-3.0],\n [-3.0,-4.0,-4.0,-5.0,-5.0,-4.0,-4.0,-3.0],\n [-3.0,-4.0,-4.0,-5.0,-5.0,-4.0,-4.0,-3.0],\n ]\n \n def 
get_available_moves(self, board):\n i, j = self.index\n available_moves = []\n\n # Diagonals\n if i - 1 >= 0 and i - 1 < 8 and j - 1 >= 0 and j - 1 < 8 and (board[i - 1][j - 1] == 0 or (board[i - 1][j - 1] != 0 and board[i - 1][j - 1].color != self.color)):\n available_moves.append((i - 1, j - 1))\n if i + 1 >= 0 and i + 1 < 8 and j + 1 >= 0 and j + 1 < 8 and (board[i + 1][j + 1] == 0 or (board[i + 1][j + 1] != 0 and board[i + 1][j + 1].color != self.color)):\n available_moves.append((i + 1, j + 1))\n if i - 1 >= 0 and i - 1 < 8 and j + 1 >= 0 and j + 1 < 8 and (board[i - 1][j + 1] == 0 or (board[i - 1][j + 1] != 0 and board[i - 1][j + 1].color != self.color)):\n available_moves.append((i - 1, j + 1))\n if i + 1 >= 0 and i + 1 < 8 and j - 1 >= 0 and j - 1 < 8 and (board[i + 1][j - 1] == 0 or (board[i + 1][j - 1] != 0 and board[i + 1][j - 1].color != self.color)):\n available_moves.append((i + 1, j - 1))\n\n # Vertical\n if i - 1 >= 0 and i - 1 < 8 and j >= 0 and j < 8 and (board[i - 1][j] == 0 or (board[i - 1][j] != 0 and board[i - 1][j].color != self.color)):\n available_moves.append((i - 1, j))\n if i + 1 >= 0 and i + 1 < 8 and j >= 0 and j < 8 and (board[i + 1][j] == 0 or (board[i + 1][j] != 0 and board[i + 1][j].color != self.color)):\n available_moves.append((i + 1, j))\n\n # Horizontal\n if i >= 0 and i < 8 and j + 1 >= 0 and j + 1 < 8 and (board[i][j + 1] == 0 or (board[i][j + 1] != 0 and board[i][j + 1].color != self.color)):\n available_moves.append((i, j + 1))\n if i >= 0 and i < 8 and j - 1 >= 0 and j - 1 < 8 and (board[i][j - 1] == 0 or (board[i][j - 1] != 0 and board[i][j - 1].color != self.color)):\n available_moves.append((i, j - 1))\n\n return available_moves\n\nclass Queen(Piece):\n def __init__(self, value=9):\n super().__init__(value)\n self.rook = Rook(value)\n\n self.bishop = Bishop(value)\n\n self.set_image(\"./asset/chess-queen\" + self.image_postfix)\n self.wscoreboard = [\n [-2.0,-1.0,-1.0,-0.5,-0.5,-1.0,-1.0,-2.0],\n [-1.0,0.0,0.0,0.0,0.0,0.0,0.0,-1.0],\n [-1.0,0.0,0.5,0.5,0.5,0.5,0.0,-1.0],\n [-0.5,0.0,0.5,0.5,0.5,0.5,0.0,-0.5],\n [0.0,0.0,0.5,0.5,0.5,0.5,0.0,-0.5],\n [-1.0,0.5,0.5,0.5,0.5,0.5,0.0,-1.0],\n [-1.0,0.0,0.5,0.0,0.0,0.0,0.0,-1.0],\n [-2.0,-1.0,-1.0,-0.5,-0.5,-1.0,-1.0,-2.0]\n ]\n self.bscoreboard = [\n [-2.0,-1.0,-1.0,-0.5,-0.5,-1.0,-1.0,-2.0],\n [-1.0,0.0,0.5,0.0,0.0,0.0,0.0,-1.0],\n [-1.0,0.5,0.5,0.5,0.5,0.5,0.0,-1.0],\n [0.0,0.0,0.5,0.5,0.5,0.5,0.0,-0.5],\n [-0.5,0.0,0.5,0.5,0.5,0.5,0.0,-0.5],\n [-1.0,0.0,0.5,0.5,0.5,0.5,0.0,-1.0],\n [-1.0,0.0,0.0,0.0,0.0,0.0,0.0,-1.0],\n [-2.0,-1.0,-1.0,-0.5,-0.5,-1.0,-1.0,-2.0]\n ]\n \n def get_available_moves(self, board):\n available_moves = []\n\n self.rook.index = self.index\n moves_hv = self.rook.get_available_moves(board)\n\n self.bishop.index = self.index\n moves_diag = self.bishop.get_available_moves(board)\n\n available_moves.extend(moves_hv)\n available_moves.extend(moves_diag)\n return available_moves\n\nclass Rook(Piece):\n def __init__(self, value=5):\n super().__init__(value)\n self.set_image(\"./asset/chess-rook\" + self.image_postfix)\n self.wscoreboard = [\n [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0],\n [0.5,1.0,1.0,1.0,1.0,1.0,1.0,0.5],\n [-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n [-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n [-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n [-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n [-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n [0.0,0.0,0.0,0.5,0.5,0.0,0.0,0.0]\n ]\n self.bscoreboard = [\n [0.0,0.0,0.0,0.5,0.5,0.0,0.0,0.0],\n [-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n [-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n 
[-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n [-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n [-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n [-0.5,0.0,0.0,0.0,0.0,0.0,0.0,-0.5],\n [0.5,1.0,1.0,1.0,1.0,1.0,1.0,0.5],\n [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]\n ]\n \n def get_available_moves(self, board):\n \"\"\"4 Directions Vertical & Horizontal\"\"\"\n\n i, j = self.index\n available_moves = []\n\n # Vertical \n explore_i = i\n while explore_i >= 0 and j >= 0 and explore_i < 8 and j < 8:\n explore_i += 1\n if explore_i >= 0 and explore_i < 8:\n if board[explore_i][j] == 0:\n available_moves.append((explore_i, j))\n continue\n if board[explore_i][j] != 0 and board[explore_i][j].color != self.color:\n available_moves.append((explore_i, j))\n break\n else:\n break\n\n explore_i = i\n while explore_i >= 0 and j >= 0 and explore_i < 8 and j < 8:\n explore_i -= 1\n if explore_i >= 0 and explore_i < 8:\n if board[explore_i][j] == 0:\n available_moves.append((explore_i, j))\n continue\n if board[explore_i][j] != 0 and board[explore_i][j].color != self.color:\n available_moves.append((explore_i, j))\n break\n else:\n break\n\n # Horizontal\n explore_j = j\n while i >= 0 and explore_j >= 0 and i < 8 and explore_j < 8:\n explore_j += 1\n if explore_j >= 0 and explore_j < 8:\n if board[i][explore_j] == 0:\n available_moves.append((i, explore_j))\n continue\n if board[i][explore_j] != 0 and board[i][explore_j].color != self.color:\n available_moves.append((i, explore_j))\n break\n else:\n break\n \n explore_j = j\n while i >= 0 and explore_j >= 0 and i < 8 and explore_j < 8:\n explore_j -= 1\n if explore_j >= 0 and explore_j < 8:\n if board[i][explore_j] == 0:\n available_moves.append((i, explore_j))\n continue\n if board[i][explore_j] != 0 and board[i][explore_j].color != self.color:\n available_moves.append((i, explore_j))\n break\n else:\n break\n\n return available_moves\n\nclass Bishop(Piece):\n def __init__(self, value=3):\n super().__init__(value)\n self.set_image(\"./asset/chess-bishop\" + self.image_postfix)\n self.wscoreboard = [\n [-2.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-2.0],\n [-1.0,0.0,0.0,0.0,0.0,0.0,0.0,-1.0],\n [-1.0,0.0,0.5,1.0,1.0,0.5,0.0,-1.0],\n [-1.0,0.5,0.5,1.0,1.0,0.5,0.5,-1.0],\n [-1.0,0.0,1.0,1.0,1.0,1.0,0.0,-1.0],\n [-1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0],\n [-1.0,0.5,0.0,0.0,0.0,0.0,0.5,-1.0],\n [-2.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-2.0]\n ]\n self.bscoreboard = [\n [-2.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-2.0],\n [-1.0,0.5,0.0,0.0,0.0,0.0,0.5,-1.0],\n [-1.0,1.0,1.0,1.0,1.0,1.0,1.0,-1.0],\n [-1.0,0.0,1.0,1.0,1.0,1.0,0.0,-1.0],\n [-1.0,0.5,0.5,1.0,1.0,0.5,0.5,-1.0],\n [-1.0,0.0,0.5,1.0,1.0,0.5,0.0,-1.0],\n [-1.0,0.0,0.0,0.0,0.0,0.0,0.0,-1.0],\n [-2.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-2.0]\n ]\n\n def get_available_moves(self, board):\n # \\ /\n # \\ / Diagonals\n available_moves = []\n i, j = self.index\n \n # diagonal \\\n explore_i = i + 1\n explore_j = j + 1\n if explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n while board[explore_i][explore_j] == 0 and explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n available_moves.append((explore_i, explore_j))\n explore_i += 1\n explore_j += 1\n if explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n continue\n else:\n break\n\n try:\n if board[explore_i][explore_j] != 0 and board[explore_i][explore_j].color != self.color and explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n available_moves.append((explore_i, explore_j))\n except:\n pass\n\n explore_i = i - 1\n explore_j = j - 1\n if 
explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n while board[explore_i][explore_j] == 0 and explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n available_moves.append((explore_i, explore_j))\n explore_i -= 1\n explore_j -= 1\n if explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n continue\n else:\n break\n\n try:\n if board[explore_i][explore_j] != 0 and board[explore_i][explore_j].color != self.color and explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n available_moves.append((explore_i, explore_j))\n except:\n pass\n\n # diagonal /\n explore_i = i - 1\n explore_j = j + 1\n if explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n \n while board[explore_i][explore_j] == 0 and explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n available_moves.append((explore_i, explore_j))\n explore_i -= 1\n explore_j += 1\n if explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n continue\n else:\n break\n try:\n if board[explore_i][explore_j] != 0 and board[explore_i][explore_j].color != self.color and explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n available_moves.append((explore_i, explore_j))\n except:\n pass\n \n explore_i = i + 1\n explore_j = j - 1\n if explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n while board[explore_i][explore_j] == 0 and explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n available_moves.append((explore_i, explore_j))\n explore_i += 1\n explore_j -= 1\n if explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n continue\n else:\n break\n try:\n if board[explore_i][explore_j] != 0 and board[explore_i][explore_j].color != self.color and explore_i >= 0 and explore_j >= 0 and explore_i < 8 and explore_j < 8:\n available_moves.append((explore_i, explore_j))\n except:\n pass\n\n return available_moves\n\nclass Knight(Piece):\n def __init__(self, value=3):\n super().__init__(value)\n self.set_image(\"./asset/chess-knight\" + self.image_postfix)\n self.wscoreboard = [\n [-5.0,-4.0,-3.0,-3.0,-3.0,-3.0,-4.0,-5.0],\n [-4.0,-2.0,0.0,0.0,0.0,0.0,-2.0,-4.0],\n [-3.0,0.0,1.0,1.5,1.5,1.0,0.0,-3.0],\n [-3.0,0.5,1.5,2.0,2.0,1.5,0.5,-3.0],\n [-3.0,0.5,1.5,2.0,2.0,1.5,0.5,-3.0],\n [-3.0,0.0,1.0,1.5,1.5,1.0,0.0,-3.0],\n [-4.0,-2.0,0.0,0.0,0.0,0.0,-2.0,-4.0],\n [-5.0,-4.0,-3.0,-3.0,-3.0,-3.0,-4.0,-5.0]\n ]\n \n self.bscoreboard = [\n [-5.0,-4.0,-3.0,-3.0,-3.0,-3.0,-4.0,-5.0],\n [-4.0,-2.0,0.0,0.0,0.0,0.0,-2.0,-4.0],\n [-3.0,0.0,1.0,1.5,1.5,1.0,0.0,-3.0],\n [-3.0,0.5,1.5,2.0,2.0,1.5,0.5,-3.0],\n [-3.0,0.5,1.5,2.0,2.0,1.5,0.5,-3.0],\n [-3.0,0.0,1.0,1.5,1.5,1.0,0.0,-3.0],\n [-4.0,-2.0,0.0,0.0,0.0,0.0,-2.0,-4.0],\n [-5.0,-4.0,-3.0,-3.0,-3.0,-3.0,-4.0,-5.0]\n ]\n\n def get_available_moves(self, board):\n i, j = self.index\n moves = list(product([i-1, i+1],[j-2, j+2])) + list(product([i-2,i+2],[j-1,j+1]))\n available_moves = []\n for i, j in moves:\n if i >= 0 and j >= 0 and i < 8 and j < 8:\n if board[i][j] == 0:\n available_moves.append((i, j))\n if board[i][j] != 0 and board[i][j].color != self.color:\n available_moves.append((i, j))\n return available_moves\n\nclass ChessBoard(AbstractBoard):\n \"\"\"\n Represents ChessBoard \n \"\"\"\n\n def __init__(self, blank=False):\n\n self.turn = 0\n\n self.surface = -1\n self.position = [0, 0]\n self.pieces = []\n \n self.white_pieces = []\n self.black_pieces = []\n self.black_king = King(-900)\n self.white_king = 
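# [Added sketch] Knight.get_available_moves above relies on product(), whose
# import (from itertools) sits outside this excerpt. A self-contained
# illustration of the same L-shaped move enumeration:
from itertools import product

def knight_targets(i, j):
    candidates = list(product([i - 1, i + 1], [j - 2, j + 2])) \
        + list(product([i - 2, i + 2], [j - 1, j + 1]))
    return [(a, b) for a, b in candidates if 0 <= a < 8 and 0 <= b < 8]

# knight_targets(0, 0) -> [(1, 2), (2, 1)]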
King(900)\n\n # Initialize a Blank board.\n self.board = [[0 for _ in range(8)] for _ in range(8)]\n if not blank:\n self.board = [\n [Rook(-50), Knight(-30), Bishop(-30), Queen(-90), self.black_king, Bishop(-30), Knight(-30), Rook(-50)],\n [Pawn(-10), Pawn(-10), Pawn(-10), Pawn(-10), Pawn(-10), Pawn(-10), Pawn(-10), Pawn(-10)],\n [ 0, 0, 0, 0, 0 , 0, 0, 0],\n [ 0, 0, 0, 0, 0 , 0, 0, 0],\n [ 0, 0, 0, 0, 0 , 0, 0, 0],\n [ 0, 0, 0, 0, 0 , 0, 0, 0],\n [Pawn( 10), Pawn( 10), Pawn( 10), Pawn( 10), Pawn( 10), Pawn( 10), Pawn( 10), Pawn( 10)],\n [Rook( 50), Knight( 30), Bishop( 30), Queen( 90), self.white_king, Bishop( 30), Knight( 30), Rook( 50)]\n ]\n self.board_score = 0\n\n # Initialize piece\n self.update_pieces_list()\n \n def __str__(self):\n result = \"\"\n for row in self.board:\n row_result = \"[\"\n for piece in row:\n if type(piece).__name__ == \"int\":\n row_result += str(piece) + \", \"\n else:\n row_result += str(piece.value) + \", \"\n result += row_result + \"]\\n\"\n return result\n \n def set_image(self, image_path, size=(720,720)):\n \"\"\"\n Set chessboard image.\n \"\"\"\n\n self.surface = pygame.transform.scale(pygame.image.load(image_path), size)\n \n def update_pieces_list(self):\n self.pieces.clear()\n self.black_pieces.clear()\n self.white_pieces.clear()\n white_score = 0\n black_score = 0\n for index_row, row in enumerate(self.board):\n for index_column, piece in enumerate(row):\n if type(piece).__name__ != 'int':\n if piece.color == 'black':\n black_score += piece.value\n self.black_pieces.append(piece)\n else:\n white_score += piece.value\n self.white_pieces.append(piece)\n\n piece.set_position(pygame.Vector2(index_column, index_row)) \n self.pieces.append(piece)\n self.board_score = white_score - black_score\n\n def make_move(self, origin, dest):\n \"\"\"\n Make move on chessboard\n \"\"\"\n\n # copy_board = deepcopy(self.board)\n\n ori0, ori1 = int(origin[0]), int(origin[1])\n dest0, dest1 = int(dest[0]), int(dest[1])\n print(\"make move\", origin, dest)\n\n # i,j = i,j \n temp_piece = self.board[ori0][ori1]\n\n # Check if pawn moves\n if temp_piece.value == -10 or temp_piece.value == 10:\n temp_piece.never_moved = False\n\n # i,j = y,x, Converts Index to COORD\n temp_piece.position = pygame.Vector2(dest1, dest0)\n temp_piece.index = (dest0, dest1)\n\n # i,j = i,j \n self.board[dest0][dest1] = temp_piece\n \n # i,j = i,j \n self.board[ori0][ori1] = 0\n\n # Update these lines, to improve apps performance\n # copy_chessboard = ChessBoard(blank=True)\n # copy_chessboard.board = copy_board\n # copy_chessboard.update_pieces_list()\n # copy_chessboard.turn += 1\n \n self.pieces.clear()\n self.update_pieces_list()\n\n self.turn += 1\n\n # return copy_chessboard\n\n def calculate_board_score(self):\n \"\"\"Calculate current board's score\"\"\"\n \n white_score = 0\n black_score = 0\n\n for piece in self.pieces:\n if piece.color == 'black':\n black_score += piece.value + 1 *piece.bscoreboard[piece.index[0]][piece.index[1]]\n else:\n white_score += piece.value + 1 *piece.wscoreboard[piece.index[0]][piece.index[1]]\n \n return abs(white_score) - abs(black_score)\n\n def get_possible_moves(self, color=None):\n \"\"\"Returns current board possible moves.\"\"\"\n\n if color:\n if color == 'white':\n return self.get_movements_from(self.white_pieces)\n return self.get_movements_from(self.black_pieces)\n\n if self.turn % 2 == 0:\n return self.get_movements_from(self.white_pieces)\n return self.get_movements_from(self.black_pieces)\n\n def get_movements_from(self, pieces):\n 
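# [Added sketch] make_move above mutates the board in place; the commented-out
# deepcopy lines hint at speculative search. A hypothetical one-ply lookahead
# built from those pieces. Caveat: deepcopy only works while pieces carry no
# pygame Surface (surfaces are not deep-copyable), so treat this as a sketch:
from copy import deepcopy

def score_after(chessboard, origin, dest):
    trial = deepcopy(chessboard)
    trial.make_move(origin, dest)
    return trial.calculate_board_score()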
available_movements = []\n for piece in pieces:\n movement = {\n 'index': piece.index,\n 'available_moves': piece.get_available_moves(self.board)\n }\n if len(movement['available_moves']) > 0:\n available_movements.append(movement)\n return available_movements\n\n def is_finished(self):\n \"\"\"returns bool of board condition. game is end or not\"\"\"\n\n if self.black_king in self.black_pieces and self.white_king in self.white_pieces:\n return False\n return True\n \n def draw(self, win):\n \"\"\"\n method to draw chessboard\n \"\"\"\n\n # draw board\n win.blit(self.surface, self.position)\n\n # draw pieces\n for piece in self.pieces:\n piece.draw(win)\n","sub_path":"engine/chess.py","file_name":"chess.py","file_ext":"py","file_size_in_byte":23860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"64039121","text":"import functools\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\n\n\ndef option_setter(module, default_options, args):\n # Argument setting\n for name, value in default_options.items():\n if name in args:\n setattr(module, name, args[name])\n else:\n setattr(module, name, value)\n\n\ndef init_weights(net, init_type='normal', init_gain=0.02):\n \"\"\"Initialize network weights.\n\n Parameters:\n net (network) -- network to be initialized\n init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal\n init_gain (float) -- scaling factor for normal, xavier and orthogonal.\n\n We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might\n work better for some applications. Feel free to try yourself.\n \"\"\"\n def init_func(m): # define the initialization function\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):\n if init_type == 'normal':\n init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n init.xavier_uniform_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n init.kaiming_uniform_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n if hasattr(m, 'bias') and m.bias is not None:\n init.constant_(m.bias.data, 0.0)\n elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.\n init.normal_(m.weight.data, 1.0, init_gain)\n init.constant_(m.bias.data, 0.0)\n\n print('initialize network with %s' % init_type)\n net.apply(init_func) # apply the initialization function <init_func>\n\n\ndef get_norm_layer(dim, norm_type='instance'):\n \"\"\"Return a normalization layer\n\n Parameters:\n norm_type (str) -- the name of the normalization layer: batch | instance | none\n\n For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).\n For InstanceNorm, we do not use learnable affine parameters. 
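# [Added sketch] Typical use of init_weights above, with a small throwaway
# torch model (model shape and gain values are illustrative only):
import torch.nn as nn

net = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.BatchNorm2d(8))
init_weights(net, init_type='kaiming', init_gain=0.02)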
We do not track running statistics.\n \"\"\"\n if norm_type == 'batch' and dim == 1:\n norm_layer = functools.partial(nn.BatchNorm1d, affine=True, track_running_stats=True)\n elif norm_type == 'batch' and dim == 2:\n norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)\n elif norm_type == 'instance' and dim == 1:\n norm_layer = functools.partial(nn.InstanceNorm1d, affine=False, track_running_stats=False)\n elif norm_type == 'instance' and dim == 2:\n norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)\n elif norm_type == 'none':\n norm_layer = None\n else:\n raise NotImplementedError('normalization layer [%s] is not found' % norm_type)\n return norm_layer\n\n\ndef get_use_bias(args):\n if type(args['norm_layer']) == functools.partial:\n return args['norm_layer'].func != nn.BatchNorm1d \\\n and args['norm_layer'].func != nn.BatchNorm2d\n else:\n return args['norm_layer'] != nn.BatchNorm1d \\\n and args['norm_layer'] != nn.BatchNorm2d\n","sub_path":"styletransfer/models/networks/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"378451380","text":"\"\"\"\ninput:[2,7,11,15]\ntarget:9\noutput:[0,1]\nauthor:yqtong\ndata:2019-09-13\n\"\"\"\nclass Solution:\n def twoSum1(self, nums, target):\n result_list = []\n for idx in range(len(nums)):\n for idy in range(idx+1,len(nums)):\n result = nums[idx] + nums[idy]\n if result == target:\n result_list.append(idx)\n result_list.append(idy)\n return result_list\n\n def twoSum2(self, nums, target):\n result_dict = {}\n for index, number in enumerate(nums):\n second_number = target - number\n if second_number in result_dict:\n return [result_dict[second_number], index]\n result_dict[number] = index\n return None\n","sub_path":"Algorithm/1_two_sum.py","file_name":"1_two_sum.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"464123120","text":"import argparse\r\nimport sys\r\n\r\n\r\ndef cal(args):\r\n if args.o==\"add\":\r\n return args.x + args.y \r\n\r\n\r\n elif args.o==\"sub\":\r\n return args.x - args.y \r\n\r\n elif args.o==\"mul\":\r\n return args.x * args.y \r\n\r\n elif args.o==\"div\":\r\n return args.x / args.y \r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument('--x',type=float,default=1.0,help=\"Enter First Number:- \")\r\n\r\n parser.add_argument('--y',type=float,default=1.0,help=\"Enter Second Number:- \")\r\n\r\n\r\n parser.add_argument('--o',type=str,default=\"add\",help=\"Addition:- \")\r\n\r\n\r\n args=parser.parse_args()\r\n\r\n sys.stdout.write(str(cal(args)))\r\n","sub_path":"CLI/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"376845255","text":"\"\"\"\n Read the save filesystem for all of the required information specified by\n (1) the models specified for partition function and\n (2) the electronic structure levels\n in order to write portions of MESS strings for species and reaction paths\n and calculate electronic and zero-point vibrational energies.\n\"\"\"\n\nimport automol\nimport autofile\nfrom routines.pf.models import ene\nfrom routines.pf.models import typ\nfrom routines.pf.models import etrans\nfrom routines.pf.models import _rot as rot\nfrom 
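# [Added sketch] How get_norm_layer and get_use_bias above compose when
# building a conv block (channel sizes are illustrative):
import torch.nn as nn

norm_layer = get_norm_layer(2, 'instance')           # partial(nn.InstanceNorm2d, ...)
use_bias = get_use_bias({'norm_layer': norm_layer})  # True: not a BatchNorm
block = nn.Sequential(
    nn.Conv2d(16, 32, kernel_size=3, bias=use_bias),
    norm_layer(32),
)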
routines.pf.models import _tors as tors\nfrom routines.pf.models import _sym as sym\nfrom routines.pf.models import _vib as vib\nfrom routines.pf.models import _flux as flux\nfrom routines.pf.models import _pst as pst\nfrom routines.pf.models import _util as util\nfrom routines.pf.thermo import basis \nfrom routines.pf.thermo import heatform\nfrom lib.structure import tors as torsprep\nfrom lib.phydat import phycon\nfrom lib import filesys\n\n\n# General readers\ndef read_spc_data(spc_dct, spc_name,\n chn_pf_models, chn_pf_levels,\n run_prefix, save_prefix, chn_basis_ene_dct,\n ref_pf_models=(), ref_pf_levels=(), calc_chn_ene=True):\n \"\"\" Determines which block writer to use tau\n \"\"\"\n print(('\\n++++++++++++++++++++++++++++++++++++++++++++++++' +\n '++++++++++++++++++++++++++++++++++++++'))\n print('\\nReading filesystem info for {}'.format(spc_name))\n\n vib_model, tors_model = chn_pf_models['vib'], chn_pf_models['tors']\n spc_dct_i = spc_dct[spc_name]\n if typ.is_atom(spc_dct_i):\n inf_dct = atm_data(\n spc_dct, spc_name,\n chn_pf_models, chn_pf_levels,\n ref_pf_models, ref_pf_levels,\n run_prefix, save_prefix)\n writer = 'atom_block'\n else:\n if vib_model == 'tau' or tors_model == 'tau':\n inf_dct = tau_data(\n spc_dct_i,\n chn_pf_models, chn_pf_levels,\n run_prefix, save_prefix, saddle=False)\n writer = 'tau_block'\n else:\n inf_dct, chn_basis_ene_dct = mol_data(\n spc_name, spc_dct,\n chn_pf_models, chn_pf_levels,\n ref_pf_models, ref_pf_levels, chn_basis_ene_dct,\n run_prefix, save_prefix, calc_chn_ene=calc_chn_ene, saddle=False)\n writer = 'species_block'\n\n # Add writer to inf dct\n inf_dct['writer'] = writer\n\n return inf_dct, chn_basis_ene_dct\n\n\ndef read_ts_data(spc_dct, tsname, rcts, prds,\n chn_pf_models, chn_pf_levels,\n run_prefix, save_prefix, chn_basis_ene_dct,\n ts_class, ts_sadpt, ts_nobarrier,\n ref_pf_models=(), ref_pf_levels=()):\n \"\"\" Determine which block function to useset block functions\n \"\"\"\n\n print(('\\n++++++++++++++++++++++++++++++++++++++++++++++++' +\n '++++++++++++++++++++++++++++++++++++++'))\n print('\\nReading filesystem info for {}'.format(tsname))\n ts_dct = spc_dct[tsname]\n reac_dcts = [spc_dct[name] for name in rcts]\n prod_dcts = [spc_dct[name] for name in prds]\n\n # Get all of the information for the filesystem\n if not typ.var_radrad(ts_class):\n\n # Set up the saddle point keyword\n sadpt = True\n search = ts_dct.get('ts_search')\n if search is not None:\n if 'vtst' in search:\n sadpt = False\n\n # Build MESS string for TS at a saddle point\n if ts_sadpt == 'pst':\n inf_dct = pst_data(\n ts_dct, reac_dcts,\n chn_pf_levels,\n run_prefix, save_prefix)\n writer = 'pst_block'\n elif ts_sadpt == 'rpvtst':\n inf_dct = rpvtst_data(\n ts_dct, reac_dcts,\n chn_pf_models, chn_pf_levels,\n ref_pf_models, ref_pf_levels,\n run_prefix, save_prefix, sadpt=sadpt)\n writer = 'rpvtst_block'\n else:\n inf_dct, chn_basis_ene_dct = mol_data(\n tsname, spc_dct,\n chn_pf_models, chn_pf_levels,\n ref_pf_models, ref_pf_levels, chn_basis_ene_dct,\n run_prefix, save_prefix, saddle=True)\n writer = 'species_block'\n else:\n\n # Build MESS string for TS with no saddle point\n if ts_nobarrier == 'pst':\n if len(rcts) == 2:\n inf_dct = pst_data(\n ts_dct, reac_dcts,\n chn_pf_levels,\n run_prefix, save_prefix)\n else:\n inf_dct = pst_data(\n ts_dct, prod_dcts,\n chn_pf_levels,\n run_prefix, save_prefix)\n writer = 'pst_block'\n elif ts_nobarrier == 'rpvtst':\n inf_dct = rpvtst_data(\n ts_dct, reac_dcts,\n chn_pf_models, chn_pf_levels,\n ref_pf_models, 
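# [Added sketch] read_spc_data/read_ts_data above tag each info dictionary with
# a writer name ('atom_block', 'tau_block', 'species_block', ...). A
# hypothetical caller-side dispatch; the real writer callables live elsewhere
# in the codebase and are passed in here:
def build_mess_block(inf_dct, writers):
    """writers: dict mapping a writer tag to callable(inf_dct) -> MESS string."""
    return writers[inf_dct['writer']](inf_dct)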
ref_pf_levels,\n run_prefix, save_prefix, sadpt=False)\n writer = 'rpvtst_block'\n elif ts_nobarrier == 'vrctst':\n inf_dct = flux_data(\n ts_dct,\n chn_pf_models, chn_pf_levels,\n ref_pf_models, ref_pf_levels)\n writer = 'vrctst_block'\n\n # Add writer to inf dct\n inf_dct['writer'] = writer\n\n return inf_dct, chn_basis_ene_dct\n\n\n# Data Readers\ndef atm_data(spc_dct, spc_name,\n chn_pf_models, chn_pf_levels, ref_pf_models, ref_pf_levels,\n run_prefix, save_prefix):\n \"\"\" Pull all neccessary info for the atom\n \"\"\"\n\n spc_dct_i = spc_dct[spc_name]\n # Set up all the filesystem objects using models and levels\n pf_filesystems = filesys.models.pf_filesys(\n spc_dct_i, chn_pf_levels, run_prefix, save_prefix, False)\n\n print('\\nObtaining the geometry...')\n geom = rot.read_geom(pf_filesystems)\n\n print('\\nObtaining the electronic energy...')\n ene_chnlvl = ene.read_energy(\n spc_dct_i, pf_filesystems, chn_pf_models, chn_pf_levels,\n run_prefix, read_ene=True, read_zpe=False)\n\n ene_reflvl = None\n _, _ = ref_pf_models, ref_pf_levels\n zpe_chnlvl = None\n\n ref_scheme = chn_pf_models['ref_scheme']\n ref_enes = chn_pf_models['ref_enes']\n \n # Determine info about the basis species used in thermochem calcs\n basis_dct, uniref_dct = basis.prepare_refs(\n ref_scheme, spc_dct, [[spc_name, None]])\n \n # Get the basis info for the spc of interest\n spc_basis, coeff_basis = basis_dct[spc_name]\n\n # Get the energies for the spc and its basis\n ene_spc, ene_basis = basis.basis_energy(\n spc_name, spc_basis, uniref_dct, spc_dct,\n chn_pf_levels, chn_pf_models,\n run_prefix, save_prefix)\n\n print('ene from thmroutines: ', ene_spc)\n # Calculate and store the 0 K Enthalpy\n hf0k = heatform.calc_hform_0k(\n ene_spc, ene_basis, spc_basis, coeff_basis, ref_set=ref_enes)\n\n ts_ref_scheme = 'cbh0'\n if 'basic' in ref_scheme:\n ts_ref_scheme = ref_scheme\n elif '_' in ref_scheme:\n ts_ref_scheme = 'cbh' + ref_scheme.split('_')[1]\n print('ts_ref_scheme test:', ts_ref_scheme)\n if ref_scheme != ts_ref_scheme:\n basis_dct_trs, uniref_dct_trs = basis.prepare_refs(ts_ref_scheme, \n spc_dct, [[spc_name, None]])\n spc_basis_trs, coeff_basis_trs = basis_dct_trs[spc_name]\n ene_spc_trs, ene_basis_trs = basis.basis_energy(\n spc_name, spc_basis_trs, uniref_dct_trs, spc_dct,\n chn_pf_levels, chn_pf_models,\n run_prefix, save_prefix)\n hf0K_trs = heatform.calc_hform_0k(\n ene_spc_trs, ene_basis_trs, spc_basis_trs, coeff_basis_trs, ref_set=ref_enes)\n else:\n hf0K_trs = hf0k\n\n print('ABS Energy: ', ene_chnlvl)\n print('Hf0K Energy: ', hf0k * phycon.KCAL2KJ)\n ene_chnlvl = hf0k * phycon.KCAL2EH\n hf0K_trs *= phycon.KCAL2EH\n\n # Create info dictionary\n inf_dct = {\n 'geom': geom,\n 'sym_factor': 1.0,\n 'freqs': [],\n 'mess_hr_str': '',\n 'mass': util.atom_mass(spc_dct_i),\n 'elec_levels': spc_dct_i['elec_levels'],\n 'ene_chnlvl': ene_chnlvl,\n 'ene_reflvl': ene_reflvl,\n 'ene_tsref': hf0K_trs,\n 'zpe_chnlvl': zpe_chnlvl\n }\n\n return inf_dct\n\n\ndef mol_data(spc_name, spc_dct,\n chn_pf_models, chn_pf_levels, ref_pf_models, ref_pf_levels, chn_basis_ene_dct,\n run_prefix, save_prefix, calc_chn_ene=True, saddle=False):\n \"\"\" Pull all of the neccessary information from the filesystem for a species\n \"\"\"\n \n spc_dct_i = spc_dct[spc_name]\n ene_chnlvl = None\n ene_reflvl = None\n zpe = None\n hf0K_trs = None\n\n # Initialize all of the elements of the inf dct\n geom, sym_factor, freqs, imag, elec_levels = None, None, None, None, None\n allr_str, mdhr_dat = '', ''\n xmat, rovib_coups, rot_dists = 
None, None, None\n\n # Set up all the filesystem objects using models and levels\n pf_filesystems = filesys.models.pf_filesys(\n spc_dct_i, chn_pf_levels, run_prefix, save_prefix, saddle)\n\n # Set information for transition states\n [cnf_fs, _, min_cnf_locs, _, _] = pf_filesystems['harm']\n # cnf_path = cnf_fs[-1].path(min_cnf_locs)\n frm_bnd_keys, brk_bnd_keys = util.get_bnd_keys(\n cnf_fs, min_cnf_locs, saddle)\n print('bnd keys in mol_data', frm_bnd_keys, brk_bnd_keys)\n rxn_class = util.set_rxn_class(spc_dct_i, saddle)\n\n # Obtain rotor information used to determine new information\n print('\\nPreparing internal rotor info building partition functions...')\n rotors = tors.build_rotors(\n spc_dct_i, pf_filesystems, chn_pf_models, chn_pf_levels,\n rxn_class=rxn_class,\n frm_bnd_keys=frm_bnd_keys, brk_bnd_keys=brk_bnd_keys)\n print('Frm and brk key in model build', frm_bnd_keys, brk_bnd_keys)\n if typ.nonrigid_tors(chn_pf_models, rotors):\n run_path = filesys.models.make_run_path(pf_filesystems, 'tors')\n tors_strs = tors.make_hr_strings(\n rotors, run_path, chn_pf_models['tors'],\n )\n [allr_str, hr_str, _, prot_str, mdhr_dat] = tors_strs\n\n # Obtain rotation partition function information\n print('\\nObtaining info for rotation partition function...')\n geom = rot.read_geom(pf_filesystems)\n\n if typ.nonrigid_rotations(chn_pf_models):\n rovib_coups, rot_dists = rot.read_rotational_values(pf_filesystems)\n\n # Obtain vibration partition function information\n print('\\nObtaining the vibrational frequencies and zpves...')\n if typ.nonrigid_tors(chn_pf_models, rotors):\n # Calculate initial proj. freqs, unproj. imag, tors zpe and scale fact\n freqs, imag, tors_zpe, pot_scalef = vib.tors_projected_freqs_zpe(\n pf_filesystems, hr_str, prot_str, run_prefix, saddle=saddle)\n # Make final hindered rotor strings and get corrected tors zpe\n if typ.scale_1d(chn_pf_models):\n tors_strs = tors.make_hr_strings(\n rotors, run_path, chn_pf_models['tors'],\n scale_factor=pot_scalef)\n [allr_str, hr_str, _, prot_str, mdhr_dat] = tors_strs\n _, _, tors_zpe, _ = vib.tors_projected_freqs_zpe(\n pf_filesystems, hr_str, prot_str, run_prefix, saddle=saddle)\n # Calculate current zpe assuming no freq scaling: tors+projfreq\n zpe = tors_zpe + (sum(freqs) / 2.0) * phycon.WAVEN2EH\n\n # For mdhrv model no freqs needed in MESS input, zero out freqs lst\n if 'mdhrv' in chn_pf_models['tors']:\n freqs = ()\n else:\n freqs, imag, zpe = vib.read_harmonic_freqs(\n pf_filesystems, saddle=saddle)\n tors_zpe = 0.0\n\n # Scale the frequencies\n if freqs:\n freqs, zpe = vib.scale_frequencies(\n freqs, tors_zpe, chn_pf_levels, scale_method='3c')\n\n # print('zpe in mol_data test:', zpe)\n if typ.anharm_vib(chn_pf_models):\n xmat = vib.read_anharmon_matrix(pf_filesystems)\n\n # Obtain symmetry factor\n print('\\nDetermining the symmetry factor...')\n sym_factor = sym.symmetry_factor(\n pf_filesystems, chn_pf_models, spc_dct_i, rotors,\n frm_bnd_keys=frm_bnd_keys, brk_bnd_keys=brk_bnd_keys)\n\n # Obtain electronic energy levels\n elec_levels = spc_dct_i['elec_levels']\n\n # Obtain energy levels\n print('\\nObtaining the electronic energy + zpve...')\n if calc_chn_ene:\n chn_ene = ene.read_energy(\n spc_dct_i, pf_filesystems, chn_pf_models, chn_pf_levels,\n run_prefix, read_ene=True, read_zpe=False, saddle=saddle)\n print('zpe in models build ', zpe)\n print('elec ene in models build ', chn_ene)\n ene_chnlvl = chn_ene + zpe\n print('ene_chnlvl: ', ene_chnlvl)\n\n ref_scheme = chn_pf_models['ref_scheme']\n ref_enes = 
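# [Added sketch] The zero-point energy assembled in mol_data is the torsional
# ZPE plus half the sum of the projected harmonic frequencies, converted from
# cm-1 to hartree. Numeric example (frequencies are made up; 4.556335e-6 is
# the assumed value of phycon.WAVEN2EH):
WAVEN2EH = 4.556335e-6
freqs = (1200.0, 1500.0, 3000.0)   # cm-1, illustrative
tors_zpe = 0.002                   # Eh, illustrative
zpe = tors_zpe + (sum(freqs) / 2.0) * WAVEN2EH   # ~0.0150 Eh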
chn_pf_models['ref_enes']\n \n # Determine info about the basis species used in thermochem calcs\n basis_dct, uniref_dct = basis.prepare_refs(\n ref_scheme, spc_dct, [[spc_name, None]], ts_geom=(geom, brk_bnd_keys, frm_bnd_keys))\n\n print('basis_dct test in mol_data:', basis_dct)\n\n # Get the basis info for the spc of interest\n spc_basis, coeff_basis = basis_dct[spc_name]\n\n print('spc_basis test in mol_data:', spc_basis)\n ene_spc = ene_chnlvl\n ene_basis = []\n energy_missing = False\n for spc_basis_i in spc_basis:\n if not isinstance(spc_basis_i, str):\n basreacs, basprods = spc_basis_i\n spc_basis_i = ''\n for entry in basreacs:\n spc_basis_i += entry\n for entry in basprods:\n spc_basis_i += entry\n if spc_basis_i in chn_basis_ene_dct:\n print('Energy already found for basis species: ', spc_basis_i)\n ene_basis.append(chn_basis_ene_dct[spc_basis_i])\n else:\n print('Energy will be determined for basis species: ', spc_basis_i)\n energy_missing = True\n\n # Get the energies for the spc and its basis\n if energy_missing:\n _, ene_basis = basis.basis_energy(\n spc_name, spc_basis, uniref_dct, spc_dct,\n chn_pf_levels, chn_pf_models,\n run_prefix, save_prefix)\n for spc_basis_i, ene_basis_i in zip(spc_basis, ene_basis):\n if not isinstance(spc_basis_i, str):\n basreacs, basprods = spc_basis_i\n spc_basis_i = ''\n for entry in basreacs:\n spc_basis_i += entry\n for entry in basprods:\n spc_basis_i += entry\n chn_basis_ene_dct[spc_basis_i] = ene_basis_i\n print('ene from thmroutines: ', ene_spc)\n\n # Calculate and store the 0 K Enthalpy\n hf0k = heatform.calc_hform_0k(\n ene_spc, ene_basis, spc_basis, coeff_basis, ref_set=ref_enes)\n\n #if rxn_class in basis.IMPLEMENTED_CBH_TS_CLASSES:\n # ts_ref_scheme = 'cbh0'\n #else:\n # ts_ref_scheme = None\n if 'basic' in ref_scheme:\n ts_ref_scheme = 'basic'\n else:\n ts_ref_scheme = 'cbh0'\n if '_' in ref_scheme:\n ts_ref_scheme = 'cbh' + ref_scheme.split('_')[1]\n print('ts_ref_scheme test:', ts_ref_scheme)\n if not saddle:\n if ref_scheme != ts_ref_scheme:\n basis_dct_trs, uniref_dct_trs = basis.prepare_refs(ts_ref_scheme, \n spc_dct, [[spc_name, None]], ts_geom=(geom, brk_bnd_keys, frm_bnd_keys))\n spc_basis_trs, coeff_basis_trs = basis_dct_trs[spc_name]\n ene_basis_trs = []\n energy_missing = False\n for spc_basis_i in spc_basis_trs:\n if spc_basis_i in chn_basis_ene_dct:\n print('Energy already found for basis species: ', spc_basis_i)\n ene_basis_trs.append(chn_basis_ene_dct[spc_basis_i])\n else:\n print('Energy will be determined for basis species: ', spc_basis_i)\n energy_missing = True\n if energy_missing:\n _, ene_basis_trs = basis.basis_energy(\n spc_name, spc_basis_trs, uniref_dct_trs, spc_dct,\n chn_pf_levels, chn_pf_models,\n run_prefix, save_prefix)\n for spc_basis_i, ene_basis_i in zip(spc_basis_trs, ene_basis_trs):\n chn_basis_ene_dct[spc_basis_i] = ene_basis_i\n ene_spc_trs = ene_chnlvl \n hf0K_trs = heatform.calc_hform_0k(\n ene_spc_trs, ene_basis_trs, spc_basis_trs, coeff_basis_trs, ref_set=ref_enes)\n else:\n hf0K_trs = hf0k\n else:\n hf0K_trs = 0.0\n\n print('ABS Energy: ', ene_chnlvl)\n print('Hf0K Energy: ', hf0k * phycon.KCAL2KJ)\n ene_chnlvl = hf0k * phycon.KCAL2EH\n hf0K_trs *= phycon.KCAL2EH\n\n ene_reflvl = None\n _, _ = ref_pf_models, ref_pf_levels\n # if chn_model == ref_model:\n # ene_reflvl = ene_chnlvl\n # else:\n # ene_reflvl = get_fs_ene_zpe(spc_dct, prod,\n # thy_dct, model_dct, model,\n # save_prefix, saddle=False,\n # read_ene=True, read_zpe=True)\n\n # Build the energy transfer section strings\n if not 
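# [Added sketch] The caching pattern used above for chn_basis_ene_dct: basis
# entries that are (reactants, products) tuples are flattened into one string
# key before lookup. Isolated for clarity:
def basis_key(spc_basis_i):
    if isinstance(spc_basis_i, str):
        return spc_basis_i
    basreacs, basprods = spc_basis_i
    return ''.join(basreacs) + ''.join(basprods)

# cache hit:  ene = chn_basis_ene_dct[basis_key(entry)]
# cache miss: compute, then chn_basis_ene_dct[basis_key(entry)] = ene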
saddle:\n print('\\n Determining energy transfer parameters...')\n well_info = filesys.inf.get_spc_info(spc_dct_i)\n print('well_inf', well_info)\n #bath_info = ['InChI=1S/N2/c1-2', 0, 1] # how to do...\n bath_info = ['InChI=1S/Ar', 0, 1] # how to do...\n etrans_dct = etrans.build_etrans_dct(spc_dct_i)\n\n edown_str, collid_freq_str = etrans.make_energy_transfer_strs(\n well_info, bath_info, etrans_dct)\n else:\n edown_str, collid_freq_str = None, None\n\n # Create info dictionary\n keys = ['geom', 'sym_factor', 'freqs', 'imag', 'elec_levels',\n 'mess_hr_str', 'mdhr_dat',\n 'xmat', 'rovib_coups', 'rot_dists',\n 'ene_chnlvl', 'ene_reflvl', 'zpe_chnlvl', 'ene_tsref',\n 'edown_str', 'collid_freq_str']\n vals = [geom, sym_factor, freqs, imag, elec_levels,\n allr_str, mdhr_dat,\n xmat, rovib_coups, rot_dists,\n ene_chnlvl, ene_reflvl, zpe, hf0K_trs,\n edown_str, collid_freq_str]\n inf_dct = dict(zip(keys, vals))\n\n return inf_dct, chn_basis_ene_dct\n\n\n# VRCTST\ndef flux_data(ts_dct,\n chn_pf_models, chn_pf_levels,\n ref_pf_models, ref_pf_levels):\n \"\"\" Grab the flux file from the filesystem\n \"\"\"\n\n # Fake setting for plugin\n _, _, _ = chn_pf_models, ref_pf_models, ref_pf_levels\n\n # Read the flux file from the filesystem\n _, ts_save_path, _, _ = filesys.models.set_rpath_filesys(\n ts_dct, chn_pf_levels['rpath'][1])\n\n flux_str = flux.read_flux(ts_save_path)\n\n # Create info dictionary\n inf_dct = {'flux_str': flux_str}\n\n return inf_dct\n\n\n# VTST\ndef rpvtst_data(ts_dct, reac_dcts,\n chn_pf_models, chn_pf_levels, ref_pf_models, ref_pf_levels,\n run_prefix, save_prefix, sadpt=False):\n \"\"\" Pull all of the neccessary information from the\n filesystem for a species\n \"\"\"\n\n # Fake setting for plugin\n _, _, _ = chn_pf_models, ref_pf_models, ref_pf_levels\n\n # Set up all the filesystem objects using models and levels\n if sadpt:\n # Set up filesystems and coordinates for saddle point\n # Scan along RxnCoord is under THY/TS/CONFS/cid/Z\n pf_filesystems = filesys.models.pf_filesys(\n ts_dct, chn_pf_levels, run_prefix, save_prefix, True)\n tspaths = pf_filesystems['harm']\n [_, cnf_save_path, min_locs, _, cnf_run_fs] = tspaths\n ts_run_path = cnf_run_fs[-1].path(min_locs)\n\n # Set TS reaction coordinate\n frm_name = 'IRC'\n scn_vals = filesys.models.get_rxn_scn_coords(cnf_save_path, frm_name)\n scn_vals.sort()\n scn_ene_info = chn_pf_levels['ene'][1][0][1] # fix to be ene lvl\n scn_prefix = cnf_save_path\n else:\n # Set up filesystems and coordinates for reaction path\n # Scan along RxnCoord is under THY/TS/Z\n tspaths = filesys.models.set_rpath_filesys(\n ts_dct, chn_pf_levels['rpath'][1])\n ts_run_path, ts_save_path, _, thy_save_path = tspaths\n\n # Set TS reaction coordinate\n frm_bnd_keys, _ = util.get_bnd_keys2(ts_save_path, True)\n frm_name = util.get_rxn_coord_name(\n ts_save_path, frm_bnd_keys, sadpt=sadpt, zma_locs=(0,))\n scn_vals = filesys.models.get_rxn_scn_coords(thy_save_path, frm_name)\n scn_vals.sort()\n scn_ene_info = chn_pf_levels['rpath'][1][0]\n scn_prefix = thy_save_path\n\n # Modify the scn thy info\n print('scn thy info', scn_ene_info)\n print('scn vals', scn_vals)\n mod_scn_ene_info = filesys.inf.modify_orb_restrict(\n filesys.inf.get_spc_info(ts_dct), scn_ene_info)\n # scn thy info [[1.0, ['molpro2015', 'ccsd(t)', 'cc-pvdz', 'RR']]]\n\n # Need to read the sp vals along the scan. 
add to read\n ref_ene = 0.0\n enes, geoms, grads, hessians, _, _ = torsprep.read_hr_pot(\n [frm_name], [scn_vals],\n scn_prefix,\n mod_scn_ene_info, ref_ene,\n constraint_dct=None, # No extra frozen treatments\n read_geom=True,\n read_grad=True,\n read_hess=True)\n freqs = torsprep.calc_hr_frequencies(\n geoms, grads, hessians, ts_run_path)\n\n # Get the energies and zpes at R_ref\n if not sadpt:\n idx, ene_hs_sr_ref, ene_hs_mr_ref = ene.rpath_ref_idx(\n ts_dct, scn_vals, frm_name, scn_prefix,\n chn_pf_levels['ene'],\n chn_pf_levels['rpath'][1])\n fr_idx = len(scn_vals) - 1\n zpe_ref = (sum(freqs[(fr_idx,)]) / 2.0) * phycon.WAVEN2KCAL\n\n # Get the reactants and infinite seperation energy\n reac_ene = 0.0\n ene_hs_sr_inf = 0.0\n for dct in reac_dcts:\n pf_filesystems = filesys.models.pf_filesys(\n dct, chn_pf_levels, run_prefix, save_prefix, False)\n pf_levels = {\n 'ene': chn_pf_levels['ene'],\n 'harm': chn_pf_levels['harm'],\n 'tors': chn_pf_levels['tors']\n }\n reac_ene += ene.read_energy(\n dct, pf_filesystems, chn_pf_models, pf_levels,\n run_prefix, read_ene=True, read_zpe=True, saddle=sadpt)\n\n print('rpath', chn_pf_levels['rpath'][1])\n pf_levels = {\n 'ene': ['mlvl', [[1.0, chn_pf_levels['rpath'][1][2]]]],\n 'harm': chn_pf_levels['harm'],\n 'tors': chn_pf_levels['tors']\n }\n ene_hs_sr_inf += ene.read_energy(\n dct, pf_filesystems, chn_pf_models, pf_levels,\n run_prefix, read_ene=True, read_zpe=False)\n\n # Scale the scn values\n if sadpt:\n scn_vals = [val / 100.0 for val in scn_vals]\n # scn_vals = [val * phycon.BOHR2ANG for val in scn_vals]\n\n # Grab the values from the read\n inf_dct = {}\n inf_dct['rpath'] = []\n pot_info = zip(scn_vals, enes.values(), geoms.values(), freqs.values())\n for rval, pot, geo, frq in pot_info:\n\n # Scale the r-values\n\n # Get the relative energy (edit for radrad scans)\n zpe = (sum(frq) / 2.0) * phycon.WAVEN2KCAL\n if sadpt:\n zero_ene = (pot + zpe) * phycon.KCAL2EH\n else:\n print('enes')\n print('reac ene', reac_ene)\n print('hs sr', ene_hs_sr_ref)\n print('inf', ene_hs_sr_inf)\n print('hs mr', ene_hs_mr_ref)\n print('pot R', pot * phycon.KCAL2EH)\n print('zpe', zpe)\n print('zpe ref', zpe_ref)\n\n elec_ene = (\n ene_hs_sr_ref - ene_hs_sr_inf -\n ene_hs_mr_ref + pot * phycon.KCAL2EH\n )\n zpe_pt = zpe - zpe_ref\n zero_ene = reac_ene + (elec_ene + zpe_pt * phycon.KCAL2EH)\n print('elec ene', elec_ene)\n print('zero ene', zero_ene)\n\n # ENE\n # ene = (reac_ene +\n # ene_hs_sr(R_ref) - ene_hs_sr(inf) +\n # ene_ls_mr(R_ref) - ene_hs_mr(R_ref) +\n # ene_ls_mr(R) - ene_ls_mr(R_ref))\n # ene = (reac_ene +\n # ene_hs_sr(R_ref) - ene_hs_sr(inf) -\n # ene_hs_mr(R_ref) + ene_ls_mr(R))\n # inf_sep_ene = reac_ene + hs_sr_ene - hs_mr_ene\n # inf_sep_ene_p = (reac_ene +\n # hs_sr_ene(R_ref) - ene_hs_sr(inf) +\n # ls_mr_ene(R_ref) - hs_mr_ene(R_ref))\n # ene = inf_sep_ene_p + ene_ls_mr(R) - ene_ls_mr(R_ref)\n # ZPE\n # zpe = zpe(R) - zpe(inf)\n # or\n # zpe = zpe_ls_mr(R) - zpe_ls_mr(R_ref)\n\n # Set values constant across the scan\n elec_levels = ts_dct['elec_levels']\n\n # Create info dictionary and append to lst\n keys = ['rval', 'geom', 'freqs', 'elec_levels', 'ene_chnlvl']\n vals = [rval, geo, frq, elec_levels, zero_ene]\n inf_dct['rpath'].append(dict(zip(keys, vals)))\n\n # Calculate and store the imaginary mode\n if sadpt:\n _, imag, _ = vib.read_harmonic_freqs(\n pf_filesystems, saddle=True)\n ts_idx = scn_vals.index(0.00)\n else:\n imag = None\n ts_idx = 0\n inf_dct.update({'imag': imag})\n inf_dct.update({'ts_idx': ts_idx})\n\n return 
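# [Added sketch] The energy bookkeeping spelled out in the ENE/ZPE comments
# above, collected into one function. The potential and ZPEs follow the
# kcal/mol convention of the surrounding code; other inputs are in hartree.
# KCAL2EH below is the assumed value of phycon.KCAL2EH:
KCAL2EH = 1.0 / 627.5095

def radrad_zero_ene(reac_ene, ene_hs_sr_ref, ene_hs_sr_inf,
                    ene_hs_mr_ref, pot, zpe, zpe_ref):
    elec_ene = ene_hs_sr_ref - ene_hs_sr_inf - ene_hs_mr_ref + pot * KCAL2EH
    return reac_ene + elec_ene + (zpe - zpe_ref) * KCAL2EH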
inf_dct\n\n\n# PST\ndef pst_data(ts_dct, reac_dcts,\n chn_pf_levels,\n run_prefix, save_prefix):\n \"\"\" Set up the data for PST parameters\n \"\"\"\n\n # Get the k(T), T, and n values to get a Cn\n kt_pst, temp_pst, n_pst = pst.set_vals_for_cn(ts_dct)\n\n print('\\nDetermining parameters for Phase Space Theory (PST)',\n 'treatment that yields k({} K) = {}'.format(temp_pst, kt_pst))\n print(' Assuming PST model potential V = C0 / R^{}'.format(n_pst))\n\n # Obtain the reduced mass of the reactants\n print('\\nReading reactant geometries to obtain reduced mass...')\n geoms = []\n for dct in reac_dcts:\n pf_filesystems = filesys.models.pf_filesys(\n dct, chn_pf_levels, run_prefix, save_prefix, False)\n geoms.append(rot.read_geom(pf_filesystems))\n mred = automol.geom.reduced_mass(geoms[0], geoms[1])\n\n cn_pst = pst.calc_cn_for_pst(kt_pst, n_pst, mred, temp_pst)\n\n # Create info dictionary\n keys = ['n_pst', 'cn_pst']\n vals = [n_pst, cn_pst]\n inf_dct = dict(zip(keys, vals))\n\n return inf_dct\n\n\n# TAU\ndef tau_data(spc_dct_i,\n chn_pf_models, chn_pf_levels,\n run_prefix, save_prefix, saddle=False):\n \"\"\" Read the filesystem to get information for TAU\n \"\"\"\n\n frm_bnd_keys = ()\n brk_bnd_keys = ()\n\n # Set up all the filesystem objects using models and levels\n pf_filesystems = filesys.models.pf_filesys(\n spc_dct_i, chn_pf_levels, run_prefix, save_prefix, saddle)\n [harm_cnf_fs, _,\n harm_min_locs, harm_save, _] = pf_filesystems['harm']\n # [tors_cnf_fs, _, tors_min_locs, _, _] = pf_filesystems['tors']\n\n # Get the conformer filesys for the reference geom and energy\n if harm_min_locs:\n geom = harm_cnf_fs[-1].file.geometry.read(harm_min_locs)\n min_ene = harm_cnf_fs[-1].file.energy.read(harm_min_locs)\n\n # Set the filesystem\n tau_save_fs = autofile.fs.tau(harm_save)\n\n # Set the ground and reference energy to set values for now\n rxn_class = None\n\n # Get the rotor info\n rotors = tors.build_rotors(\n spc_dct_i, pf_filesystems, chn_pf_models,\n chn_pf_levels,\n rxn_class=rxn_class,\n frm_bnd_keys=frm_bnd_keys, brk_bnd_keys=brk_bnd_keys)\n\n run_path = filesys.models.make_run_path(pf_filesystems, 'tors')\n tors_strs = tors.make_hr_strings(\n rotors, run_path, chn_pf_models['tors'])\n [_, hr_str, flux_str, prot_str, _] = tors_strs\n\n # Use model to determine whether to read grads and hessians\n vib_model = chn_pf_models['vib']\n freqs = ()\n _, _, proj_zpve, harm_zpve = vib.tors_projected_freqs_zpe(\n pf_filesystems, hr_str, prot_str, run_prefix, saddle=False)\n zpe_chnlvl = proj_zpve * phycon.EH2KCAL\n\n # Set reference energy to harmonic zpve\n db_style = 'directory'\n reference_energy = harm_zpve * phycon.EH2KCAL\n if vib_model == 'tau':\n if db_style == 'directory':\n tau_locs = [locs for locs in tau_save_fs[-1].existing()\n if tau_save_fs[-1].file.hessian.exists(locs)]\n elif db_style == 'jsondb':\n tau_locs = [locs for locs in tau_save_fs[-1].json_existing()\n if tau_save_fs[-1].json.hessian.exists(locs)]\n else:\n if db_style == 'directory':\n tau_locs = tau_save_fs[-1].existing()\n elif db_style == 'jsondb':\n tau_locs = tau_save_fs[-1].json_existing()\n\n # Read the geom, ene, grad, and hessian for each sample\n samp_geoms, samp_enes, samp_grads, samp_hessians = [], [], [], []\n for locs in tau_locs:\n\n # print('Reading tau info at path {}'.format(\n # tau_save_fs[-1].path(locs)))\n\n if db_style == 'directory':\n geo = tau_save_fs[-1].file.geometry.read(locs)\n elif db_style == 'jsondb':\n geo = tau_save_fs[-1].json.geometry.read(locs)\n\n geo_str = 
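# [Added sketch] pst_data above needs the reduced mass of the two reactants;
# automol computes it from full geometries, but for two point masses it is the
# familiar mu = m1*m2/(m1+m2). Illustrative check with H + CH3 (masses in amu):
m_h, m_ch3 = 1.00783, 15.02348
mu = m_h * m_ch3 / (m_h + m_ch3)   # ~0.944 amu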
autofile.data_types.swrite.geometry(geo)\n samp_geoms.append(geo_str)\n\n if db_style == 'directory':\n tau_ene = tau_save_fs[-1].file.energy.read(locs)\n elif db_style == 'jsondb':\n tau_ene = tau_save_fs[-1].json.energy.read(locs)\n rel_ene = (tau_ene - min_ene) * phycon.EH2KCAL\n ene_str = autofile.data_types.swrite.energy(rel_ene)\n samp_enes.append(ene_str)\n\n if vib_model == 'tau':\n if db_style == 'directory':\n grad = tau_save_fs[-1].file.gradient.read(locs)\n elif db_style == 'jsondb':\n grad = tau_save_fs[-1].json.gradient.read(locs)\n grad_str = autofile.data_types.swrite.gradient(grad)\n samp_grads.append(grad_str)\n\n if db_style == 'directory':\n hess = tau_save_fs[-1].file.hessian.read(locs)\n elif db_style == 'jsondb':\n hess = tau_save_fs[-1].json.hessian.read(locs)\n hess_str = autofile.data_types.swrite.hessian(hess)\n samp_hessians.append(hess_str)\n\n # Read a geometry, grad, and hessian for a reference geom if needed\n ref_geom, ref_grad, ref_hessian = [], [], []\n if vib_model != 'tau':\n\n # Get harmonic filesystem information\n [harm_save_fs, _, harm_min_locs, _, _] = pf_filesystems['harm']\n\n # Read the geometr, gradient, and Hessian\n geo = harm_save_fs[-1].file.geometry.read(harm_min_locs)\n geo_str = autofile.data_types.swrite.geometry(geo)\n ref_geom.append(geo_str)\n\n grad = harm_save_fs[-1].file.gradient.read(harm_min_locs)\n grad_str = autofile.data_types.swrite.gradient(grad)\n ref_grad.append(grad_str)\n\n hess = harm_save_fs[-1].file.hessian.read(harm_min_locs)\n hess_str = autofile.data_types.swrite.hessian(hess)\n ref_hessian.append(hess_str)\n\n # Obtain symmetry factor\n print('\\nDetermining the symmetry factor...')\n sym_factor = sym.symmetry_factor(\n pf_filesystems, chn_pf_models, spc_dct_i, rotors,\n frm_bnd_keys=(), brk_bnd_keys=())\n\n # Create info dictionary\n keys = ['geom', 'sym_factor', 'elec_levels', 'freqs', 'flux_mode_str',\n 'samp_geoms', 'samp_enes', 'samp_grads', 'samp_hessians',\n 'ref_geom', 'ref_grad', 'ref_hessian',\n 'zpe_chnlvl', 'reference_energy']\n vals = [geom, sym_factor, spc_dct_i['elec_levels'], freqs, flux_str,\n samp_geoms, samp_enes, samp_grads, samp_hessians,\n ref_geom, ref_grad, ref_hessian,\n zpe_chnlvl, reference_energy]\n inf_dct = dict(zip(keys, vals))\n\n return inf_dct\n","sub_path":"routines/pf/models/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":32138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"227082242","text":"\r\nfrom Prims_Functions import *\r\nfrom Weighted_Graph import *\r\nfrom another_prims_function import *\r\n\r\n\r\n\r\ndef Prims(textfile, starting_vertex = 0, show_cost = False, show = False):\r\n \r\n G = Weighted_Graph(textfile) \r\n T = initialize_tree(starting_vertex)\r\n \r\n while T[0] != G.vertex_set():\r\n update_tree(G, T)\r\n \r\n if show == True:\r\n G.draw_subgraph(T)\r\n \r\n if show_cost == True:\r\n c = 0\r\n for e in T[1]:\r\n c += cost(G, e)\r\n print('Optimal tree cost:', c)\r\n \r\n return T\r\n","sub_path":"Final_Project_pkg/FP_Prims_pkg/Prims.py","file_name":"Prims.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"218464404","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport json\n\nclass Website():\n\n def __init__(self):\n self.plug = {}\n self.info = {}\n self.tags = []\n self.keys = ['Cookies', 'HTTPServer', 'Title', 'IP', \n 'Country', 'PasswordField', 'Email']\n\n def 
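# [Added sketch] Every read in the tau sampling loop above branches on
# db_style; a small accessor hides that switch (attribute names mirror the
# autofile calls used in the surrounding code):
def tau_read(tau_save_fs, name, locs, db_style='directory'):
    layer = tau_save_fs[-1].file if db_style == 'directory' else tau_save_fs[-1].json
    return getattr(layer, name).read(locs)

# geo = tau_read(tau_save_fs, 'geometry', locs, db_style)
# ene = tau_read(tau_save_fs, 'energy', locs, db_style)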
to_tags(self):\n return self.tags\n\n def to_dict(self):\n return self.info\n\n def to_td(self, var):\n if var.startswith('http://') or var.startswith('https://'):\n return '<td title=\"' + var + '\"> <a target=\"_blank\" href=\"' +var+ '\"> ' + var[:20] + '</a> </td>'\n else:\n return '<td title=\"' + var + '\">' + var[:20] + '</td>'\n\n def to_td2(self, var):\n return '<td title=\"' + var + '\">' + var + '</td>'\n\n def to_tr(self):\n tr = '<tr>'\n tr += self.to_td(self.info['target'])\n tr += self.to_td(str(self.info['http_status']))\n tr += self.to_td(self.info['Title'])\n tr += self.to_td(self.info['IP'])\n tr += self.to_td(self.info['HTTPServer'])\n tr += self.to_td(self.info['Country'])\n tr += self.to_td(self.info['PasswordField'])\n tr += self.to_td(self.info['Cookies'])\n tr += self.to_td(self.info['Email'])\n tr += self.to_td2(' '.join(self.tags))\n tr += '</tr>'\n return tr\n\n def to_sort(self):\n return self.plug\n\n def sort(self, plg):\n dict = plg['plugins']\n for k in dict.keys():\n if k not in self.keys:\n if self.plug.has_key(k):\n self.plug[k] += \" \"+plg['target']\n else:\n self.plug[k] = plg['target']\n\n def parse(self, dict):\n self.dict = dict\n self.parse_dict()\n self.parse_tags()\n\n def parse_tags(self):\n dict = self.dict['plugins']\n for k in dict.keys():\n if k not in self.keys:\n if dict[k]:\n self.tags.append(k+':'+self.plugin_value(dict[k]))\n else:\n self.tags.append(k)\n\n def parse_dict(self):\n self.info['target'] = self.dict['target']\n self.info['http_status'] = self.dict['http_status']\n dict = self.dict['plugins']\n for k in self.keys:\n self.info[k] = ''\n if dict.has_key(k):\n if dict[k]:\n self.info[k] = self.plugin_value(dict[k])\n\n def plugin_value(self, pv):\n value = ''\n if pv.has_key('string'):\n value += ''.join(pv['string'])\n elif pv.has_key('version'):\n value += ''.join(pv['version'])\n elif pv.has_key('module'):\n value += ''.join(pv['module'])\n else:\n value += str(pv)\n return value\n\n\ndef logJson():\n log = '''{\"target\":\"http://touchtt.abc.com\",\"http_status\":200,\"plugins\":{\"Cookies\":{\"string\":[\"JSESSIONID\"]},\"Country\":{\"string\":[\"CHINA\"],\"module\":[\"CN\"]},\"HTML5\":{},\"HTTPServer\":{\"string\":[\"yy\"]},\"IP\":{\"string\":[\"13.13.20.13\"]},\"Java\":{},\"Title\":{\"string\":[\"abc网\"]}}}'''\n return json.loads(log) \n\nif __name__ == '__main__':\n data = logJson()\n ws = Website()\n ws.parse(data)\n ws.sort(data)\n print(ws.to_dict())\n print(ws.to_tags())\n print(ws.to_tr())\n print(ws.to_sort())\n","sub_path":"3rd_audit/lib/website.py","file_name":"website.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"458759923","text":"import sys\nimport logging\nfrom avalon import api\nfrom avalon.vendor.Qt import QtCore\nfrom maya import cmds\n\nfrom . import PYMEL_MOCK_FLAG\n\nself = sys.modules[__name__]\nself._menu = api.Session.get(\"AVALON_LABEL\", \"Avalon\") + \"menu\"\n\nlog = logging.getLogger(__name__)\n\n\ndef _arnold_update_full_scene(*args):\n try:\n from . import arnold\n except RuntimeError:\n return\n\n arnold.utils.update_full_scene()\n\n\ndef install():\n from . 
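# [Added sketch] to_td/to_td2 above interpolate scan results straight into
# HTML; if a scanned title or header can contain markup, escaping is safer.
# A minimal Python 3 variant (the original is Python 2 era - note has_key -
# where cgi.escape played the same role):
import html

def to_td_safe(var, width=20):
    return '<td title="' + html.escape(var) + '">' + html.escape(var[:width]) + '</td>'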
import interactive\n\n def deferred():\n # Append to Avalon's menu\n cmds.menuItem(divider=True)\n\n cmds.menuItem(\"Snap!\", command=interactive.active_view_snapshot)\n\n # Rendering tools\n cmds.menuItem(\"Menu_Render\",\n label=\"Render\",\n tearOff=True,\n subMenu=True,\n parent=self._menu)\n\n cmds.menuItem(divider=True, dividerLabel=\"Arnold\")\n\n cmds.menuItem(\"ArnoldUpdateFullScene\",\n label=\"Update Full Scene\",\n parent=\"Menu_Render\",\n image=\"playbackLoopingContinuous.png\",\n command=_arnold_update_full_scene)\n\n # LookDev tools\n cmds.menuItem(\"Menu_LookDev\",\n label=\"LookDev\",\n tearOff=True,\n subMenu=True,\n parent=self._menu)\n\n# cmds.menuItem(\"V-Ray Attributes\", command=\"\"\"\n# import reveries.maya.tools\n# reveries.maya.tools.show('vray_attrs_setter')\n# \"\"\")\n cmds.menuItem(\"Look Assigner\", parent=\"Menu_LookDev\", command=\"\"\"\nimport reveries.maya.tools\nreveries.maya.tools.show('mayalookassigner')\n\"\"\")\n\n cmds.menuItem(\"Set AvalonUUID\", parent=\"Menu_LookDev\",\n command=interactive.apply_avalon_uuid)\n\n cmds.menuItem(\"Swap Modle\", parent=\"Menu_LookDev\",\n command=interactive.swap_to_published_model)\n\n # XGen tools\n cmds.menuItem(\"Menu_XGen\",\n label=\"XGen\",\n tearOff=True,\n subMenu=True,\n parent=self._menu)\n\n cmds.menuItem(divider=True, dividerLabel=\"XGen Legacy\")\n\n cmds.menuItem(\"Bake All Descriptions\",\n parent=\"Menu_XGen\",\n command=interactive.bake_all_xgen_legacy_descriptions)\n cmds.menuItem(\"Bake All Modifiers\",\n parent=\"Menu_XGen\",\n command=interactive.bake_all_xgen_legacy_modifiers)\n cmds.menuItem(\"Copy Mesh To World\",\n parent=\"Menu_XGen\",\n command=interactive.copy_mesh_to_world)\n cmds.menuItem(\"Link Hair System\",\n parent=\"Menu_XGen\",\n command=interactive.link_palettes_to_hair_system)\n cmds.menuItem(\"Set RefWires Frame By Nucleus\",\n parent=\"Menu_XGen\",\n command=interactive.set_refwires_frame_by_nucleus)\n\n cmds.menuItem(divider=True, dividerLabel=\"XGen Interactive Groom\")\n\n # System\n cmds.menuItem(\"Load PyMel\", parent=\"System\", command=\"\"\"\nimport sys, os\nMOCK_FLAG = {!r}\nif os.path.isfile(MOCK_FLAG):\n os.remove(MOCK_FLAG)\nif \"pymel.core\" in sys.modules:\n del sys.modules[\"pymel.core\"]\nimport pymel.core\n\"\"\".format(PYMEL_MOCK_FLAG))\n\n cmds.menuItem(\"Mock PyMel\", parent=\"System\", command=\"\"\"\nwith open({!r}, \"w\") as flag:\n flag.write(\"\")\n\"\"\".format(PYMEL_MOCK_FLAG))\n\n # Allow time for uninstallation to finish.\n QtCore.QTimer.singleShot(200, deferred)\n\n\ndef uninstall():\n pass\n","sub_path":"reveries/maya/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"298304784","text":"\"\"\"\nRead daily baomu captured files and summary\n\"\"\"\nimport os\nimport re\nimport sys\nimport json\nfrom helpers.utils import list_files\nfrom helpers.constants import FIRST_TIER_CITIES_PY, SECOND_TIER_CITIES_PY\n\nuser_dict = {}\nfpath, pattern = sys.argv[1:]\n\ndef load_files():\n \"\"\"Read daily baomu files to memory\"\"\"\n global user_dict\n for filename in list_files(fpath, pattern):\n print(filename)\n with open(filename, 'r', encoding='utf8') as a_file:\n # 跳过文��头\n next(a_file)\n for line in a_file:\n bm_type, city, sid, name = line.strip().split(',')[:4]\n capture_dt = re.search(r\"(\\d{4}-\\d{2})-\\d{2}\", filename).group(0)\n user_dict[sid] = [bm_type, city, sid, name, capture_dt]\n\ndef get_staff():\n \"\"\"Get 
Staff Infos by city/service.\n Get city coverage infos\n \"\"\"\n staff_by_city = [set(), set(), set()]\n staff_by_service = [set(), set(), set()]\n city_coverage = [set(), set(), set()]\n for fields in user_dict.values():\n bm_type, city, sid = fields[:3]\n if city in FIRST_TIER_CITIES_PY:\n staff_by_city[0].add(sid)\n city_coverage[0].add(city)\n elif city in SECOND_TIER_CITIES_PY:\n staff_by_city[1].add(sid)\n city_coverage[1].add(city)\n else:\n staff_by_city[2].add(sid)\n city_coverage[2].add(city)\n if bm_type == 'livein':\n staff_by_service[0].add(sid)\n elif bm_type == 'outlive':\n staff_by_service[1].add(sid)\n elif bm_type == 'hourly':\n staff_by_service[2].add(sid)\n\n # 打印结果\n print('***Staff By City***')\n for tier in range(3):\n print('%s 线城市: %s' % (tier + 1, len(staff_by_city[tier])))\n print('***Staff By Service***')\n for tier in range(3):\n print('%s 线城市: %s' % (tier + 1, len(staff_by_service[tier])))\n print('***City Coverage***')\n for tier in range(3):\n print('%s 线城市: %s' % (tier + 1, len(city_coverage[tier])))\n\ndef get_new():\n \"\"\"Get new baomu of each month\"\"\"\n if os.path.exists('../data/58daojia.baomu.txt'):\n with open('../data/58daojia.baomu.txt', 'r+', encoding='utf8') as a_file:\n old_dict = {}\n for line in a_file:\n fields = line.strip().split(',')\n old_dict[fields[2]] = fields\n new_users = user_dict.keys() - old_dict.keys()\n print('New Users: ' + str(len(new_users)))\n # 合并新阿姨\n for new in new_users:\n a_file.write(','.join(user_dict[new]) + '\\n')\n else:\n with open('../data/58daojia.baomu.txt', 'w', encoding='utf8') as b_file:\n for user in user_dict.values():\n b_file.write(','.join(user) + '\\n')\n\ndef main():\n load_files()\n get_staff()\n get_new()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"analyzer/58daojia_baomu.py","file_name":"58daojia_baomu.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"6131045","text":"import json\nimport os\nfrom subprocess import Popen, call\nimport sys\nfrom time import sleep\nfrom mininet.net import Mininet\nfrom mininet.node import Controller, RemoteController, OVSController\nfrom mininet.node import CPULimitedHost, Host, Node\nfrom mininet.node import OVSKernelSwitch, UserSwitch, OVSSwitch\nfrom mininet.node import IVSSwitch\nfrom mininet.log import setLogLevel, info\nfrom mininet.link import TCLink\nfrom mininet.cli import CLI\n\n# Ryu script\nRYU_SCRIPT = '/home/annie/ryu/ryu/app/simple_switch_14.py' \n\n# A client's qPort\nclientAFQPort = '14450'\n\n# A client's AF receive endpoint\nclientAFRxEndpoint = 'tcp://127.0.0.1:12345'\n\n# A client's AF transmit endpoint\nclientAFTxEndpoint = 'tcp://127.0.0.1:12346'\n\n##########\n# Debug\n##########\ndebugSabot = False\ndebugSabotExtra = ''\ndebugSabotFile = 'SABOT'\n\ndebugDispatcher = False\ndebugDispatcherExtra = '--leak-check=full'\ndebugDispatcherFile = 'ED'\n\ndebugArmishFireplace = False\ndebugArmishFireplaceExtra = '-v'#'--leak-check=full'\ndebugArmishFireplaceFile = 'AF'\n\ndef get_client_af_string(endpoint, rxDispatcherEndpoint, txDispatcherEndpoint):\n '''\n Construct the string list to call armish-fireplace for a client.\n '''\n basesFile = os.path.join(os.path.dirname(os.path.realpath(__file__)),\n \"bases.bin\")\n puParams = '--e {}:{} --rd {} --td {} --b {}'.format(endpoint,\n clientAFQPort,\n rxDispatcherEndpoint,\n txDispatcherEndpoint,\n basesFile)\n af = ['armish-fireplace',\n '-o', clientAFRxEndpoint,\n '-i', 
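# [Added sketch] The three-way tier bucketing repeated in get_staff above,
# factored into one lookup (assumes the FIRST/SECOND tier constants imported
# at the top of that script):
def city_tier(city):
    if city in FIRST_TIER_CITIES_PY:
        return 0   # tier-1 cities
    if city in SECOND_TIER_CITIES_PY:
        return 1   # tier-2 cities
    return 2       # everything else

# staff_by_city[city_tier(city)].add(sid)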
clientAFTxEndpoint,\n '-m', 'brazil',\n '-t', 'bobwire_circuit',\n '-u', puParams]\n \n if debugArmishFireplace:\n af.insert(0, '--log-file={}-{} {}'.format(debugArmishFireplaceFile,\n endpoint,\n debugArmishFireplaceExtra))\n af.insert(0, 'valgrind')\n return af\n\ndef get_switch_af_string(endpoint, AFrx, AFtx, txDispatcherEndpoint):\n '''\n Construct the string list to call armish-fireplace for a qswitch.\n '''\n puParams = '--e {} --p 3 --td {}'.format(endpoint, txDispatcherEndpoint)\n af = ['armish-fireplace',\n '-o', 'ipc:///tmp/qs2',\n '-i', 'ipc:///tmp/qs1',\n '-m', 'trabea',\n '-t', 'circulator_switch',\n '-u', puParams]\n \n if debugArmishFireplace:\n af.insert(0, '--log-file={}-{} {}'.format(debugArmishFireplaceFile,\n endpoint,\n debugArmishFireplaceExtra))\n af.insert(0, 'valgrind')\n return af\n\ndef launch_qsim_backend(node=None, **args):\n '''\n Launch qsim backend on a mininet node or in the current namespace.\n \n Returns a list of instances from popen.\n '''\n sabot = ['sabot',\n '-e', str(args['sabotEndpoint']),\n '-t', str(args['sabotThreadCount'])]\n if debugSabot:\n #sabot.insert(0, '--log-file={} {}'.format(debugSabotFile,\n # debugSabotExtra))\n sabot.insert(0, 'valgrind')\n \n eldispacho = ['eldispacho',\n '--rs', str(args['dispatcherRxEndpoint']),\n '--ts', str(args['dispatcherTxEndpoint']),\n '--s', str(args['dispatcherSabotEndpoint']),\n '--st', str(args['dispatcherThreadCount']),\n '-t', str(args['dispatcherTopology'])]\n if 'dispatcherLoggerEndpoint' in args:\n eldispacho.extend(['-l', str(args['dispatcherLoggerEndpoint'])])\n if debugDispatcher:\n eldispacho.insert(0, '--log-file={} {}'.format(debugDispatcherFile,\n debugDispatcherExtra))\n \n instanceList = list()\n launchF = Popen if node is None else none.popen\n instanceList.append(launchF(sabot))\n instanceList.append(launchF(eldispacho))\n \n return instanceList\n\ndef generate_topology(net, topoJson, controller):\n '''\n Generate a topology inside net with controller.\n '''\n def ip_2_str(ip):\n '''\n Convert a list of 4 octets to dot-decimal notation.\n '''\n assert(len(ip) == 4)\n return '.'.join([str(x) for x in ip])\n \n def ipstr2num(s):\n '''\n Convert dot notation IP string to numeric.\n '''\n return sum([(int(x) << 8*(3-idx)) for idx,x in enumerate(s.split('.'))])\n \n def inc_ip(lastIP):\n lastIP[3] = lastIP[3] + 1\n return lastIP\n \n # The IP range given in the net object\n baseIP = [int(x) for x in net.ipBase.split('/', 1)[0].split('.')]\n # An incrementer used when adding hosts to the net object\n lastIP = baseIP\n # List of switch objects for classical switches\n cSwitches = []\n # List of node objects for quantum switches\n qSwitches = []\n # List of node objects for hosts\n hosts = []\n # List of links between node objects in the classical plane\n cLinks = []\n # List of links between node objects in the quantum plane\n qLinks = []\n \n for switchObject in topoJson['switches']:\n # Add classical part of switch\n cSwitch = net.addSwitch(switchObject['name'], cls=OVSSwitch)\n cSwitches.append(cSwitch)\n \n if 'isQuantum' in switchObject and switchObject['isQuantum'] is False:\n # Only a classical switch\n info('*** Switch: ' + switchObject['name'])\n continue\n \n # Generate IP of qswitch node\n lastIP = inc_ip(lastIP)\n qSwitchIP = ip_2_str(lastIP)\n \n # Add quantum part of switch\n qSwitch = net.addHost('q'+switchObject['name'], cls=Host, ip=qSwitchIP)\n qSwitches.append(qSwitch)\n \n # Add link between classical part of switch and quantum part of switch\n net.addLink(cSwitch, 
qSwitch)\n \n info('*** Switch: {}; q{} at {}\\n'.format(switchObject['name'],\n switchObject['name'],\n qSwitchIP))\n for hostObject in topoJson['hosts']:\n # Generate IP of the host\n lastIP = inc_ip(lastIP)\n hostIP = ip_2_str(lastIP)\n \n # Add host\n host = net.addHost(hostObject['name'], cls=Host, ip=hostIP)\n hosts.append(host)\n \n info('*** Host: {} at {}\\n'.format(hostObject['name'], hostIP))\n \n for link in topoJson['connections']:\n # Add link between two nodes\n endpointA = net.get(link['endpointA'])\n endpointB = net.get(link['endpointB'])\n net.addLink(endpointA, endpointB)\n cLinks.append([endpointA, endpointB])\n \n info('*** Link: {} <-> {}'.format(endpointA.name, endpointB.name))\n \n quantumEA = link['endpointA']\n quantumEB = link['endpointB']\n if any(d['name'] == link['endpointA'] for d in topoJson['switches']):\n quantumEA = 'q' + quantumEA\n if any(d['name'] == link['endpointB'] for d in topoJson['switches']):\n quantumEB = 'q' + quantumEB\n if quantumEA is not link['endpointA'] or quantumEB is not link['endpointB']:\n qLinks.append([net.get(quantumEA), net.get(quantumEB)])\n info('; {} <~> {}\\n'.format(quantumEA, quantumEB))\n \n # Create mininet topology\n net.build()\n # Start the controller\n controller.start()\n \n for switch in cSwitches:\n # Connect the switch to our controller\n switch.start([controller,])\n # Set our switch to use OF 1.4\n switch.cmd('ovs-vsctl set Bridge ' + switch.name + ' protocols=OpenFlow14')\n # Take the edge off\n sleep(1)\n \n simIP = ''\n dispatcherTxEndpoint = 'ipc:///tmp/dispatcher_tx'\n dispatcherRxEndpoint = 'ipc:///tmp/dispatcher_rx'\n dispatcherTopologyJson = {\n 'nodes': [\n {\n 'model' : 'client',\n 'id' : ipstr2num(host.IP())\n } for host in hosts\n ],\n 'connections': [\n {\n 'endpoints' : [\n ipstr2num(link[0].IP()), ipstr2num(link[1].IP())\n ]\n } for link in qLinks\n ]\n }\n for item in qSwitches:\n jItem = next(a for a in topoJson['switches'] if a['name'] == item.name[1:])\n newSwitchItem = {}\n newSwitchItem['model'] = str(jItem['model'])\n newSwitchItem['id'] = ipstr2num(item.IP())\n if 'ports' in jItem:\n newSwitchItem['portCount'] = jItem['ports']\n if newSwitchItem['portCount'] < len(jItem['connections']):\n raise ValueError('port value smaller than connection count for switch ' + item.name[1:])\n else:\n newSwitchItem['portCount'] = len(jItem['connections'])\n newSwitchItem['ports'] = [ipstr2num(net.get(host).IP()) for host in jItem['connections']]\n \n dispatcherTopologyJson['nodes'].append(newSwitchItem)\n \n dispatcherTopology = json.dumps(dispatcherTopologyJson)\n quantumBackend = launch_qsim_backend(None,\n sabotEndpoint = 'ipc:///tmp/sabot',\n sabotThreadCount = 1,\n dispatcherSabotEndpoint = 'ipc:///tmp/sabot',\n #dispatcherLoggerEndpoint = 'ipc:///tmp/eldispacho_dspy', # This enables logging using dspy (Greatly increases CPU usage)\n dispatcherTxEndpoint = dispatcherTxEndpoint,\n dispatcherRxEndpoint = dispatcherRxEndpoint,\n dispatcherThreadCount = 1,\n dispatcherTopology=dispatcherTopology)\n sleep(.5)\n switchAFList = []\n switchAFList.append(\n qSwitches[0].popen(\n get_switch_af_string(qSwitches[0].IP(),\n 'tcp://{}:12345'.format(qSwitches[0].IP()),\n 'tcp://{}:12346'.format(qSwitches[0].IP()),\n dispatcherTxEndpoint)))\n clientAFList = []\n for client in hosts:\n clientAFList.append(\n client.popen(\n get_client_af_string(client.IP(),\n dispatcherRxEndpoint,\n dispatcherTxEndpoint)))\n sleep(.5)\n return [net, quantumBackend, switchAFList, clientAFList]\n\nsetLogLevel('info')\ncontrollerP = 
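# [Added sketch] Worked example for the ipstr2num helper defined inside
# generate_topology above (dot-decimal string to 32-bit integer):
def ipstr2num_demo(s):
    return sum(int(x) << 8 * (3 - idx) for idx, x in enumerate(s.split('.')))

assert ipstr2num_demo('10.0.2.5') == 167772677   # (10 << 24) + (2 << 8) + 5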
Popen(['exec ryu-manager --ofp-tcp-listen-port 6633 '+RYU_SCRIPT+' > CONTROLLER_LOG 2>&1'], shell=True)\nsleep(1)\ncontrollerab = RemoteController(name='c0',\n controller=Controller,\n protocol='tcp',\n port=6633)\n\nnet = Mininet(topo=None,\n build=False,\n ipBase='10.0.2.0/24',\n link=TCLink)\n\ntopoFile = open('3host.json')\ntopoJson = json.loads(topoFile.read())\ntopoFile.close()\n\ntopoOutput = generate_topology(net, topoJson, controllerab)\nnet = topoOutput[0]\n\n#\n#\n# Install quantum flows on switch.\n# \n# This prevents us from having to hardcode this stuff in the controller.\n#\n#\n#\n\n# dl_type=2048\t\t= ipv4\n# nw_proto=6\t\t= tcp\n# tp_dst=14450\t\t= tcp dest. port 14450\n# dl_type=2048,nw_proto=6 = tcp\n#proto_ver = 0,\n#options = 2 ???\nqsFlowBase = 'ovs-ofctl add-flow s0 -O OpenFlow14 \"priority=11,tcp,tcp_{}=14450,tcp_flags=+psh,in_port={},dl_dst={},action=qscon:(0;2;{};{};ipc:///tmp/qs1),output:{}\"'\n\n# ({'src', 'dst'}, cin_port, dest_mac, qin_port, qout_port, cout_port)\nnet.get('alice').cmd(qsFlowBase.format('src', 2, net.get('bob').MAC(), 0, 1, 3))\nnet.get('alice').cmd(qsFlowBase.format('dst', 2, net.get('bob').MAC(), 0, 1, 3))\nnet.get('alice').cmd(qsFlowBase.format('src', 3, net.get('alice').MAC(), 1, 0, 2))\nnet.get('alice').cmd(qsFlowBase.format('dst', 3, net.get('alice').MAC(), 1, 0, 2))\n\n# Fill out rest of MAC table in controller\nnet.get('alice').cmd('ping ' + net.get('bob').IP() + ' -c 3')\nnet.get('bob').cmd('ping ' + net.get('charlie').IP() + ' -c 3')\n\nCLI(net)\n\n#####\ninfo('*** Stopping client quantum software\\n')\nfor af in topoOutput[3]:\n while af.returncode is None:\n af.terminate()\n af.poll()\n sleep(0.05)\n\n#####\ninfo('*** Stopping quantum simulator backend\\n')\n# Currently, the dispatcher doesn't respond to SIGINT, so we use KILL here.\n# Eventually, we want to change to terminate\nfor i in topoOutput[1]:\n i.kill()\n\n#####\ninfo('*** Stopping quantum switch software\\n')\nfor af in topoOutput[2]:\n while af.returncode is None:\n af.terminate()\n af.poll()\n sleep(0.05)\n\n#####\ninfo('*** Stopping controller\\n')\nwhile controllerP.returncode is None:\n controllerP.kill()\n controllerP.poll()\n sleep(0.05)\n","sub_path":"examples/bobwire/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"201686458","text":"from django.contrib.auth.models import User\nfrom django.template import TemplateSyntaxError, engines\n\nimport cio\nfrom cio.backends import cache\nfrom cio.pipeline import pipeline\nfrom djedi.templatetags.template import register\nfrom djedi.tests.base import AssertionMixin, DjediTest\n\n\nclass TagTest(DjediTest, AssertionMixin):\n def render(self, source, context=None):\n source = \"{% load djedi_tags %}\" + source.strip()\n return engines[\"django\"].from_string(source).render(context).strip()\n\n def test_node_tag(self):\n html = self.render(\"{% node 'page/title' edit=False %}\")\n assert html == \"\"\n\n cio.set(\"i18n://sv-se@page/title.txt\", \"Djedi\")\n cio.set(\"i18n://sv-se@page/body.txt\", \"Lightning fast!\")\n\n with self.assertCache(calls=1, misses=0):\n with self.assertDB(calls=0):\n html = self.render(\n \"<h1>{% node 'page/title' edit=False %}</h1><p>{% node 'page/body' edit=False %}</p>\"\n )\n assert html == \"<h1>Djedi</h1><p>Lightning fast!</p>\"\n\n cache.clear()\n\n with self.assertCache(calls=1, misses=2):\n with self.assertDB(calls=1):\n html = self.render(\n \"<h1>{% node 
'page/title' edit=False %}</h1><p>{% node 'page/body' %}</p>\"\n )\n assert (\n html\n == '<h1>Djedi</h1><p><span data-i18n=\"sv-se@page/body\">Lightning fast!</span></p>'\n )\n\n html = self.render(\"{% node 'foo/bar' default='bogus' %}\")\n assert html == '<span data-i18n=\"sv-se@foo/bar\">bogus</span>'\n html = self.render(\"{% node 'l10n://foo/bar' default='bogus' %}\")\n self.assertEqual(html, '<span data-i18n=\"djedi@foo/bar\">bogus</span>')\n\n def test_node_tag_with_default_scheme(self):\n cio.set(\"i18n://sv-se@page/title.txt\", \"Swedish Djedi\")\n html = self.render(\"{% node 'page/title' edit=False %}\")\n assert html == \"Swedish Djedi\"\n\n with self.settings(DJEDI={\"URI_DEFAULT_SCHEME\": \"l10n\"}):\n html = self.render(\"{% node 'page/title' edit=False %}\")\n assert html == \"\"\n\n cio.set(\"l10n://djedi@page/title.txt\", \"Local Djedi\")\n html = self.render(\"{% node 'page/title' edit=False %}\")\n assert html == \"Local Djedi\"\n\n def test_blocknode_tag(self):\n with self.assertRaises(TemplateSyntaxError):\n self.render(\"{% blocknode 'page/body' arg %}{% endblocknode %}\")\n\n html = self.render(\n \"\"\"\n {% blocknode 'page/body.md' edit=False %}\n # Djedi\n Lightning *fast*!\n {% endblocknode %}\n \"\"\"\n )\n self.assertRenderedMarkdown(html, \"# Djedi\\nLightning *fast*!\")\n\n cio.set(\"i18n://sv-se@page/body.txt\", \"Lightning fast!\")\n html = self.render(\n \"\"\"\n {% blocknode \"page/body\" %}\n Lorem ipsum\n {% endblocknode %}\n \"\"\"\n )\n assert html == '<span data-i18n=\"sv-se@page/body\">Lightning fast!</span>'\n\n cio.set(\"i18n://sv-se@page/body.txt\", \"\")\n html = self.render(\n \"{% blocknode 'page/body' edit=False %}Lorem ipsum{% endblocknode %}\"\n )\n assert html == \"\"\n\n def test_blocknode_with_context(self):\n cio.set(\"i18n://sv-se@page/title.txt\", \"Hej {name}!\")\n\n source = \"\"\"\n {% blocknode 'page/title' edit=False name=user.get_full_name %}\n Hello {name}!\n {% endblocknode %}\n \"\"\"\n\n context = {\"user\": User(first_name=\"Jonas\", last_name=\"Lundberg\")}\n html = self.render(source, context)\n assert html == \"Hej Jonas Lundberg!\"\n\n with cio.env(i18n=\"en-us\"):\n html = self.render(source, context)\n assert html == \"Hello Jonas Lundberg!\"\n\n html = self.render(\n \"\"\"\n {% blocknode 'page/title' edit=False %}\n Hello {name}!\n {% endblocknode %}\n \"\"\"\n )\n assert html == \"Hej {name}!\"\n\n def test_collected_nodes(self):\n source = \"\"\"\n {% node 'page/title' edit=False %}\n {% node 'page/title' default='fallback' edit=False %}\n {% node 'page/body' edit=False %}\n \"\"\"\n pipeline.history.clear()\n self.render(source)\n assert len(pipeline.history) == 2\n\n def test_invalid_lazy_tag(self):\n with self.assertRaises(TemplateSyntaxError):\n register.lazy_tag(\"\")\n\n def test_lazy_tag(self):\n @register.lazy_tag\n def foo():\n return lambda _: \"bar\"\n\n html = self.render(\"{% foo %}\")\n assert html == \"bar\"\n\n @register.lazy_tag()\n def bar():\n return lambda _: \"foo\"\n\n html = self.render(\"{% bar %}\")\n assert html == \"foo\"\n\n def test_djedi_admin_tag(self):\n source = \"\"\"\n {% load djedi_admin %}\n {% djedi_admin %}\n \"\"\"\n\n user = User(first_name=\"Jonas\", last_name=\"Lundberg\")\n\n class RequestMock:\n def __init__(self, user):\n self.user = user\n\n context = {\"request\": RequestMock(user=user)}\n html = self.render(source, context)\n assert html == \"\"\n\n user.is_superuser = True\n html = self.render(source, context)\n assert \"<script>window.DJEDI_NODES = 
{};</script>\" in html\n","sub_path":"djedi/tests/test_templatetags.py","file_name":"test_templatetags.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"329388455","text":"import os\nfrom PIL import Image\nimport logging\nfrom lindh.jsonobject import register_schema, PropertySet, Property\n\nfrom ..system import current_system\nfrom ..job import JobHandler, register_job_handler\nfrom ..entry import get_entry_by_id, update_entry_by_id, Purpose, Variant\nfrom ..localfile import FileCopy, FolderScanner\n\n\nclass AmendOptions(PropertySet):\n entry_id = Property(int)\n amend_metadata = Property(bool, default=True)\n amend_variants = Property(bool, default=True)\n\n\nregister_schema(AmendOptions)\n\n\nclass AmendJobHandler(JobHandler):\n method = 'amend'\n Options = AmendOptions\n\n def run(self, job):\n logging.info('Starting amending.')\n assert job is not None, \"Job can't be None\"\n assert job.options is not None, \"Job Options can't be None\"\n logging.info('Job\\n%s', job.to_json())\n\n options = job.options\n entry = get_entry_by_id(options.entry_id)\n before = entry.to_json()\n logging.info('Original entry is\\n%s', before)\n\n if options.amend_metadata:\n if entry.metadata.Copyright == '[]':\n entry.metadata.Copyright = None\n\n if options.amend_variants:\n latest_source = None\n for variant in entry.variants:\n full_path = os.path.join(\n current_system().media_root,\n variant.get_filename(entry.id),\n )\n\n if variant.mime_type == 'image/jpeg':\n img = Image.open(full_path)\n variant.width, variant.height = img.size\n img.close()\n\n if variant.purpose in (Purpose.original, Purpose.derivative):\n latest_source = variant\n\n if variant.purpose in (Purpose.original, Purpose.raw):\n variant.source_purpose = None\n variant.source_version = None\n\n elif variant.purpose in (Purpose.proxy, Purpose.check, Purpose.thumb):\n variant.source_purpose = latest_source.purpose\n variant.source_version = latest_source.version\n\n after = entry.to_json()\n logging.info('Amended entry is\\n%s', after)\n\n if after != before:\n update_entry_by_id(entry.id, entry)\n\n logging.info('Done amending.')\n\n\nregister_job_handler(AmendJobHandler)\n","sub_path":"images6/job/amend.py","file_name":"amend.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"555115199","text":"\"\"\" setup python path \"\"\"\n# import os\nimport sys\n# import json\n\n\n# def add_environ(cfg):\n# with open(cfg) as f:\n# environ = json.loads(f.read())\n# for k in environ:\n# os.environ[k] = environ[k]\n# print(\"Set os.environ: `%s`\" % k)\n\n\ndef add_path(path):\n if path not in sys.path:\n sys.path.insert(0, path)\n\n\n# add_environ('../ENVIRON')\n\nadd_path('../../')\nprint(\"add code root path (with `rllib`).\")\n","sub_path":"workspace/Pong/_init_paths.py","file_name":"_init_paths.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"21020230","text":"import string\ndef count_words(text,words):\n \"\"\"\n Count how many words are included in the given text\n \"\"\"\n count = 0\n temp = text.lower()\n list = words.split(',')\n print(list)\n for i in range(len(list)):\n if list[i] in temp:\n count = count + 1\n return count\n\ntext = raw_input(\"Enter a text: \")\nwords = raw_input(\"Enter a set of words seperated by comma: \")\nresult = 
count_words(text,words)\nprint(result)","sub_path":"Monkey Typing.py","file_name":"Monkey Typing.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"274453548","text":"from __future__ import annotations\n\nimport warnings\nfrom builtins import id as identifier\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Literal, Protocol, TypeVar, overload\n\nfrom toga.command import CommandSet\nfrom toga.handlers import AsyncResult, wrapped_handler\nfrom toga.platform import get_platform_factory\nfrom toga.widgets.base import WidgetRegistry\n\nif TYPE_CHECKING:\n from toga.app import App\n from toga.widgets.base import Widget\n\n\nclass OnCloseHandler(Protocol):\n def __call__(self, window: Window, **kwargs: Any) -> bool:\n \"\"\"A handler to invoke when a window is about to close.\n\n The return value of this callback controls whether the window is allowed to close.\n This can be used to prevent a window closing with unsaved changes, etc.\n\n .. note::\n ``**kwargs`` ensures compatibility with additional arguments\n introduced in future versions.\n\n :param window: The window instance that is closing.\n :returns: ``True`` if the window is allowed to close; ``False`` if the window is not\n allowed to close.\n \"\"\"\n ...\n\n\nT = TypeVar(\"T\")\n\n\nclass DialogResultHandler(Protocol[T]):\n def __call__(self, window: Window, result: T, **kwargs: Any) -> None:\n \"\"\"A handler to invoke when a dialog is closed.\n\n .. note::\n ``**kwargs`` ensures compatibility with additional arguments\n introduced in future versions.\n\n :param window: The window that opened the dialog.\n :param result: The result returned by the dialog.\n \"\"\"\n ...\n\n\nclass Dialog(AsyncResult):\n RESULT_TYPE = \"dialog\"\n\n def __init__(self, window: Window):\n super().__init__()\n self.window = window\n self.app = window.app\n\n\nclass Window:\n \"\"\"The top level container of an application.\n\n Args:\n id: The ID of the window.\n title: Title for the window.\n position: Position of the window, as x,y coordinates.\n size: Size of the window, as (width, height) sizes, in pixels.\n toolbar: (Deprecated, will have no effect)\n resizeable: Toggle if the window is resizable by the user.\n closeable: Toggle if the window is closable by the user.\n minimizable: Toggle if the window is minimizable by the user.\n on_close: A callback to invoke when the user makes a request to close the window.\n \"\"\"\n\n _WINDOW_CLASS = \"Window\"\n\n def __init__(\n self,\n id: str | None = None,\n title: str | None = None,\n position: tuple[int, int] = (100, 100),\n size: tuple[int, int] = (640, 480),\n toolbar: list[Widget | None] = None,\n resizeable: bool = True,\n closeable: bool = True,\n minimizable: bool = True,\n factory: None = None, # DEPRECATED !\n on_close: OnCloseHandler | None = None,\n ) -> None:\n ######################################################################\n # 2022-09: Backwards compatibility\n ######################################################################\n # factory no longer used\n if factory:\n warnings.warn(\"The factory argument is no longer used.\", DeprecationWarning)\n ######################################################################\n # End backwards compatibility.\n ######################################################################\n\n self.widgets = WidgetRegistry()\n\n self._id = str(id if id else identifier(self))\n self._impl = None\n self._app = None\n self._content = 
None\n self._is_full_screen = False\n\n self.resizeable = resizeable\n self.closeable = closeable\n self.minimizable = minimizable\n\n self.factory = get_platform_factory()\n self._impl = getattr(self.factory, self._WINDOW_CLASS)(\n interface=self,\n title=\"Toga\" if title is None else title,\n position=position,\n size=size,\n )\n\n self._toolbar = CommandSet(widget=self, on_change=self._impl.create_toolbar)\n\n self.on_close = on_close\n\n @property\n def id(self) -> str:\n \"\"\"The DOM identifier for the window.\n\n This id can be used to target CSS directives.\n \"\"\"\n return self._id\n\n @property\n def app(self) -> App | None:\n \"\"\"Instance of the :class:`toga.App` that this window belongs to.\n\n Returns:\n The app that it belongs to :class:`toga.App`.\n\n Raises:\n Exception: If the window already is associated with another app.\n \"\"\"\n return self._app\n\n @app.setter\n def app(self, app: App) -> None:\n if self._app:\n raise Exception(\"Window is already associated with an App\")\n\n self._app = app\n self._impl.set_app(app._impl)\n\n if self.content:\n self.content.app = app\n\n @property\n def title(self) -> str:\n \"\"\"Title of the window. If no title is given it defaults to ``\"Toga\"``.\"\"\"\n return self._impl.get_title()\n\n @title.setter\n def title(self, title: str) -> None:\n if not title:\n title = \"Toga\"\n\n self._impl.set_title(title)\n\n @property\n def toolbar(self) -> CommandSet:\n \"\"\"Toolbar for the window.\"\"\"\n return self._toolbar\n\n @property\n def content(self) -> Widget | None:\n \"\"\"Content of the window. On setting, the content is added to the same app as\n the window and to the same app.\"\"\"\n return self._content\n\n @content.setter\n def content(self, widget: Widget) -> None:\n # Set window of old content to None\n if self._content:\n self._content.window = None\n\n # Assign the content widget to the same app as the window.\n widget.app = self.app\n\n # Assign the content widget to the window.\n widget.window = self\n\n # Track our new content\n self._content = widget\n\n # Manifest the widget\n self._impl.set_content(widget._impl)\n\n # Update the geometry of the widget\n widget.refresh()\n\n @property\n def size(self) -> tuple[int, int]:\n \"\"\"Size of the window, as a ``(width, height)`` tuple.\"\"\"\n return self._impl.get_size()\n\n @size.setter\n def size(self, size: tuple[int, int]) -> None:\n self._impl.set_size(size)\n if self.content:\n self.content.refresh()\n\n @property\n def position(self) -> tuple[int, int]:\n \"\"\"Position of the window, as an ``(x, y)`` tuple.\"\"\"\n return self._impl.get_position()\n\n @position.setter\n def position(self, position: tuple[int, int]) -> None:\n self._impl.set_position(position)\n\n def show(self) -> None:\n \"\"\"Show window, if hidden.\"\"\"\n if self.app is None:\n raise AttributeError(\n \"Can't show a window that doesn't have an associated app\"\n )\n self._impl.show()\n\n def hide(self) -> None:\n \"\"\"Hide window, if shown.\"\"\"\n if self.app is None:\n raise AttributeError(\n \"Can't hide a window that doesn't have an associated app\"\n )\n self._impl.hide()\n\n @property\n def full_screen(self) -> bool:\n return self._is_full_screen\n\n @full_screen.setter\n def full_screen(self, is_full_screen: bool) -> None:\n self._is_full_screen = is_full_screen\n self._impl.set_full_screen(is_full_screen)\n\n @property\n def visible(self) -> bool:\n return self._impl.get_visible()\n\n @visible.setter\n def visible(self, visible: bool) -> None:\n if visible:\n self.show()\n 
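# An illustrative usage sketch (added here; not part of the original\n        # file). A window made visible this way must already belong to an app:\n        #\n        #     window = toga.Window(title=\"Demo\")\n        #     window.app = app          # some running toga.App instance\n        #     window.visible = True     # equivalent to window.show()\n        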
else:\n self.hide()\n\n @property\n def on_close(self) -> OnCloseHandler:\n \"\"\"The handler to invoke before the window is closed.\"\"\"\n return self._on_close\n\n @on_close.setter\n def on_close(self, handler: OnCloseHandler | None) -> None:\n def cleanup(window: Window, should_close: bool) -> None:\n if should_close:\n window.close()\n\n self._on_close = wrapped_handler(self, handler, cleanup=cleanup)\n\n def close(self) -> None:\n self.app.windows -= self\n self._impl.close()\n\n ############################################################\n # Dialogs\n ############################################################\n\n def info_dialog(\n self,\n title: str,\n message: str,\n on_result: DialogResultHandler[None] | None = None,\n ) -> Dialog:\n \"\"\"Ask the user to acknowledge some information.\n\n Presents as a dialog with a single 'OK' button to close the dialog.\n\n :param title: The title of the dialog window.\n :param message: The message to display.\n :param on_result: A callback that will be invoked when the user\n selects an option on the dialog.\n :returns: An awaitable Dialog object. The Dialog object returns\n ``None`` after the user pressed the 'OK' button.\n \"\"\"\n dialog = Dialog(self)\n self.factory.dialogs.InfoDialog(\n dialog, title, message, on_result=wrapped_handler(self, on_result)\n )\n return dialog\n\n def question_dialog(\n self,\n title: str,\n message: str,\n on_result: DialogResultHandler[bool] | None = None,\n ) -> Dialog:\n \"\"\"Ask the user a yes/no question.\n\n Presents as a dialog with a 'YES' and 'NO' button.\n\n :param title: The title of the dialog window.\n :param message: The question to be answered.\n :param on_result: A callback that will be invoked when the user\n selects an option on the dialog.\n :returns: An awaitable Dialog object. The Dialog object returns\n ``True`` when the 'YES' button was pressed, ``False`` when\n the 'NO' button was pressed.\n \"\"\"\n dialog = Dialog(self)\n self.factory.dialogs.QuestionDialog(\n dialog, title, message, on_result=wrapped_handler(self, on_result)\n )\n return dialog\n\n def confirm_dialog(\n self,\n title: str,\n message: str,\n on_result: DialogResultHandler[bool] | None = None,\n ) -> Dialog:\n \"\"\"Ask the user to confirm if they wish to proceed with an action.\n\n Presents as a dialog with 'Cancel' and 'OK' buttons (or whatever labels\n are appropriate on the current platform)\n\n :param title: The title of the dialog window.\n :param message: A message describing the action to be confirmed.\n :param on_result: A callback that will be invoked when the user\n selects an option on the dialog.\n :returns: An awaitable Dialog object. The Dialog object returns\n ``True`` when the 'OK' button was pressed, ``False`` when\n the 'CANCEL' button was pressed.\n \"\"\"\n dialog = Dialog(self)\n self.factory.dialogs.ConfirmDialog(\n dialog, title, message, on_result=wrapped_handler(self, on_result)\n )\n return dialog\n\n def error_dialog(\n self,\n title: str,\n message: str,\n on_result: DialogResultHandler[None] | None = None,\n ) -> Dialog:\n \"\"\"Ask the user to acknowledge an error state.\n\n Presents as an error dialog with a 'OK' button to close the dialog.\n\n :param title: The title of the dialog window.\n :param message: The error message to display.\n :param on_result: A callback that will be invoked when the user\n selects an option on the dialog.\n :returns: An awaitable Dialog object. 
The Dialog object returns\n ``None`` after the user pressed the 'OK' button.\n \"\"\"\n dialog = Dialog(self)\n self.factory.dialogs.ErrorDialog(\n dialog, title, message, on_result=wrapped_handler(self, on_result)\n )\n return dialog\n\n @overload\n def stack_trace_dialog(\n self,\n title: str,\n message: str,\n content: str,\n retry: Literal[False] = False,\n on_result: DialogResultHandler[None] | None = None,\n ) -> Dialog:\n ...\n\n @overload\n def stack_trace_dialog(\n self,\n title: str,\n message: str,\n content: str,\n retry: Literal[True] = False,\n on_result: DialogResultHandler[bool] | None = None,\n ) -> Dialog:\n ...\n\n @overload\n def stack_trace_dialog(\n self,\n title: str,\n message: str,\n content: str,\n retry: bool = False,\n on_result: DialogResultHandler[bool | None] | None = None,\n ) -> Dialog:\n ...\n\n def stack_trace_dialog(\n self,\n title: str,\n message: str,\n content: str,\n retry: bool = False,\n on_result: DialogResultHandler[bool | None] | None = None,\n ) -> Dialog:\n \"\"\"Open a dialog that allows to display a large text body, such as a stack\n trace.\n\n :param title: The title of the dialog window.\n :param message: Contextual information about the source of the stack trace.\n :param content: The stack trace, pre-formatted as a multi-line string.\n :param retry: A Boolean; if True, the user will be given a \"Retry\" and\n \"Quit\" option; if False, a single option to acknowledge the error will\n be displayed.\n :param on_result: A callback that will be invoked when the user\n selects an option on the dialog.\n :returns: An awaitable Dialog object. If retry is enabled, the Dialog object\n returns ``True`` if the user selected retry, and ``False`` otherwise;\n if retry is not enabled, the dialog object returns ``None``.\n \"\"\"\n dialog = Dialog(self)\n self.factory.dialogs.StackTraceDialog(\n dialog,\n title,\n message,\n content=content,\n retry=retry,\n on_result=wrapped_handler(self, on_result),\n )\n return dialog\n\n def save_file_dialog(\n self,\n title: str,\n suggested_filename: Path | str,\n file_types: list[str] | None = None,\n on_result: DialogResultHandler[Path | None] | None = None,\n ) -> Dialog:\n \"\"\"Prompt the user for a location to save a file.\n\n Presents the user a system-native \"Save file\" dialog.\n\n This opens a native dialog where the user can select a place to save a file.\n It is possible to suggest a filename and force the user to use a specific file extension.\n If no path is returned (e.g. dialog is canceled), a ValueError is raised.\n\n :param title: The title of the dialog window\n :param suggested_filename: A default filename\n :param file_types: A list of strings with the allowed file extensions.\n :param on_result: A callback that will be invoked when the user\n selects an option on the dialog.\n :returns: An awaitable Dialog object. 
The Dialog object returns\n a path object for the selected file location, or ``None`` if\n the user cancelled the save operation.\n \"\"\"\n dialog = Dialog(self)\n # Convert suggested filename to a path (if it isn't already),\n # and break it into a filename and a directory\n suggested_path = Path(suggested_filename)\n initial_directory = suggested_path.parent\n if initial_directory == Path(\".\"):\n initial_directory = None\n filename = suggested_path.name\n\n self.factory.dialogs.SaveFileDialog(\n dialog,\n title,\n filename=filename,\n initial_directory=initial_directory,\n file_types=file_types,\n on_result=wrapped_handler(self, on_result),\n )\n return dialog\n\n @overload\n def open_file_dialog(\n self,\n title: str,\n initial_directory: Path | str | None = None,\n file_types: list[str] | None = None,\n multiselect: Literal[False] = False,\n on_result: DialogResultHandler[Path | None] | None = None,\n ) -> Dialog:\n ...\n\n @overload\n def open_file_dialog(\n self,\n title: str,\n initial_directory: Path | str | None = None,\n file_types: list[str] | None = None,\n multiselect: Literal[True] = True,\n on_result: DialogResultHandler[list[Path] | None] | None = None,\n ) -> Dialog:\n ...\n\n @overload\n def open_file_dialog(\n self,\n title: str,\n initial_directory: Path | str | None = None,\n file_types: list[str] | None = None,\n multiselect: bool = False,\n on_result: DialogResultHandler[list[Path] | Path | None] | None = None,\n ) -> Dialog:\n ...\n\n def open_file_dialog(\n self,\n title: str,\n initial_directory: Path | str | None = None,\n file_types: list[str] | None = None,\n multiselect: bool = False,\n on_result: DialogResultHandler[list[Path] | Path | None] | None = None,\n ) -> Dialog:\n \"\"\"Ask the user to select a file (or files) to open.\n\n Presents the user a system-native \"Open file\" dialog.\n\n :param title: The title of the dialog window\n :param initial_directory: The initial folder in which to open the dialog.\n If ``None``, use the default location provided by the operating system\n (which will often be \"last used location\")\n :param file_types: A list of strings with the allowed file extensions.\n :param multiselect: If True, the user will be able to select multiple\n files; if False, the selection will be restricted to a single file/\n :param on_result: A callback that will be invoked when the user\n selects an option on the dialog.\n :returns: An awaitable Dialog object. The Dialog object returns\n a list of ``Path`` objects if ``multiselect`` is ``True``, or a single\n ``Path`` otherwise. 
Returns ``None`` if the open operation is\n cancelled by the user.\n \"\"\"\n dialog = Dialog(self)\n self.factory.dialogs.OpenFileDialog(\n dialog,\n title,\n initial_directory=Path(initial_directory) if initial_directory else None,\n file_types=file_types,\n multiselect=multiselect,\n on_result=wrapped_handler(self, on_result),\n )\n return dialog\n\n @overload\n def select_folder_dialog(\n self,\n title: str,\n initial_directory: Path | str | None = None,\n multiselect: Literal[False] = False,\n on_result: DialogResultHandler[Path | None] | None = None,\n ) -> Dialog:\n ...\n\n @overload\n def select_folder_dialog(\n self,\n title: str,\n initial_directory: Path | str | None = None,\n multiselect: Literal[True] = True,\n on_result: DialogResultHandler[list[Path] | None] | None = None,\n ) -> Dialog:\n ...\n\n @overload\n def select_folder_dialog(\n self,\n title: str,\n initial_directory: Path | str | None = None,\n multiselect: bool = False,\n on_result: DialogResultHandler[list[Path] | Path | None] | None = None,\n ) -> Dialog:\n ...\n\n def select_folder_dialog(\n self,\n title: str,\n initial_directory: Path | str | None = None,\n multiselect: bool = False,\n on_result: DialogResultHandler[list[Path] | Path | None] | None = None,\n ) -> Dialog:\n \"\"\"Ask the user to select a directory/folder (or folders) to open.\n\n Presents the user a system-native \"Open folder\" dialog.\n\n :param title: The title of the dialog window\n :param initial_directory: The initial folder in which to open the dialog.\n If ``None``, use the default location provided by the operating system\n (which will often be \"last used location\")\n :param multiselect: If True, the user will be able to select multiple\n files; if False, the selection will be restricted to a single file/\n :param on_result: A callback that will be invoked when the user\n selects an option on the dialog.\n :returns: An awaitable Dialog object. The Dialog object returns\n a list of ``Path`` objects if ``multiselect`` is ``True``, or a single\n ``Path`` otherwise. 
Returns ``None`` if the open operation is\n cancelled by the user.\n \"\"\"\n dialog = Dialog(self)\n self.factory.dialogs.SelectFolderDialog(\n dialog,\n title,\n initial_directory=Path(initial_directory) if initial_directory else None,\n multiselect=multiselect,\n on_result=wrapped_handler(self, on_result),\n )\n return dialog\n","sub_path":"core/src/toga/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":20832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"383723281","text":"\"\"\"\nexercise_03\n2/8/2018\n\nFor this Exercise you will write one definition that will take in the name of a\ndirectory as a string, and return a dictionary containing every sequence in every FASTA file where\nthe sequence header is the key and the DNA sequences are values.\n\nYour definition will be tested with improperly formatted FASTA files and should handle the following cases:\n 1) If there are extra new line characters, or empty lines, your program should still process sequences normally\n 2) If a duplicate header exists between two entries your definition should check to see if the sequences are the same\n * If the headers and sequences are identical, your program should print a message that \"a duplicate entry exists\n for <header>\" and continue normally.\n * If the only the headers match, you should print a message that \"duplicate headers with non-identical\n sequences were found for <header>\" and neither entry should be added in the dictionary.\n (your print statements don't need to be identical to what I have written here)\n 3) If a file in the directory is not a fasta file, your program should not open it.\n 4) If a sequence contains characters that are not A, C, G, or T, then it should not be added to the dictionary.\n\nIf your program is working correctly, the dictionary should only contain the 4 \"good sequence\"s in the test folder.\n\n\nThe following syntax may be helpful:\n\n# deleting from a dictionary\ndel my_dictionary[key]\n\n# printing and formatting a string\nx = 'my_variable'\nprint('Error related to variable: {}'.format(x))\n\n# checking your final dictionary by printing out key, value pairs\nfor key, value in my_dictionary.items():\n print('Key is: {}\\tValue is: {}'.format(key, value))\n\n\"\"\"\n\nimport os\n\n# /Users/don/PycharmProjects/CS696/Exercises/test_files\n\ndef fasta_folder_to_dict(folder_path):\n \"\"\"\n Constructs a dictionary of all of the FASTA formatted entries from a folder containing FASTA files.\n :param folder_path: string\n :return: dictionary\n \"\"\"\n results ={}\n fileName = []\n for file in os.listdir(folder_path):\n if not file.endswith(('.fasta')):\n continue\n else:\n fileName.append(os.path.join(folder_path, file))\n\n for i in fileName:\n with open(i, 'r') as x:\n line = x.read().split('>')\n for j in line:\n newLine = j.split('\\n', 1)\n #skip empty lines\n if \"\" in newLine:\n continue\n key = newLine[0]\n val = newLine[1].replace(\"\\n\", \"\")\n\n # prevents duplicate key and vals\n if key in results and val in results.values():\n print(\"a duplicate entry exists for key {}\".format(key))\n continue\n # prevents duplicate keys\n if key in results:\n print(\"a duplicate header exists for key {}\".format(key))\n continue\n # prevents empty values\n if not val:\n continue\n\n # prevents accidental non DNA seqs\n if all(letter in \"CGAT\" for letter in val):\n results[key] = val\n\n return 
results\n\n\n","sub_path":"Exercises/exercise_03.py","file_name":"exercise_03.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"121943079","text":"import os\nimport easygui as es\nimport string\ndef genLib(i):\n value='R0'+str(i) if i<10 else 'R'+str(i)\n return value\n#def ui:\ntitle = \"DeSeq & RPKM Comparrison\"\nlibraries = [genLib(i) for i in range(1,166)] \nmsg =\"Hello user, below listed are the libraries made available for you\"\n\nchoice = es.multchoicebox(msg,title,libraries)\nfieldValues = es.multenterbox(msg,title,libraries) \n","sub_path":"DeSeqComplete.py","file_name":"DeSeqComplete.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"320196966","text":"import torch \nfrom torch import nn\nimport matplotlib.pyplot as plt\n\nimport sys\nsys.path.append('D:/Uni/Kogni/Bachelorarbeit/Code/BA_BAPTAT')\nfrom CoreLSTM.core_lstm import CORE_NET\nfrom CoreLSTM.test_core_lstm import LSTM_Tester\nfrom Data_Compiler.data_preparation import Preprocessor\nfrom torch.utils.data import TensorDataset, DataLoader\n\n\nclass LSTM_Trainer():\n ## General parameters \n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n def __init__(self, loss_function, learning_rate, momentum, l2_penality, batch_size):\n self._model = CORE_NET()\n self.batch_size = batch_size\n self._loss_function = loss_function\n self._optimizer = torch.optim.SGD(self._model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=l2_penality)\n # self._optimizer = torch.optim.SGD(self._model.parameters(), lr=learning_rate)\n # self._optimizer = torch.optim.Adam(self._model.parameters(), lr=learning_rate)\n\n print('Initialized model!')\n print(self._model)\n print(self._loss_function)\n print(self._optimizer)\n\n\n def train(self, epochs, train_sequence, save_path):\n losses = []\n num_batches = len(train_sequence)\n # print(num_batches)\n for ep in range(epochs):\n self._model.zero_grad()\n self._optimizer.zero_grad()\n ep_loss = 0\n\n #########################################################################################\n # Teacher Forcing\n inputs = []\n targets = []\n for seq, labels in train_sequence:\n inputs.append(seq)\n # print(seq[1:,:].shape)\n # print(labels.shape)\n target = torch.cat((seq[1:,:], labels), dim=0)\n targets.append(target)\n \n batch_size = seq.size()[0]\n ins = []\n tars = []\n for i in range(len(train_sequence)):\n if i%batch_size==0:\n ins.append(inputs[i])\n tars.append(targets[i])\n ins = torch.stack(ins)\n tars = torch.stack(tars)\n num_batches = ins.size()[0]\n num_input = ins.size()[2]\n # print(num_batches)\n state = self._model.init_hidden(num_batches)\n # print(ins.shape)\n # print(tars.shape)\n \n outs = []\n for i in range(batch_size):\n input = ins[:,i,:].view(num_batches, num_input)\n # print(input.shape)\n out, state = self._model.forward(input, state)\n outs.append(out)\n\n outs = torch.stack(outs)\n single_loss = self._loss_function(outs, tars.permute(1,0,2))\n single_loss.backward()\n self._optimizer.step()\n # print(single_loss.item())\n\n ep_loss = single_loss \n\n #########################################################################################\n # Update State nach jedem Batch mit End-Prediction, inkl. Teacher Forcing \n # DISTINCT! but same batches every epoch! 
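\n            #\n            # (added sketch, not one of the original experiments) the active\n            # block above builds teacher-forcing targets by shifting the input\n            # sequence one step and appending the label frame:\n            #\n            #     target = torch.cat((seq[1:, :], labels), dim=0)\n            #\n            # so the network is trained to predict frame t+1 from frame t.\n            #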
\n # inputs = []\n # targets = []\n # i = 0\n # batch_size = train_sequence[0][0].size()[0]\n # for seq, labels in train_sequence:\n # if i%batch_size==0:\n # inputs.append(seq)\n # targets.append(labels)\n \n # i += 1\n # inputs = torch.stack(inputs)\n # num_batches = inputs.size()[0]\n # num_input = inputs.size()[2]\n # targets = torch.stack(targets).view(num_batches, num_input)\n\n # state = self._model.init_hidden(num_batches)\n\n # outs = []\n # for i in range(batch_size):\n # input = inputs[:,i,:].view(num_batches, num_input)\n # out, state = self._model.forward(input, state)\n \n # single_loss = self._loss_function(out, targets)\n # single_loss.backward()\n # self._optimizer.step()\n\n # ep_loss = single_loss / num_batches\n # print(ep_loss)\n # # print(foo)\n\n #########################################################################################\n # Update State nach jedem Batch mit End-Prediction, inkl. Teacher Forcing \n # DISTINCT! but same batches every epoch! \n # if ep == 0: \n\n # inputs = []\n # targets = []\n # batch_size = train_sequence[0][0].size()[0]\n # for seq, labels in train_sequence:\n # inputs.append(seq)\n # targets.append(labels)\n \n # inputs = torch.stack(inputs)\n # num_batches = inputs.size()[0]\n # batch_length = inputs.size()[1]\n # num_input = inputs.size()[2]\n # targets = torch.stack(targets)\n\n # print(inputs.shape)\n # print(targets.shape)\n # train_loader = DataLoader(\n # dataset=TensorDataset(inputs, targets),\n # batch_size=5,\n # pin_memory=True,\n # shuffle=True\n # )\n\n # batches_per_epoch = len(train_loader)\n # print(batches_per_epoch)\n \n # print(batch_length)\n\n # for (ins, tars) in train_loader: \n # real_num_batches = ins.size()[0]\n\n # self._optimizer.zero_grad()\n\n # state = self._model.init_hidden(real_num_batches)\n # outs = []\n # for i in range(batch_length):\n # input = ins[:,i,:].view(real_num_batches, num_input)\n # out, state = self._model.forward(input, state)\n # # outs.append(out)\n \n # # outs = torch.stack(outs)\n # # print(out.shape)\n # # print(tars.view(real_num_batches, num_input).shape)\n # single_loss = self._loss_function(out, tars.view(real_num_batches, num_input))\n # single_loss.backward()\n # self._optimizer.step()\n\n # ep_loss += single_loss\n # print(ep_loss)\n # print(foo)\n\n\n #########################################################################################\n # Update State nach jedem Batch mit End-Prediction, inkl. Teacher Forcing \n # NOT DISTINCT! 
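\n            #\n            # (added note, an assumption rather than original code) if the\n            # hidden state were carried across batches instead of being\n            # re-initialised, it would normally be detached first to truncate\n            # backpropagation through time:\n            #\n            #     state = tuple(s.detach() for s in state)\n            #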
\n # for seq, labels in train_sequence:\n # batch_size = seq.size()[0]\n # self._optimizer.zero_grad()\n # state = self._model.init_hidden(1)\n # for s in seq:\n # s = s.view(1,45)\n # y_pred, state = self._model(s, state)\n \n # single_loss = self._loss_function(y_pred, labels)\n # single_loss.backward()\n # self._optimizer.step()\n\n # ep_loss += single_loss \n\n #########################################################################################\n # Update State nach jedem Batch, kein Teacher Forcing\n # for seq, labels in train_sequence:\n # batch_size = seq.size()[0]\n # self._optimizer.zero_grad()\n # state = self._model.init_hidden(batch_size)\n # y_pred, state = self._model(seq, state)\n\n # # print(foo)\n # single_loss = self._loss_function(y_pred[-1], labels[0])\n # single_loss.backward()\n # self._optimizer.step()\n\n # ep_loss += single_loss \n \n # ep_loss /= num_batches\n\n # ep_loss /= batches_per_epoch\n\n # save loss of epoch\n losses.append(ep_loss.item())\n if ep%25 == 1:\n print(f'epoch: {ep:3} loss: {single_loss.item():10.8f}')\n \n print(f'epoch: {ep:3} loss: {single_loss.item():10.10f}')\n\n self.save_model(save_path)\n self.plot_losses(losses)\n\n return losses\n \n\n def plot_losses(self, losses):\n fig = plt.figure()\n axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) \n axes.plot(losses, 'r')\n axes.grid(True)\n axes.set_xlabel('epochs')\n axes.set_ylabel('loss')\n axes.set_title('History of MSELoss during training')\n plt.show()\n\n\n def save_model(self, path):\n torch.save(self._model.state_dict(), path)\n print('Model was saved in: ' + path)\n\n\n\n \n\ndef main():\n # LSTM parameters\n frame_samples = 1000\n train_window = 10\n testing_size = 100\n num_features = 15\n num_dimensions = 3\n\n # Training parameters\n epochs = 10000\n mse=nn.MSELoss()\n # loss_function=nn.MSELoss()\n # loss_function= lambda x, y: mse(x, y) * (num_features * num_dimensions)\n loss_function=nn.L1Loss()\n learning_rate=0.01\n momentum=0.0\n l2_penality=0.1\n\n # Init tools\n prepro = Preprocessor(num_features, num_features, num_dimensions)\n trainer = LSTM_Trainer(\n loss_function, \n learning_rate, \n momentum, \n l2_penality, \n train_window\n )\n tester = LSTM_Tester(loss_function)\n tester_1 = LSTM_Tester(mse)\n\n # Init tools\n data_asf_path = 'Data_Compiler/S35T07.asf'\n data_amc_path = 'Data_Compiler/S35T07.amc'\n model_save_path = 'CoreLSTM/models/LSTM_46_cell.pt'\n\n with torch.no_grad():\n # Preprocess data\n io_seq, dt_train, dt_test = prepro.get_LSTM_data(\n data_asf_path, \n data_amc_path, \n frame_samples, \n testing_size, \n train_window\n )\n\n # Train LSTM\n # losses = trainer.train(epochs, io_seq, model_save_path)\n\n test_input = dt_train[0,-train_window:]\n\n # Test LSTM\n tester.test(testing_size, model_save_path, test_input, dt_test, train_window)\n tester_1.test(testing_size, model_save_path, test_input, dt_test, train_window)\n \n\n\nif __name__ == \"__main__\":\n main()\n\n\n# class LSTMLayer_Trainer():\n# ## General parameters \n# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# def __init__(self, loss_function, learning_rate, momentum):\n# self._model = CORE_NET_Layer()\n# self._loss_function = loss_function\n# self._optimizer = torch.optim.SGD(self._model.parameters(), lr=learning_rate, momentum=momentum)\n# # self._optimizer = torch.optim.SGD(self._model.parameters(), lr=learning_rate)\n# # self._optimizer = torch.optim.Adam(self._model.parameters(), lr=learning_rate)\n\n# print('Initialized model!')\n# print(self._model)\n# 
print(self._loss_function)\n#         print(self._optimizer)\n\n\n#     def train(self, epochs, train_sequence, save_path):\n#         losses = []\n\n#         for i in range(epochs):\n#             for seq, labels in train_sequence:\n#                 self._optimizer.zero_grad()\n\n#                 y_pred, state = self._model(seq)\n\n#                 single_loss = self._loss_function(y_pred, labels[0])\n#                 single_loss.backward()\n#                 self._optimizer.step()\n\n#             # save loss of epoch\n#             losses.append(single_loss.item())\n#             if i%25 == 1:\n#                 print(f'epoch: {i:3} loss: {single_loss.item():10.8f}')\n            \n#         print(f'epoch: {i:3} loss: {single_loss.item():10.10f}')\n\n#         self.save_model(save_path)\n#         self.plot_losses(losses)\n\n#         return losses\n    \n\n#     def plot_losses(self, losses):\n#         fig = plt.figure()\n#         axes = fig.add_axes([0.1, 0.1, 0.8, 0.8]) \n#         axes.plot(losses, 'r')\n#         axes.grid(True)\n#         axes.set_xlabel('epochs')\n#         axes.set_ylabel('loss')\n#         axes.set_title('History of MSELoss during training')\n#         plt.show()\n\n\n#     def save_model(self, path):\n#         torch.save(self._model.state_dict(), path)\n#         print('Model was saved in: ' + path)","sub_path":"CoreLSTM/train_core_lstm.py","file_name":"train_core_lstm.py","file_ext":"py","file_size_in_byte":12213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"129762377","text":"# coding:utf-8\r\n# Purpose: scan a class-B (/16) range for sites with ports 80 and 8080 open and record each page title\r\n# A task like this needs to live in its own separate directory\r\nfrom task_manage.concurrent_frame_async import Concurrent, MyTimer\r\nimport asyncio\r\nimport aiohttp\r\nimport json\r\nfrom bs4 import BeautifulSoup\r\nimport random\r\nfrom asyncio.windows_events import ProactorEventLoop\r\nloop = ProactorEventLoop()\r\nasyncio.set_event_loop(loop)\r\n\r\nimport logging\r\nimport loghandler\r\nloghandler.setup_logging()\r\nLOGGER = logging.getLogger()\r\n\r\n# Get the title from an HTML document\r\ndef get_title(html):\r\n    s = BeautifulSoup(html, 'lxml')\r\n    title = s.title.string\r\n    return title\r\n\r\n\r\nclass MyTask(Concurrent):\r\n\r\n    # Custom task execution method\r\n    @asyncio.coroutine\r\n    def work(self, task):\r\n\r\n        # The taskobj field of the custom task object is the URL\r\n        try:\r\n            with aiohttp.Timeout(5):\r\n                # with aiohttp.Timeout(2):\r\n                with MyTimer() as timer:\r\n                    s = random.randint(1, 10)\r\n                    yield from asyncio.sleep(s)\r\n                    # print('[+] ok {}s'.format(timer.total))\r\n                    return {'res': '[+] ok {}s supposed: {}s'.format(timer.total, s)}\r\n\r\n        except Exception as exception:\r\n            print('[-] error:{}'.format(str(exception)))\r\n            return None\r\n\r\n\r\n    def handle_result(self):\r\n        # After the tasks finish, save the results as JSON\r\n        fmt_result = {'data': []}\r\n\r\n        # Fetch the result objects from the results queue\r\n        while not self.results.empty():\r\n            processResult = self.results.get_nowait()\r\n\r\n            # Collect every processed result\r\n            fmt_result['data'].append(processResult['result'])\r\n\r\n        return fmt_result\r\n\r\n@asyncio.coroutine\r\ndef main():\r\n    # Build the task list holding the task objects\r\n    tasks = [{'taskid': str(i) , 'taskobj': ''} for i in range(100000)]\r\n\r\n    # Initialize the task runner\r\n    mytask = MyTask(concurrent=1000, event_loop=loop, state_file='test3',save_mid_result=True, check_done=True)\r\n\r\n    # Start executing the tasks concurrently\r\n    result = yield from mytask.start(tasks)\r\n\r\n    # Save the results returned when the tasks complete\r\n    if result:\r\n        try:\r\n            with open('test3.txt', 'wb') as f:\r\n                f.write(json.dumps(result, indent=2).encode('utf-8'))\r\n        except Exception as exception:\r\n            print(exception)\r\n    print(result)\r\n\r\nif __name__ == '__main__':\r\n    # what is the scope of the loop?\r\n    
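# A minimal sketch of the run pattern used below (added for illustration;\r\n    # _noop_example is not part of the original script):\r\n    @asyncio.coroutine\r\n    def _noop_example():\r\n        yield from asyncio.sleep(0)\r\n    # it would be driven the same way: loop.run_until_complete(_noop_example())\r\n    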
loop.run_until_complete(main())\r\n","sub_path":"messy/python_basic/并发/协程/task_manage/async_frame_test2.py","file_name":"async_frame_test2.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"369814329","text":"# Last Update - 07/17 09:30\r\n#-*- coding:utf-8 -*-\r\n\r\nimport xml.etree.ElementTree as ElementTree\r\nimport os\r\nimport chardet\r\nimport sys\r\nfrom queue import Queue\r\nimport multiprocessing\r\nfrom multiprocessing.pool import ThreadPool\r\nimport threading\r\nimport time\r\nimport subprocess\r\nimport shlex\r\ndef printProgress (iteration, total, prefix = '', suffix = '', decimals = 1, barLength = 100):\r\n    formatStr = \"{0:.\" + str(decimals) + \"f}\"\r\n    percent = formatStr.format(100 * (iteration / float(total)))\r\n    filledLength = int(round(barLength * iteration / float(total)))\r\n    bar = '#' * filledLength + '-' * (barLength - filledLength)\r\n    sys.stdout.write('\\r%s |%s| %s%s %s' % (prefix, bar, percent, '%', suffix)),\r\n    sys.stdout.flush()\r\n\r\n\r\ndef explore_path():\r\n    img_files=[]\r\n    for path, dirs, files in os.walk(Default_Dir+'/temp'):\r\n        if files:\r\n            for filename in files:\r\n                if(filename.split('.')[1] == 'jpg'\r\n                   or filename.split('.')[1]=='png'\r\n                   or filename.split('.')[1]=='jpeg'\r\n                   or filename.split('.')[1]=='bmp'\r\n                   ):\r\n                    fullname=os.path.join(path,filename)\r\n                    img_files.append(fullname)\r\n    print(\"number of files to extract: %d\" %(len(img_files)))\r\n    return img_files;\r\n    \r\n# Main function\r\nDefault_Dir = os.getcwd()\r\nResult_Dir=Default_Dir+'/images/'\r\ntry:\r\n    os.stat(Result_Dir)\r\nexcept:\r\n    os.mkdir(Result_Dir)\r\n\r\nimg_list=explore_path();\r\nimg_list.sort();\r\ntotal=len(img_list);\r\ncount=0;\r\nprintProgress (count, total, 'Progress','Complete',1,50)\r\nfor img in img_list:\r\n    cmd=\"mv \"+img+\" \"+Result_Dir\r\n    cmd=cmd.replace('(','\\\\(')\r\n    cmd=cmd.replace(')','\\\\)')\r\n    cmd=cmd.replace(',','\\\\,')\r\n    os.system(cmd)\r\n    count+=1;\r\n    printProgress (count, total, 'Progress','Complete',1,50)\r\nprint(\"\\nExtraction Finished\")\r\n\r\n\r\n","sub_path":"parser/ST96_XML_Extractor.py","file_name":"ST96_XML_Extractor.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"614245985","text":"import websockets\nimport asyncio\nfrom callback import Callback\n\nclass WSListener(Callback):\n\n    def __init__(self, ws_url, sleep_ms=30000, command_list=[]):\n        super().__init__(\"WSS\")\n        self.ws_url = ws_url\n        self.command_list = command_list\n        self.sleep_ms = int(sleep_ms)\n\n    async def run(self):\n        self.logger.warning(\"APP STARTED! 
GOING TO LISTEN %s\" % self.ws_url)\n while True:\n try:\n async with websockets.connect(self.ws_url) as socket:\n self.logger.warning(\"successfully connected to %s\" % self.ws_url)\n\n #send messages if any supplied\n for cmd in self.command_list:\n self.logger.warning(\"sending message: %s\" % cmd)\n await socket.send(cmd)\n\n while True:\n message = await socket.recv()\n #self.logger.info(\"got message %s\" % message)\n # tasks = [\n # cb.callback(message) for cb in self.callbackList\n # ]\n # await asyncio.wait(tasks)\n await self.sendCallback(message)\n except Exception as e:\n self.logger.exception(\"error while performing listen to websocket %s\" % self.ws_url)\n self.logger.exception(e)\n self.logger.exception(\"going for a sleep to %sms\" % self.sleep_ms)\n await asyncio.sleep(self.sleep_ms / 1000)\n","sub_path":"src/lib/ws_listener.py","file_name":"ws_listener.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"272010735","text":"# Pygame Template by Jayden\nimport pygame as pg\n\n\nclass Game:\n def __init__(self, surface):\n # Attributes that should be in every graphical game \n self.close_clicked = False\n self.continue_game = True\n self.game_clock = pg.time.Clock()\n self.frames_per_second = 60\n self.surface = surface\n self.bg_color = pg.Color('black')\n self.frame_counter = 0\n self.width, self.height = self.surface.get_size()\n self.font = pg.font.SysFont('Comic Sans MS', int((self.height + self.width) / 50))\n self.curr_time = self.curr_time = self.game_clock.tick(self.frames_per_second)\n self.show_debug_info = False\n \n def play(self):\n while not self.close_clicked:\n self.handle_events()\n self.draw()\n if self.continue_game:\n self.update()\n self.decide_continue()\n \n def handle_events(self):\n events = pg.event.get()\n keys = pg.key.get_pressed()\n\n if keys[pg.K_NUMLOCK] == 1:\n self.show_debug_info = True\n else:\n self.show_debug_info = False\n\n if keys[pg.K_SPACE] == 1:\n self.continue_game = False\n\n for event in events:\n if event.type == pg.QUIT:\n self.close_clicked = True\n \n def display_debug_info(self):\n text = (\"T\" if self.continue_game else \"F\") + \" | \" + str(int(self.frame_counter * 1000 / self.curr_time)) + \"fps | \" + str(self.frame_counter) + \"frames | \" + str(self.curr_time / 1000) + \"s\"\n self.surface.blit(self.font.render(text, False, (255, 255, 255)), (self.font.get_height() / 2, self.height - self.font.get_height()))\n \n def draw(self):\n self.surface.fill(self.bg_color) \n if self.show_debug_info:\n self.display_debug_info()\n\n pg.display.update()\n \n def update(self):\n self.frame_counter += 1\n self.curr_time += self.game_clock.tick(self.frames_per_second)\n \n def decide_continue(self):\n # Example end of game condition\n if False:\n self.continue_game = False\n \n\ndef main():\n # Initialize pygame\n pg.init()\n # create a graphical display and set the caption\n pg.display.set_caption('Template') \n \n # MAIN GAME LOOP \n Game(pg.display.set_mode((800, 600))).play()\n \n pg.quit()\n\nmain()\n\n\n","sub_path":"python/CMPUT 174/Pong/pg_template.py","file_name":"pg_template.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"72567680","text":"'''\nCreated on 8920\n\n@author: saul\n'''\n\n\nclass bd():\n\n '''\n # a starting point for break/build\n need some context to clean this up. .\n a drop would probably help. . . 
.\n\n basic req:\n . oop. object no tuples\n . build up from item cost/value\n . clean variable names\n . cost * items = # REV\n . matc + wk + leaks = opcost\n rev - opcost = profit/loss (-+) red/black trees? bst...\n profit breakdown ==> stockbuybackbs or bonus?\n . . . . . . .\n fu math should've done this on the beach. . . .\n what if the noobs were preprogrammed for connectivity?\n # ====================== +++++++++++++++++++++\n'''\n\n\n val = 1.1\n div = 1\n\n def __init__(self, v=0):\n if v == 0: pass\n else: self.val = v\n # ====================== +++++++++++++++++++++\n # ====================== +++++++++++++++++++++\n def testStrings(self, inargs, msg):\n print(\". . . --->AcMe rock.its<<---. . . \")\n print(\" \" + msg)\n v = inargs.readInput()\n print(v)\n self.checkCommandArgs(inargs)\n\n # ====================== +++++++++++++++++++++\n\n# -------------------------------------------------------------\n\n #some blank functions/methods/what. . .\ndef t11(): print('non') #print('no initial args')\ndef t1( a): print ('1 : ', a)\ndef t1111(a): print('1000 : ', a)\n # ths = something conditional ? ok : no way;\n # condition, input, tf, ff\n # returned output\ndef qny(c, arg, f, m):\n b = []\n # el\n if len(arg) == 0: #t11()\n if c : b = f()\n else : b = m()\n # elif len(arg) == 1:\n # if c : b = f(arg)\n # else : b = m(arg)\n return b\n\ndef qtest():\n #manage arguments for gen funcs.\n aaa = []\n a = ' blah'\n aa = ['stuff', 'jumk', 'junk']\n print('? test. . . ')\n t1(\" blahhh\")\n t1111(aa)\n for i in range(11):\n if i%3==1: aa.append(a)\n qny( i%4==0, aaa, t1, t11 )\n\n\n# -------------------------------------------------------------\n#\n# \"{:><15}|{:-^10}|{:<>15}\".format(left_aligned, center, right_aligned)\n# 'Left Align>>>>>|-Centered-|<<<<Right Align'\n\ndef evenSplit(a, n, showSplit=False, si = False):\n s = a // n # NOTE: whole percentage only. no change ;p\n r = a % n\n st = '{:,.2f}'.format(n) + ' split : {:,.2f}'.format(a) + ' ; extra: {:,.2f}'.format(r)\n if showSplit:\n show(st)\n for i in range(0, int(n) ):\n show( \"{: <11}\".format(i+1 ) + \"... $: {:,.2f}\".format(s) )\n if si: show(\" n: {: <3}\".format(n) + \" x p: {:,.2f}\".format(s) +\" extra: {:,.2f} \".format(r) )\n return s\n\ndef bigSplit(a, n, nn, showSplit=False):\n hf = a/3\n evenSplit(hf, n, showSplit)\n evenSplit(hf, nn, showSplit)\n return hf\n\ndef adsp(msg):\n msg+= '\\n'\n return msg\n\n\n# -------------------------------------------------------------\n\ndef rankSplit(a, n, showSplit=False, si = False):\n nn = n+2 #one part to split\n s = a// nn\n ss = s/n #i dont not not not not know if if know what i'm doing here, maybe...\n r = a%nn + s #one part for extra\n st = '{:,.2f}'.format(n) + ' split : {:,.2f}'.format(a) + ' ; extra: {:,.2f}'.format(r)\n if showSplit:\n show(st)\n for i in range(0, int(n) ):\n show( \" {: <5}\".format(i+1 ) + \"... $: {:,.2f}\".format(s+(n-i)*ss) )\n if si: show(\" n: {:.0f}\".format(n) + \" x p: {:,.2f}\".format(s) +\" extra: {:,.2f}\".format(r) )\n return r\n\n\n# -------------------------------------------------------------\n# print with prefix/sufix formatting...\ndef show(msg, pre = ' ', sux = ' . . . 
'):\n s = pre + str(msg)\n l = len(s)\n y = 55-len(sux)-l\n for i in range(y): s+= ' '\n s += sux\n print(s)\n return s\n\n# show a msg and get a response, expect a float/int...\ndef getuserval(msg, printInput=False ):\n print(msg)\n n = input()\n f = float( n )\n # i = int( n )\n if printInput:\n print(\"Value input: {:.2f}\".format( f ) )\n return f\n\n\ndef toc(a):\n return a *9876\n#idk deal with list to str <-->\n#use a real gui i guess...\ndef showL(lst, showIndex = False):\n i = 0\n for e in lst:\n f = float(e)\n i+=1\n show( '{:.0f}'.format(i) + ' {:.2f}'.format(f))\n# -------------------------------------------------------------\n#returns true for list, false for string/other...\ndef show(msgs, printOn=False, pre = ' ', sux = ' . . . '):\n #chekc if its a string or list of strings...\n #send them to show to display std pre/suf\n if isinstance(msgs, list):\n # print(' -- list found ---')\n for m in msgs:\n shows(m, pre, sux)\n return True\n elif isinstance(msgs, str):\n # print(' -- str found ---')\n shows(msgs, pre, sux) #expects strings, so i'm not checking others now...\n return False\n else :\n print(' -- invalid input found ---')\n return False\n\n\n#add pre/suf to string then print..\ndef shows(msg,pre, sux):\n s = pre + str(msg)\n l = len(s)\n y = 55-len(sux)-l\n for i in range(y): s+= ' '\n s += sux\n print(s)\n return s\n\n\ndef showDemo(t = 10000):\n\n # t = 27777\n f = 15\n tt=0\n l = [t, f, tt]\n # print('-- acme rockets made in rrlabs -_^')\n show('mepmep!!!')\n # tt = showDiv(t, f , True)\n show('Even split in ' + str(f) + ' parts. ')\n # l[2] +=\n evenSplit(t, f, True, True)\n show('3/3 split in ' + str(f) + ' parts. ')\n l[2] += bigSplit(t, f, 3, True)\n total = l[2]\n # rk = rankSplit(t, 3, True)\n show( \" total $ {:,.2f}\".format( total) )\n show( \" split $ {:,.2f}\".format( t ) )\n show(' --> Break Down {:,.2f}'.format(f) +' parts. done.')\n return total\n\ndef demo2(temp):\n islist = show(temp)\n if islist:\n show('found the list with ' + str(len(temp)) + ' args for input')\n else : show('found a single input, checking type...')\n\n\ndef multiIn(msgs):\n show(msgs)\n tetmsg = ' Testing multi arg input '\n show(tetmsg)\n checkInput = input()\n ck = checkInput.split(' ')\n demo2(ck)\n return ck\n\nif __name__ == '__main__':\n # prompt user for values, do the math. with some formatting. .\n entries = [ ['na', 'just a test example'] ]\n done = False\n while not done:\n t = multiIn(\"Enter a value( $ ) to analyze :\")\n if len(t) >=1 and t[0] == 'exit':\n show(' previous entries: ')\n show(' Loops: ' + str(len(entries)))\n\n if len(t) >=2:\n if t[1] == 'all': show(entries)\n if t[1] == 'now': done = True\n if len(t) == 1:\n for li in entries: show(li)\n\n show(' and then? ')\n entries.append(t)\n # f = getuserval(\"Enter number of items (1. 
f): \", False)\n # tl = demo2(t)\n # show( \"xx $ {:,.2f}\".format(tl ) )\n","sub_path":"nausated.py","file_name":"nausated.py","file_ext":"py","file_size_in_byte":6878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"205207460","text":"import os, json, cv2\r\nimport numpy as np\r\n\r\n\r\n## leitura dos arquivos json -- output yolo\r\n\r\ndef read_json(crop_size):\r\n json_path = os.path.join(os.getcwd(),'yolo',crop_size,'test','out')\r\n json_list = [x for x in os.listdir(json_path) if x.endswith('.json')]\r\n x_array=[]\r\n for each in json_list:\r\n f = open(os.path.join(json_path,each),'r')\r\n r = json.loads(f.read())\r\n if r:\r\n xmin = r[0]['topleft']['x']\r\n ymin = r[0]['topleft']['y']\r\n xmax = r[0]['bottomright']['x']\r\n ymax = r[0]['bottomright']['y']\r\n else:\r\n xmin,ymin,xmax,ymax = [0,0,0,0] # find no bounding box\r\n x_array.append([xmin,ymin,xmax,ymax])\r\n x_array = np.array(x_array)\r\n return x_array\r\n\r\n\r\ndef compute_i(img,BB):\r\n \"\"\"Calculates intersection between contour and bounding box, return contour with large intersection.\r\n img: list [number_of_contours (x1,y1)]\r\n BoundingBox: (x1,y1,x2,y2)\r\n\r\n output: img with intersection BB\r\n \"\"\"\r\n # Calculate intersection areas\r\n _, c_img, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\r\n area_intersect = 0\r\n contour = None\r\n if c_img and sum(BB)!=0:\r\n for each in c_img:\r\n x1 = np.maximum(np.min(each[:,:,0]),BB[0])\r\n x2 = np.minimum(np.max(each[:,:,0]),BB[2])\r\n y1 = np.maximum(np.min(each[:,:,1]),BB[1])\r\n y2 = np.minimum(np.max(each[:,:,1]),BB[3])\r\n intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0)\r\n if intersection > area_intersect:\r\n contour = each\r\n if area_intersect==0 and c_img:\r\n lengths = []\r\n for coord in c_img:\r\n lengths.append(len(coord))\r\n contour = c_img[np.argmax(lengths)]\r\n return contour\r\n\r\n\r\ndef softmax_fcn(*args):\r\n total_exp_arg = np.sum(np.exp(args))\r\n exp_arg = np.exp(args)\r\n soft = exp_arg/total_exp_arg\r\n ind = np.argmax(soft, axis=None)\r\n return int(ind)\r\n\r\n\r\ndef squeeze_predict(x_data_pred):\r\n '''input pred with 3 ohe classes - output pred with range 0-2 equal classes\r\n x_data_pred :[samples,width,height,ohe_classes]\r\n output :\r\n imfinal [samples,width,height,1]'''\r\n ch0_arr = np.asarray (x_data_pred[:,:,:,0])\r\n ch1_arr = np.asarray (x_data_pred[:,:,:,1])\r\n ch2_arr = np.asarray (x_data_pred[:,:,:,2])\r\n ch_shape = ch0_arr.shape\r\n # transformando array em um vetor\r\n ch0_arr = np.reshape(ch0_arr,[ch_shape[0]*ch_shape[1]*ch_shape[2]])\r\n ch1_arr = np.reshape(ch1_arr,[ch_shape[0]*ch_shape[1]*ch_shape[2]])\r\n ch2_arr = np.reshape(ch2_arr,[ch_shape[0]*ch_shape[1]*ch_shape[2]])\r\n\r\n im_final = np.zeros(ch0_arr.shape)\r\n for ii in range(len(ch0_arr)):\r\n im_final[ii] = softmax_fcn(ch0_arr[ii],ch1_arr[ii],ch2_arr[ii])\r\n im_final = np.reshape(im_final,ch_shape)\r\n return im_final","sub_path":"utilities/pos_process_imgs.py","file_name":"pos_process_imgs.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"292666868","text":"# 处理结构\nimport tensorflow as tf\nimport numpy as np\n\n# create data\nx_data = np.random.rand(100).astype(np.float32)\ny_data = x_data*0.1+0.3\n\nWeights = tf.Variable(tf.random_uniform([1], -1, 1.))\nbiases = tf.Variable(tf.zeros([1]))\n\ny = Weights*x_data+biases\n\nloss = 
tf.reduce_mean(tf.square(y-y_data))\n\noptimizer = tf.train.GradientDescentOptimizer(0.5)\ntrain_op = optimizer.minimize(loss)\n\ninit_op = tf.global_variables_initializer()\n\nsess = tf.Session()\nsess.run(init_op)\n\nfor step in range(201):\n sess.run(train_op)\n if step % 20 == 0:\n print(step, sess.run(Weights), sess.run(biases))\n\n\n","sub_path":"tutorials/tensorflow_01.py","file_name":"tensorflow_01.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"138750666","text":"# List of lists to dictionary extra credit\r\n\r\nfrom seat import Premium, Choice, Regular\r\n\r\n# read from file into chart\r\nchart = [] \r\nwith open(\"lab7input2.txt\") as infile:\r\n (premium, choice, regular) = infile.readline().split()\r\n for line in infile : \r\n row = []\r\n for item in line.split() : # build each row\r\n if item == premium :\r\n seat = Premium(premium)\r\n elif item == choice :\r\n seat = Choice(choice)\r\n else :\r\n seat = Regular(regular)\r\n row.append(seat)\r\n chart.append(row) \r\n\r\n# print chart\r\nprint()\r\nfor row in range(len(chart)): \r\n for seatObj in chart[row]:\r\n print(\"%5s\" % seatObj.getPrice(), end=\"\")\r\n print() \r\nprint() \r\n\r\n# buy 3 seats \r\nfor i in range(3) : \r\n val = input(\"Enter row,col: \")\r\n (row, col) = [int(elem) - 1 for elem in val.split(',')]\r\n\r\n if chart[row][col].isTaken() == False :\r\n chart[row][col].setPrice('X') # set seat to 'X'\r\n else:\r\n print(\"Sorry, that seat is not available.\")\r\n\r\n# print chart\r\nprint()\r\nfor row in range(len(chart)):\r\n for seatObj in chart[row]:\r\n print(\"%5s\" % seatObj.getPrice(), end=\"\")\r\n print() \r\nprint() \r\n \r\n\r\n","sub_path":"Lab EC/EC.py","file_name":"EC.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"382418961","text":"import tensorflow as tf\nimport numpy as np\nimport pandas as pd\n\nimport tensorflow_datasets as tfds\nfrom tensorflow.keras import backend as K\n\ndef gen_encoder():\n dt = pd.read_csv(\"data/textonly.csv\")\n\n encoder = tfds.features.text.SubwordTextEncoder.build_from_corpus(\n dt[\"text\"], target_vocab_size=2**15)\n encoder.save_to_file(\"encoders/tweettextencoder\")\n\ndef check_weights():\n model = tf.keras.models.load_model('cnnmodel.h5')\n print(model.layers[-1].get_weights())\n\ndef test_model():\n encoder = tfds.features.text.SubwordTextEncoder.load_from_file(\"encoders/tweettextencoder\")\n model = tf.keras.models.load_model('cnnmodel.h5')\n\n\ndef try_model():\n encoder = tfds.features.text.SubwordTextEncoder.load_from_file(\"encoders/tweettextencoder\")\n dt = pd.read_csv(\"data/newtextonly.csv\")\n model = tf.keras.models.load_model('cnnmodel.h5')\n \n while True:\n line = input()\n print(model.predict([encoder.encode(line)]))\n\n# test = np.array([encoder.encode(row[0]) for row in dt])\n# test = tf.keras.preprocessing.sequence.pad_sequences(test,value=0,padding='post',maxlen=256)\n\n inp = model.input\n outputs = [layer.output for layer in model.layers]\n functor = K.function([inp, K.learning_phase()], outputs)\n\n \n# res = dt.sample()\n# out = functor([test, 1.])\n #print(len(out))\n #print(len(out[-2]))\n #for arr in out[-2]:\n # print(len(arr))\n # print(len(arr[0]))\n if True:\n res = dt.sample(1000)\n test = np.array([encoder.encode(row) for row in res[\"text\"].values])\n test = 
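The tensorflow_01.py record above fits y = 0.1x + 0.3 with the legacy TF1 session API. The same gradient-descent loop in plain NumPy, as a dependency-free sketch (the learning rate of 0.5 and 201 steps mirror the record):

    import numpy as np

    x = np.random.rand(100).astype(np.float32)
    y = x * 0.1 + 0.3
    w, b, lr = np.random.uniform(-1, 1), 0.0, 0.5
    for step in range(201):
        pred = w * x + b
        grad_w = 2 * np.mean((pred - y) * x)  # d(MSE)/dw
        grad_b = 2 * np.mean(pred - y)        # d(MSE)/db
        w, b = w - lr * grad_w, b - lr * grad_b
        if step % 20 == 0:
            print(step, w, b)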
tf.keras.preprocessing.sequence.pad_sequences(test,value=0,padding='post',maxlen=256)\n result = list(zip(functor([test, 1.])[-2], res[\"text\"].values, model.predict(test)))\n #print(len(result))\n #print(len(result[99]))\n #print(len(result[99][0]))\n #print(len(layer_outs[0][0][0]))\n\n\n def sort_by_attribute(i):\n def wraps(a):\n return a[0][i]\n return wraps\n\n \n results = []\n for i in range(16):\n ressorted = sorted(result, key=sort_by_attribute(i))\n results.append([(case,sort_by_attribute(i)(case)) for case in ressorted])\n\n printable = pd.DataFrame(columns=[str(\"attr\" + str(i)) for i in range(16)])\n for i in range(len(res.index)):\n for key, result in enumerate(results):\n printable.loc[i] = [str(result[i][0][1]) + \" ({})\".format(result[i][1]) for result in results]\n print(printable)\n printable.to_csv(\"out.csv\")\n # Get the second last layer, the dense layer with 16 outputs\n\n\n\ndef main():\n\n percentage = 0.7\n frac = 1\n if True:\n dt = pd.read_csv(\"data/newtextonly.csv\")\n dt = dt.sample(frac=frac)\n y_values = dt[\"is_fake\"].values\n y_len = len(y_values)\n y_train = y_values[:int(y_len*percentage)]\n y_test = y_values[int(y_len*percentage):]\n\n encoder = tfds.features.text.SubwordTextEncoder.load_from_file(\"encoders/tweettextencoder\")\n\n x_values = dt[\"text\"].values\n x_len = len(x_values)\n x_train = x_values[:int(x_len*percentage)]\n x_train = np.array([encoder.encode(row) for row in x_train])\n x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train,value=0,padding='post',maxlen=256)\n x_test = x_values[int(x_len*percentage):]\n x_test = np.array([encoder.encode(row) for row in x_test])\n x_test = tf.keras.preprocessing.sequence.pad_sequences(x_test,value=0,padding='post',maxlen=256)\n else:\n dttrain = pd.read_csv(\"data/splittextonlytrain.csv\")\n dttest = pd.read_csv(\"data/splittextonlytest.csv\")\n dttrain = dttrain.sample(frac=frac)\n dttest = dttest.sample(frac=frac)\n y_train = dttrain[\"is_fake\"].values\n y_test = dttest[\"is_fake\"].values\n\n x_train = dttrain[\"text\"].values\n x_test = dttest[\"text\"].values\n\n encoder = tfds.features.text.SubwordTextEncoder.load_from_file(\"encoders/tweettextencoder\")\n\n x_train = np.array([encoder.encode(row) for row in x_train])\n x_train = tf.keras.preprocessing.sequence.pad_sequences(x_train,value=0,padding='post',maxlen=256)\n\n x_test = np.array([encoder.encode(row) for row in x_test])\n x_test = tf.keras.preprocessing.sequence.pad_sequences(x_test,value=0,padding='post',maxlen=256)\n\n model = tf.keras.models.Sequential([\n tf.keras.layers.Embedding(2**15,16,input_length=256),\n# tf.keras.layers.Conv1D(32, kernel_size=5,activation=tf.nn.relu),\n tf.keras.layers.GlobalMaxPooling1D(),\n# tf.keras.layers.Conv1D(64, kernel_size=5,activation=tf.nn.relu),\n# tf.keras.layers.GlobalMaxPooling1D(),\n# tf.keras.layers.Conv1D(64, kernel_size=5,activation=tf.nn.relu),\n# tf.keras.layers.GlobalMaxPooling1D(),\n tf.keras.layers.Dense(16, activation=tf.nn.relu),\n tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)\n ])\n\n model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy', 'Precision', 'Recall'])\n\n model.fit(x_train, y_train, epochs=10)\n model.save(\"cnnmodel2.h5\");\n metrics = model.evaluate(x_test, y_test)\n loss = metrics[0]\n accuracy = metrics[1]\n precision = metrics[2]\n recall = metrics[3]\n F1score = 2 * (recall * precision) / (recall + precision)\n\n print(\"Report:\\nAccuracy: {}%\\nPrecision: {}%\\nRecall: {}% \\nF1 Score: {}%\".format(accuracy * 
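The dnn.py record above pulls intermediate activations through K.function over all layer outputs. On current tf.keras the usual pattern is a second Model that exposes just the layer you want, sketched here (layer index -2, the 16-unit dense layer, follows the record's architecture):

    import tensorflow as tf

    def activation_extractor(model, layer_index=-2):
        # Map the original inputs to one hidden layer's output.
        return tf.keras.Model(inputs=model.input,
                              outputs=model.layers[layer_index].output)

    # features = activation_extractor(model).predict(padded_inputs)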
100, precision * 100, recall * 100, F1score * 100))\n\n with open(\"results.csv\", \"a\") as f:\n f.write(\"dnn,{},{},{},{},{}\\n\".format(accuracy, precision, recall, F1score, percentage, len(dttrain.index) + len(dttest.index)))\n\n \n#gen_encoder()\nmain()\n#try_model()\n#check_weights()\n","sub_path":"tests/dnn.py","file_name":"dnn.py","file_ext":"py","file_size_in_byte":5828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"604708463","text":"from datetime import datetime, timedelta\nimport math\nimport os\n\nclass ADSBData:\n\tdef __init__(self, planeHex, flightNumber=\"\", dataRecord=[], station=\"INIT\" ):\n\t\tself.planeHex \t\t= planeHex.upper()\n\t\tself.flightNumber\t= flightNumber.upper()\n\t\tself.dataRecord \t= []\n\t\tself.stationRecord\t= []\n\t\tself.lastUpdate \t= datetime.now()\n\t\tself.lastUsedDataRecord = -1\n\t\tself.stationRecord.append(station)\n\t\tself.dataRecord.append(dataRecord)\n\n\tdef clearStationRecord(self):\n\t\twhile (len(self.stationRecord)>1):\n\t\t\tself.stationRecord.pop(0)\n\t\tpass\n\n\tdef returnHistory(self):\n\t\tif len(self.dataRecord)>1 :\n\t\t\treturn ', \"history\" :' + str(self.dataRecord).replace(\"'\",'\"')\n\t\telse:\n\t\t\treturn \"\"\n\t\n\tdef returnShortDict(self):\n\t\tif len(self.dataRecord)>1 :\n\t\t\tcurValidPos = 1\n\t\t\t\n\t\t\tif(self.dataRecord[-1][4] != \"\"):\n\t\t\t\tcurSquawk = self.dataRecord[-1][4]\n\t\t\telse:\n\t\t\t\tcurSquawk = \"----\"\n\n\t\t\tif(str(self.flightNumber) != \"\"):\n\t\t\t\tcurFlightNum=self.flightNumber\n\t\t\telse:\n\t\t\t\tcurFlightNum = 'N/A'\n\t\t\t\n\t\t\tif(self.dataRecord[-1][5] != \"\"):\n\t\t\t\tcurLat = self.dataRecord[-1][5]\n\t\t\telse:\n\t\t\t\tcurLat = 0.0\n\t\t\t\tcurValidPos = 0\n\t\t\t\t\n\t\t\tif(self.dataRecord[-1][6] != \"\"):\n\t\t\t\tcurLon = self.dataRecord[-1][6]\n\t\t\telse:\n\t\t\t\tcurLon = 0.0\n\t\t\t\tcurValidPos = 0\n\n\t\t\tif(self.dataRecord[-1][10] != \"\"):\n\t\t\t\tcurAlt = self.dataRecord[-1][10]\n\t\t\telse:\n\t\t\t\tcurAlt = '\"N/A\"'\n\n\t\t\tif(self.dataRecord[-1][16] != \"\"):\n\t\t\t\tcurVRate = self.dataRecord[-1][16]\n\t\t\telse:\n\t\t\t\tcurVRate = '\"N/A\"'\n\t\t\t\n\t\t\tif(self.dataRecord[-1][19] != \"\"):\n\t\t\t\tcurTrack = self.dataRecord[-1][19]\n\t\t\telse:\n\t\t\t\tcurTrack = '\"N/A\"'\n\n\t\t\tif(self.dataRecord[-1][24] != \"\"):\n\t\t\t\tcurSpeed = self.dataRecord[-1][24]\n\t\t\telse:\n\t\t\t\tcurSpeed = '\"N/A\"'\n\n\t\t\treturn '\"hex\": \"'+str(self.planeHex)+'\", \"squawk\": \"'+ str(curSquawk) + '\", \"flight\": \"'+ str(curFlightNum) +'\", \"lat\": '+ str(curLat) + ', \"lon\": '+ str(curLon) +', \"validposition\": '+ str(curValidPos) +', \"altitude\": '+ str(curAlt) +', \"vert_rate\": '+ str(curVRate) +', \"track\": '+ str(curTrack) +', \"validtrack\": 1, \"speed\": '+ str(curSpeed) +', \"messages\": '+str(len(self.dataRecord))+', \"seen\": '+str(math.ceil((datetime.now()-self.lastUpdate).total_seconds()))+', \"station\": \"'+str(self.stationRecord[-1])+'\"'\n\t\telse:\n\t\t\treturn \"\"\n\t\t\t\n\t\t\n\tdef returnAircraftKML(self):\n\t\tif len(self.dataRecord)>1 :\n\t\t\tcurValidPos=1\n\t\t\tif(str(self.flightNumber) != \"\"):\n\t\t\t\tcurFlightNum=self.flightNumber\n\t\t\telse:\n\t\t\t\tcurFlightNum = 'N/A'\n\t\t\t\n\t\t\tif(self.dataRecord[-1][5] != \"\"):\n\t\t\t\tcurLat = self.dataRecord[-1][5]\n\t\t\telse:\n\t\t\t\tcurLat = 0.0\n\t\t\t\tcurValidPos = 0\n\t\t\t\t\n\t\t\tif(self.dataRecord[-1][6] != \"\"):\n\t\t\t\tcurLon = self.dataRecord[-1][6]\n\t\t\telse:\n\t\t\t\tcurLon = 
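The F1 computation in the same record divides by recall + precision, which raises ZeroDivisionError when the model predicts no positives at all; a guarded sketch:

    def f1_score(precision, recall):
        # Harmonic mean of precision and recall; define F1 as 0 when both are 0.
        if precision + recall == 0:
            return 0.0
        return 2 * precision * recall / (precision + recall)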
0.0\n\t\t\t\tcurValidPos = 0\n\n\t\t\tcurReg = \"\"\t\t\t\t##need more insights about this\n\t\t\tcurType = \"\"\t\t\t\t##need more insights about this\n\t\t\t\n\t\t\tif(self.dataRecord[-1][10] != \"\"):\n\t\t\t\tcurAlt = self.dataRecord[-1][10]\n\t\t\telse:\n\t\t\t\tcurAlt = '\"N/A\"'\n\t\t\t\tcurValidPos = 0\n\n\t\t\tif(self.dataRecord[-1][9] != \"\"):\n\t\t\t\tcurFLevel = self.dataRecord[-1][9]\n\t\t\telse:\n\t\t\t\tcurFLevel = '\"N/A\"'\n\t\t\t\n\t\t\tif(self.dataRecord[-1][19] != \"\"):\n\t\t\t\tcurTrack = self.dataRecord[-1][19]\n\t\t\telse:\n\t\t\t\tcurTrack = '\"N/A\"'\n\t\t\t\tcurValidPos = 0\n\n\t\t\tif (curValidPos ==1):\n\t\t\t\treturn \"<Placemark><description>Flight : \" + str(curFlightNum) + \"\\nReg : \" + str(curReg) + \"\\nHex : \" + str(self.planeHex) + \"\\nType : \" + str(curType) + \"\\nFlt Level : \" + str(curFLevel) + \"</description><name>\" + curFlightNum + \" \" + str(curReg) + \" \" + self.planeHex + \" \" + str(curType) + \" \" + str(curFLevel) + \"</name><styleUrl>#mystyle\" + str(int(float(curTrack)/5)).zfill(2) + \"</styleUrl><visibility>1</visibility><Point><altitudeMode>absolute</altitudeMode><coordinates>\" + str(curLon) + \",\" + str(curLat) + \",\" + str(int(float(curAlt)*0.3048)) + \"</coordinates></Point></Placemark>\"\n\t\t\telse:\n\t\t\t\treturn \"\"\n\t\t\n\tdef returnTrailKML(self): \n\t\tif len(self.dataRecord)>1 :\n\t\t\tlistOfCoordinate = []\n\t\t\tif(str(self.flightNumber) != \"\"):\n\t\t\t\tcurFlightNum=self.flightNumber\n\t\t\telse:\n\t\t\t\tcurFlightNum =self.planeHex\n\t\t\t\n\t\t\tfor x in self.dataRecord:\n\t\t\t\tif((x[6] != \"\") and (x[5] != \"\") and (x[10] != \"\")):\n\t\t\t\t\tcoordInfo = str(x[6]+\",\"+x[5]+\",\"+str(int(float(x[10])*0.3048)))\n\t\t\t\t\tif (coordInfo not in listOfCoordinate):\n\t\t\t\t\t\tlistOfCoordinate.append(coordInfo)\n\n\t\t\tif len(listOfCoordinate)>1:\n\t\t\t\tstrReturn = \"<Placemark> <name>\" + curFlightNum + \"-trail</name> <styleUrl>#mystyle72</styleUrl> <visibility>1</visibility> <LineString> <extrude>0</extrude> <tessellate>1</tessellate> <altitudeMode>absolute</altitudeMode> <coordinates> \"\n\t\t\t\tfor i in listOfCoordinate:\n\t\t\t\t\tstrReturn+=i+\" \"\n\t\t\t\tstrReturn+= \"</coordinates> </LineString> </Placemark>\"\n\t\t\t\treturn strReturn\n\t\t\telse:\n\t\t\t\treturn \"\"\n","sub_path":"ADSBData.py","file_name":"ADSBData.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"43295068","text":"\ndef convert_units(value, unit_from, unit_to):\n import scipy.constants as sc\n rydberg_to_ev = sc.value(\"Rydberg constant times hc in eV\")\n rydberg_to_joule = sc.value(\"Rydberg constant times hc in J\")\n bohr_to_meter = sc.value(\"Bohr radius\")\n\n if unit_from == \"cubic bohr\" and unit_to == \"cubic angstrom\":\n temp = value**(1/3)\n converted_value_volume = ((temp*bohr_to_meter)*10**10)**3\n return converted_value_volume\n\n elif unit_from == \"rydberg\" and unit_to == \"ev\":\n converted_value_energy = value * rydberg_to_ev\n return converted_value_energy\n\n elif unit_from == \"rydberg per cubic bohr\" and unit_to == \"gigapascals\":\n numerator = value * rydberg_to_joule\n denominator = bohr_to_meter**3\n converted_value_bulk_modulus = (value*numerator/denominator) / 1000000000\n return converted_value_bulk_modulus\n else:\n raise ImportError(\"Your inputs cannot be converted\")\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Final Exam and 
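ADSBData.returnShortDict above assembles JSON by hand-concatenating quoted fragments, which is easy to get subtly wrong. A sketch of the same record built as a dict and serialized with the standard library (field names follow the record's output keys; the subset shown is illustrative):

    import json

    def short_dict(plane_hex, squawk, flight, lat, lon, altitude):
        payload = {"hex": plane_hex, "squawk": squawk, "flight": flight,
                   "lat": lat, "lon": lon, "altitude": altitude,
                   "validposition": int(lat is not None and lon is not None)}
        return json.dumps(payload)  # quoting and escaping handled for us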
Rewiew/convert_units.py","file_name":"convert_units.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"418395882","text":"import math\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\nfrom plotting_parameters import *\n\ndef latexify(fig_width=None, fig_height=None, columns=1, square=False):\n \"\"\"Set up matplotlib's RC params for LaTeX plotting.\n Call this before plotting a figure.\n\n Parameters\n ----------\n fig_width : float, optional, inches\n fig_height : float, optional, inches\n columns : {1, 2}, optional\n square: boolean,optional\n \"\"\"\n\n # code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples\n\n # Width and max height in inches for IEEE journals taken from\n # computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf\n\n assert(columns in [1,2,3])\n\n if fig_width is None:\n if columns == 1:\n fig_width = COLUMN_WIDTH\n elif columns == 2:\n fig_width = COLUMN_WIDTH * COLUMN_HALFSIZE \n else:\n fig_width = COLUMN_WIDTH * COLUMN_THIRDSIZE\n\n if fig_height is None:\n golden_mean = (math.sqrt(5)-1.0)/2.0 # Aesthetic ratio\n fig_height = fig_width*golden_mean # height in inches\n \n if square:\n fig_height = fig_width\n \n MAX_HEIGHT_INCHES = 8.0\n if fig_height > MAX_HEIGHT_INCHES:\n print(\"WARNING: fig_height too large:\" + fig_height + \n \"so will reduce to\" + MAX_HEIGHT_INCHES + \"inches.\")\n fig_height = MAX_HEIGHT_INCHES\n\n params = {'backend': 'ps',\n 'text.latex.preamble': '\\\\usepackage{gensymb}\\n\\\\usepackage{amsmath}',\n 'axes.labelsize': FONTSIZE, # fontsize for x and y labels (was 10)\n 'axes.titlesize': FONTSIZE,\n 'font.size': FONTSIZE, # was 10\n 'legend.fontsize': FONTSIZE, # was 10\n 'xtick.labelsize': FONTSIZE,\n 'ytick.labelsize': FONTSIZE,\n 'lines.linewidth': 1.0,\n 'text.usetex': True,\n 'figure.figsize': [fig_width,fig_height],\n 'font.family': 'serif'\n }\n\n matplotlib.rcParams.update(params)\n\ndef remove_spines(axis, axis_side='left', sharex=False, sharey=False):\n axis.spines['top'].set_visible(False)\n if not sharex:\n axis.xaxis.set_ticks_position('bottom')\n if axis_side == 'left':\n axis.spines['right'].set_visible(False)\n if not sharey:\n axis.yaxis.set_ticks_position('left')\n axis.yaxis.set_label_position('left')\n else:\n axis.spines['left'].set_visible(False)\n if not sharey:\n axis.yaxis.set_ticks_position('right')\n axis.yaxis.set_label_position('right')\n\ndef create_axes(n_columns=1, axis_side='left', subplots_rows=1, subplots_columns=1, sharex=False, sharey=False, square=False):\n latexify(columns=n_columns, square=square)\n fig, axes = plt.subplots(subplots_rows, subplots_columns, sharex=sharex, sharey=sharey)\n if subplots_rows == 1 and subplots_columns == 1:\n remove_spines(axes, axis_side)\n else:\n for axis in axes.flatten():\n remove_spines(axis, axis_side, sharex=sharex, sharey=sharey)\n return fig, axes\n\ndef save_plot(filename):\n plt.savefig(filename, pad_inches=PAD_INCHES, bbox_inches = 'tight')\n plt.show()\n\ndef attach_colorbar(axis, im, side='right'):\n divider = make_axes_locatable(axis)\n cax = divider.append_axes(side, size=\"5%\", pad=0.05)\n if side == 'right' or side =='left':\n orientation = 'vertical'\n else:\n orientation = 'horizontal'\n return plt.colorbar(im, cax=cax, 
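In the convert_units.py record above, the rydberg-per-cubic-bohr branch multiplies by value twice (numerator already contains it) and signals unsupported units with ImportError. A corrected sketch of that branch using the same scipy.constants lookups, with ValueError as the more appropriate exception:

    import scipy.constants as sc

    def ry_per_bohr3_to_gpa(value):
        # Ry/bohr^3 -> J/m^3 (= Pa) -> GPa; `value` enters the formula exactly once.
        numerator = value * sc.value("Rydberg constant times hc in J")
        denominator = sc.value("Bohr radius") ** 3
        return (numerator / denominator) / 1e9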
orientation=orientation)\n","sub_path":"shared/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"540559306","text":"import pathlib #where we are?\nfrom pathlib import Path\nimport os #catalogs and files support\nfrom shutil import copyfile\n\n#variables\nsource_path = pathlib.Path().absolute()\nsource_index_file = source_path / \"index.html\"\nsource_images_path = source_path / \"/images\"\nsource_content_path = source_path / \"/content\"\nsource_favicon_path = source_path / \"./content/images/\" / \"favicon.png\"\nsource_texts_files_path = source_path / \"content/texts\"\n\npublic_path = str(source_path) + \"/\" + \"public\"\npublic_file_path = ''\npublic_index_file = public_path + \"/\" + \"index.html\"\npublic_styles_file = public_path + \"/\" + \"style.css\"\npublic_images_path = public_path + \"/\" + \"images\"\npublic_content_path = public_path + \"/\" + \"content\"\npublic_texts_path = public_content_path + \"/\" + \"texts\"\npublic_favicon_path = public_images_path + \"/content\" + \"favicon.png\"\npublic_texts_files_path = public_path + \"/content/texts\"\npublic_categories_files_path = public_path + \"/\" + \"content/texts/\"\n\n\nfilename = ''\nfilePath = \"public\"\nmeta = str(open('index.html', 'r').read())\nheader = str(open('header.html', 'r').read())\nnav = str(open('nav.html', 'r').read())\naside = str(open('aside.html', 'r').read())\ncontent = str(open('index-content.html', 'r').read())\nfooter = str(open('footer.html', 'r').read())\n\n#functions\n## generowanie plików html dla content bez kategorii\ndef htmlGen(filename, filePath, fileType, meta, header, nav, aside, content, footer):\n if (filename == \"index.html\"):\n public_file_path = filePath\n else:\n public_file_index = filePath.find('/content/')\n public_file_path = filePath[:public_file_index] + '/public' + filePath[public_file_index:] ##tutaj jest dodawany folder public do ścieżki\n #zlepianie części składowych w jedną zmienną i zapis do pliku\n with open(public_file_path,'wt') as f:\n f.write(meta)\n f.write(header)\n f.write(nav)\n f.write(aside)\n f.write(content)\n f.write(footer)\n f.close()\n\n#files and folders creation\nos.makedirs((public_path))\nos.makedirs((public_images_path))\nos.makedirs((public_content_path))\nos.makedirs((public_texts_path))\nos.mknod(public_index_file)\nos.mknod(public_styles_file)\n\n# favicon copied\ncopyfile(source_favicon_path, public_favicon_path)\n\n# # create index.html (meta-header)\n# htmlGen(\"index.html\", public_path, \"site\", meta, header, nav, aside, content, footer)\n\n#html creations\nfor subdir, dirs, files in os.walk(source_texts_files_path):\n for filename in files:\n print(\"przejście pętli\")\n filePath = subdir + os.sep + filename\n #jeżeli pliki są w głównym katalogu texts\n if os.path.dirname(filePath).endswith(\"texts\") and (filePath.endswith(\".md\") or filePath.endswith(\".html\") or filePath.endswith(\".htm\")):\n fileType = \"site\"\n content = str(open(filePath, 'r').read())\n print(\"pierwszy if sie wykonuje\")\n #w tym miejscu zmienna filePath którą przekażę do funkcji htmlGen musi zawierać ścieżkę z PUBLIC katalogie a nie bez\n htmlGen(filename, filePath, fileType, meta, header, nav, aside, content, footer)\n #jeżeli pliki są głębiej, w katalogu categories to zrób tego ifa a nie wyższego\n elif os.path.dirname(public_texts_path).endswith(\"texts\") == False and (filePath.endswith(\".md\") or 
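latexify in the plotting.py record above builds its oversize warning with "..." + fig_height + "...", which raises TypeError because fig_height is a float. A sketch of the height clamp with proper formatting (the golden-ratio default and 8-inch cap follow the record):

    import math

    MAX_HEIGHT_INCHES = 8.0

    def clamp_fig_height(fig_width, fig_height=None):
        if fig_height is None:
            # Default to the "aesthetic" golden-ratio height.
            fig_height = fig_width * (math.sqrt(5) - 1.0) / 2.0
        if fig_height > MAX_HEIGHT_INCHES:
            print("WARNING: fig_height too large: %.2f, reducing to %.2f inches."
                  % (fig_height, MAX_HEIGHT_INCHES))
            fig_height = MAX_HEIGHT_INCHES
        return fig_height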
filePath.endswith(\".html\") or filePath.endswith(\".htm\")):\n fileType = \"category\"\n content = str(open(filePath, 'r').read())\n print(\"drugi if sie wykonuje\")\n #w tym miejscu zmienna filePath którą przekażę do funkcji htmlGen musi zawierać ścieżkę z PUBLIC katalogie a nie bez\n htmlGen(filename, filePath, fileType, meta, header, nav, aside, content, footer)\n\n\n# generowanie nav i aside może być przydatna biblioteka https://www.crummy.com/software/BeautifulSoup/ podobno można nią też robić xmle więc rss i sitemapy może też da radę\n# albo po prostu na podstawie skanowania z tworzenia plików zrobić pętle która dla typu site robi wpisy do słownika który stworzy potem nav i osobno wpisy dla typu typu category robi wpisy do słownika który potem zrobi aside\n#w każdym razie musi to zostać wygenerowane przed rozpoczęciem tworzenia plików html żeby przy generowani podstron kategorii i stron już generowały się one z dobrym nav i aside\n\n# skan stylów css\n# for subdir, dirs, files in os.walk(source_path):\n# for filename in files:\n# filepath = subdir + os.sep + filename\n\n# if filepath.endswith(\".css\"):\n# print (filepath)\n\n# skan zdjęć\n# for subdir, dirs, files in os.walk(source_path):\n# for filename in files:\n# filepath = subdir + os.sep + filename\n\n# if filepath.endswith(\".jpg\") or filepath.endswith(\".jpeg\") or filepath.endswith(\".png\") or filepath.endswith(\".gif\"):\n# print (filepath)\n\n\n# skan content log\n# for subdir, dirs, files in os.walk(\"source_path/content/log\"):\n# for filename in files:\n# filepath = subdir + os.sep + filename\n\n# if filepath.endswith(\".md\") or filepath.endswith(\".html\"):\n# print (filepath)\n\n\n# pętla która generuje stronę (markdown html import export) md-to-html pip\n# sklejenie i generacja plikówcssów\n# skan wszystkich jsów i sklejenie jako jeden js\n# ładne dopracowanie html css i md i js też pod mobilki!\n# poczyszczenie skeletonowych plików żeby było tylko to co ważne???\n# tworzenie podstrony typu news/logs????\n# go to up w stopce, go to down u góry gdzieś? logo klikalne\n# trim htmlów i cssów i jsów\n\n# tworzenie unikalnego kodu na podstawie zawartości całego katalogu (md5?) i wrzucanie tego razem z datą do komentarza na samej górze strony + dopisanie funkcji w skrypcie która porównuje czy jest to to samo co na stronie za pomocą curla? curl http://www.stomski.pl | head -n3 > example.html albo pythonowe requests resp = req.get(\"http://www.webcode.me\")The get() method returns a response object. print(resp.text) http://zetcode.com/python/requests/ i w zależności od parametru, generowanie kodu z datą, generowanie tylko kodu, generowanie tylko daty, sprawdzanie kodu, sprawdzanie daty, sprawdzanie i kodu i daty, wyświetlenie tylko jaka data i kod zostałby wygenerowany teraz przy odpaleniu skryptu\n# utworzenie .htaccess\n# kilka gotowych skinów: white, white solarized, gray, gray solarized, dark, dark solarized zmiana tylko p,a,background wybieralne parametrem\n# poprawiony osobny css do druku? i jakieś knefle do drukowania do pdf i drukarką?\n# stworzenie pliku readme\n#może jakiś meta plik z tytułem strony, autorem, description, ustawieniem htaccess i komentarzem? calość niech wpł←wa na to jak się wykona skrypt\n\n# generacja rss PyRSS2Gen-1.1\n# generacja sitemapy sitemap-generator\n# opytymalizacja zdjęć? 
możnaby ją załączać dodatkowym parametrem przy odpalaniu skryptu -rekursywnie całość, rekursywnie to co nie istnieje w public/images, kopiowanie, i nie robienie nic w zależności od parametru\n\n# funckaj któ©a sprawdza czy win/lin/mac i podmienia w skrypcie to co nie działa na to co działa w zależności od systemu. może nie jest to konieczne?\n\n# dodanie emoji i ionnych czcionek\n# dodanie w podstrony/ikony/stopki cokolwiek do kontaktu z autorem? email czy też youtube twitter cokolwiek na podstawie metadanych przy generowaniu w osobny pliku?\n# dodanie tej fajnej opcji dodawania strony jako apka pod androidem?\n# generowanie spisów treści dla h1 h2 h3 h4 dla każdej podstrony? jako prawy aside albo jako klikane wyjeżdżalne menu? co z mobilkami?\n\n# obsługa sass less?\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":7635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"339435563","text":"\"\"\"\"\n\nЗадание 1\n\n0) Повторение понятий из биологии (ДНК, РНК, нуклеотид, протеин, кодон)\n\n1) Построение статистики по входящим в последовательность ДНК нуклеотидам \nдля каждого гена (например: [A - 46, C - 66, G - 23, T - 34])\n\n2) Перевод последовательности ДНК в РНК (окей, Гугл)\n\n3) Перевод последовательности РНК в протеин*\n\n\n*В папке files вы найдете файл rna_codon_table.txt - \nв нем содержится таблица переводов кодонов РНК в аминокислоту, \nсоставляющую часть полипептидной цепи белка.\n\n\nВход: файл dna.fasta с n-количеством генов\n\nВыход - 3 файла:\n - статистика по количеству нуклеотидов в ДНК\n - последовательность РНК для каждого гена\n - последовательность кодонов для каждого гена\n\n ** Если вы умеете в matplotlib/seaborn или еще что, \n welcome за дополнительными баллами за\n гистограммы по нуклеотидной статистике.\n (Не забудьте подписать оси)\n\nP.S. 
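The generator.py record above (its comments are in Polish: a static-site generator plan covering nav/aside generation, CSS and image scanning, RSS/sitemap output, and content hashing) rewrites each source path by splicing /public in front of /content/ with str.find, then concatenates page fragments into the output file. A pathlib sketch of both steps (names are illustrative):

    from pathlib import Path

    def public_path(src, marker="content"):
        # .../content/x.html -> .../public/content/x.html
        p = Path(src)
        i = p.parts.index(marker)  # assumes `marker` appears in the path
        return Path(*p.parts[:i], "public", *p.parts[i:])

    def write_page(out_path, *fragments):
        # Concatenate page fragments (meta, header, nav, aside, content, footer).
        out = Path(out_path)
        out.parent.mkdir(parents=True, exist_ok=True)
        out.write_text("".join(fragments), encoding="utf-8")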
За незакрытый файловый дескриптор - караем штрафным дезе.\n\n\"\"\"\nimport os\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport re\n\n\ndef translate_from_dna_to_rna(dna):\n dict_complement = {'A': 'U', 'C': 'G', 'G': 'C', 'T': 'A'}\n rna = {}\n rna = \"\"\n for litera in dna:\n new_litera = dict_complement[litera]\n rna += new_litera\n return rna\n\n\ndef count_nucleotides(dna):\n num_of_nucleotides = {}\n nucleotides = {'A':0, 'C':0, 'G':0, 'T':0}\n for nucleotide in nucleotides.keys():\n num_of_nucleotides[nucleotide] = dna.count(nucleotide)\n return num_of_nucleotides\n\n\ndef read_file():\n # Записываем объект Python в файл в виде JSON\n in_filename = os.path.join('files', 'rna_codon_table.txt')\n fin = open(in_filename, 'rt')\n dict_rna_to_protein = {}\n for line in fin:\n lst = re.split('\\s {2,6}', line)\n for key_value in lst:\n lst_key_value = re.split('\\s+', key_value)\n key = lst_key_value[0]\n value = lst_key_value[1]\n dict_rna_to_protein[key] = value\n return dict_rna_to_protein\n fin.close()\n\n\ndef translate_rna_to_protein(rna):\n# dict_rna_to_protein = {\n# 'UUU':'F','CUU':'L','AUU':'I','GUU':'V','UUC':'F','CUC':'L','AUC':'I','GUC':'V','UUA':'L','CUA':'L',\\\n# 'AUA':'I','GUA':'V','UUG':'L','CUG':'L','AUG':'M','GUG':'V','UCU':'S','CCU':'P','ACU':'T','GCU':'A',\\\n# 'UCC':'S','CCC':'P','ACC':'T','GCC':'A','UCA':'S','CCA':'P','ACA':'T','GCA':'A','UCG':'S','CCG':'P',\\\n# 'ACG':'T','GCG':'A','UAU':'Y','CAU':'H','AAU':'N','GAU':'D','UAC':'Y','CAC':'H','AAC':'N','GAC':'D',\\\n# 'UAA':'Stop','CAA':'Q','AAA':'K','GAA':'E','UAG':'Stop','CAG':'Q','AAG':'K','GAG':'E','UGU':'C','CGU':'R',\\\n# 'AGU':'S','GGU':'G','UGC':'C','CGC':'R','AGC':'S','GGC':'G','UGA':'Stop','CGA':'R','AGA':'R','GGA':'G',\\\n# 'UGG':'W','CGG':'R','AGG':'R','GGG':'G'\n# }\n dict_rna_to_protein = read_file()\n\n protein = \"\"\n begin = 0\n end = 3\n while end <= len(rna):\n codon = rna[begin:end]\n begin = end\n end = begin + 3\n amino_acid = dict_rna_to_protein[codon]\n protein += amino_acid\n return protein\n\ndef out_file(out_dict, out_filename):\n try:\n with open(out_filename, 'w', encoding='UTF-8') as f:\n json.dump(out_dict, f, ensure_ascii=False)\n print(f\"файл {out_filename} создался успешно!\")\n except:\n print(\"Ошибка при записи выходного файла JSON\")\n\n\n# read the file dna.fasta\nfull_file_name = os.path.join('files','dna.fasta')\nfin = open(full_file_name, 'rt')\ngene_name = 'unknoun gene'\ngenes = {}\nfor line in fin:\n if line[0] == '>':\n gene_name = line[1:-1]\n genes[gene_name] = ''\n else:\n genes[gene_name] += line\nfin.close()\ncount_nucl = {}\nrna = {}\nproteins = {}\nfor name,gene in genes.items():\n gene = gene.replace('\\n','')\n count_nucl[name] = count_nucleotides(gene)\n rna[name] = translate_from_dna_to_rna(gene)\n proteins[name] = translate_rna_to_protein(rna[name])\n# Записываем объект Python в файл в виде JSON\nout_filename = os.path.join('files','count_nucleotides.json')\nout_file(count_nucl,out_filename)\n\nout_filename = os.path.join('files','rna.json')\nout_file(rna,out_filename)\n\nout_filename = os.path.join('files','proteins.json')\nout_file(proteins,out_filename)\n\nfor name,gene in genes.items():\n# print(list(count_nucl[name].values()))\n g = np.array(list(gene))\n fig, ax = plt.subplots()\n\n ax.set_title = \"статистики по входящим в последовательность ДНК нуклеотидам\"\n ax.set_xlabel = 'нуклеотиды'\n ax.hist(g, label='статистики по нуклеотидам')\n ax.legend(loc='best')\n ax.set_title = \"статистики по входящим в последовательность 
ДНК нуклеотидам\"\n ax.set_xlabel = 'нуклеотиды'\n\n plt.show()\n","sub_path":"01-Data-Structures/hw/carrots/homework_strings.py","file_name":"homework_strings.py","file_ext":"py","file_size_in_byte":5562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"40789105","text":"import os as os\nimport requests \nimport json\n\ndef download_data_sets():\n '''Downloads a dataset from kaggle and only keeps the csv in your data file. Beware of your own data structure:\n this creates a data directory and also moves all the .csv files next to your jupyter notebooks to it.\n Takes: url from kaggle\n Returns: a folder with the downloaded csv\n '''\n url = input('introduce the link to kaggle dataset: ')\n\n #Gets the name of the dataset.zip\n endopint = url.split(\"/\")[-1]\n user = url.split(\"/\")[-2]\n\n download = f\"kaggle datasets download -d {user}/{endopint}; say -v Monica 'descargando'\"\n decompress = f\"tar -xzvf {endopint}.zip; say -v Monica 'descomprimiendo'\"\n delete = f\"rm -rf {endopint}.zip; say -v Monica 'borrando el zip'\"\n make_directory = \"mkdir data\"\n lista = \"ls >> archivos.txt\"\n for i in [download, decompress, delete, make_directory, lista]:\n os.system(i)\n \n move_and_delete = f\"mv *.csv database/; say -v Monica 'moviendo el dataset'\"\n return os.system(move_and_delete)\n\n\ndef access_yahoo(url_yahoo, parameters): \n '''\n Accesses the yahoo finance api with given parameters, uses our key.\n\n where:\n url_yahoo: the yahoo_api url you will use to connect. \n for this function to work you need to be using rapid apis low latency yahoo api.\n\n parameters = a dictionary with the following keys:\n {'symbols': 'TICKERS ex: AAPL, GME, MSFT', 'range':'5y', 'interval':'1d'}\n \n '''\n headers = {\n 'x-rapidapi-key': \"5978a55496msh35be22b262fe8acp19f68cjsn3abe53806b27\",\n 'x-rapidapi-host': \"yahoo-finance-low-latency.p.rapidapi.com\"\n }\n response = requests.request(\"GET\", url_yahoo, headers=headers, params=parameters)\n\n return response.json()\n","sub_path":"your-code/Download_data.py","file_name":"Download_data.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"407701370","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport time\nfrom selenium import webdriver\nfrom scrapy.selector import Selector\nfrom camping.items import CampingItem\n\nfrom datetime import date\nfrom datetime import timedelta\nimport calendar\n\ndef getSaturday():\n \n today = date.today()\n thisyear = today.year\n thismonth = today.month\n nextyear, nextmonth = calendar._nextmonth(year=thisyear, month=thismonth)\n # print(nextyear, nextmonth)\n\n thissaturday=[]\n nextsaturday=[]\n\n cal = calendar.monthcalendar(thisyear, thismonth)\n for week in cal:\n if week[calendar.SATURDAY]:\n print('%2s: %2s' % (str(thismonth).zfill(2), str(week[calendar.SATURDAY]).zfill(2)))\n thissaturday.append({'year':thisyear, 'month': thismonth, 'day':week[calendar.SATURDAY]})\n\n cal = calendar.monthcalendar(nextyear, nextmonth)\n for week in cal:\n if week[calendar.SATURDAY]:\n print('%2s: %2s' % (str(nextmonth).zfill(2), str(week[calendar.SATURDAY]).zfill(2)))\n nextsaturday.append({'year':nextyear, 'month': nextmonth, 'day':week[calendar.SATURDAY]})\n\n return thissaturday, nextsaturday\n\nclass GangdongSpider(scrapy.Spider):\n name = 'gangdong_spider'\n allowed_domains = ['camp.xticket.kr']\n start_urls = 
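The homework_strings.py record above (its docstring is in Russian: a bioinformatics exercise producing nucleotide statistics, a DNA-to-RNA transcription, and an RNA-to-protein translation) maps nucleotides with a per-character dict loop and slices codons by hand. A compact sketch of both steps using str.translate and 3-base chunking, where codon_table stands in for the dict parsed from rna_codon_table.txt:

    def dna_to_rna(dna):
        # Complement each base into the RNA alphabet: A->U, C->G, G->C, T->A.
        return dna.translate(str.maketrans("ACGT", "UGCA"))

    def rna_to_protein(rna, codon_table):
        # Read three bases at a time; each codon maps to one amino acid.
        return "".join(codon_table[rna[i:i + 3]]
                       for i in range(0, len(rna) - len(rna) % 3, 3))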
['https://camp.xticket.kr/web/main?shopEncode=5f9422e223671b122a7f2c94f4e15c6f71cd1a49141314cf19adccb98162b5b0']\n\n def __init__(self):\n scrapy.Spider.__init__(self)\n self.browser = webdriver.Chrome('C:\\\\github\\\\chromedriver.exe')\n\n\n def parse1(self, response):\n for colum in response.xpath('//*[@id=\"contents\"]/div[3]/div[2]/div/img').getall():\n print(\"------------------------------\")\n print(colum)\n print(\"******************************\")\n pass\n\n\n def parse2(self, response):\n self.browser.get(response.url)\n time.sleep(10)\n html = self.browser.find_element_by_xpath('//*').get_attribute('outerHTML')\n selector = Selector(text=html)\n rows = selector.xpath('//*[@id=\"contents\"]/div[3]/div[2]/div/img').extract()\n\n for colum in rows:\n print(\"------------------------------\")\n print(colum)\n print(\"******************************\")\n #self.browser.quit()\n pass\n\n# find_element_by_name('HTML_name')\n# find_element_by_id('HTML_id')\n# find_element_by_xpath('/html/body/some/xpath')\n# find_element_by_css_selector('#css > div.selector')\n# find_element_by_class_name('some_class_name')\n# find_element_by_tag_name('h1')\n# find_elements_by_css_selector('#css > div.selector')\n\n def parse(self, response):\n self.browser.get(response.url)\n time.sleep(1)\n\n\n try:\n # self.browser.find_element_by_css_selector(\"#notice_layer_582 > div > div > div > fieldset > ul > li > button\").click()\n self.browser.find_element_by_xpath(\"/html/body/div[5]/div/div/div/fieldset/ul/li/button\").click()\n except Exception as identifier:\n print(\"Processing Exception:\", identifier)\n\n # //*[@id=\"calendarTable\"]/tbody/tr[2]/td[7]/a\n # //*[@id=\"calendarTable\"]/tbody/tr[3]/td[7]/a\n # //*[@id=\"calendarTable\"]/tbody/tr[4]/td[7]/a\n # //*[@id=\"calendarTable\"]/tbody/tr[5]/td[7]/a\n # //*[@id=\"calendarTable\"]/tbody/tr[6]/td[7]/a\n # //*[@id=\"calendarTable\"]/tbody/tr[7]/td[7]/a\n # thissaturday, nextsaturday = getSaturday()\n # print(thissaturday, nextsaturday)\n\n self.browser.find_element_by_xpath('//*[@id=\"login_id\"]').send_keys('mamma1234')\n self.browser.find_element_by_xpath('//*[@id=\"login_passwd\"]').send_keys('qkrghwls0!')\n self.browser.find_element_by_xpath('//*[@id=\"header\"]/div[2]/fieldset/form/ul[1]/li[3]/a').click()\n time.sleep(1)\n\n\n emptys=[]\n for loop in [1, 2]:\n if loop == 2:\n css = '#contents > div.aside > div.calendar_box > div.calendar_paginate_box > ul.calendar_paginate > li.next > a'\n self.browser.find_element_by_css_selector(css).click()\n # self.browser.implicitly_wait(5)\n time.sleep(2)\n\n weeks = [2,3,4,5,6,7] #주차\n Saturday = 7 #7 토요일\n for week in weeks:\n try:\n print('======================>', week)\n path = '//*[@id=\"calendarTable\"]/tbody/tr['+str(week)+']/td['+str(Saturday)+']/a'\n self.browser.find_element_by_xpath(path).click()\n time.sleep(1)\n # self.browser.implicitly_wait(2)\n # print(click)\n\n # time.sleep(5)\n \n \n path = '//*[@id=\"오토캠핑장\"]'\n self.browser.find_element_by_xpath(path).click()\n time.sleep(1)\n empty = self.search()\n if len(empty) > 0:\n emptys.extend(empty)\n\n if len(emptys) > 0:\n print('--------------------------------------------')\n print('emptys:', emptys)\n print('--------------------------------------------')\n # pass\n return emptys\n\n\n path = '//*[@id=\"가족캠핑장\"]'\n self.browser.find_element_by_xpath(path).click()\n time.sleep(1)\n empty = self.search()\n if len(empty) > 0:\n emptys.extend(empty)\n\n if len(emptys) > 0:\n print('--------------------------------------------')\n 
print('emptys:', emptys)\n print('--------------------------------------------')\n # pass\n return emptys\n\n # path = '//*[@id=\"매화나무캠핑장\"]'\n # self.browser.find_element_by_xpath(path).click()\n # time.sleep(1)\n # empty = self.search()\n # emptys.extend(empty)\n\n\n except Exception as identifier:\n print(\"Processing Exception:\", identifier)\n # pass\n\n\n\n\n\n print('--------------------------------------------')\n print('emptys:', emptys)\n print('--------------------------------------------')\n # elements1 = self.browser.find_elements_by_xpath('//*[@id=\"contents\"]/div[3]/div[2]/div/img').get_attribute('alt')\n # for element1 in elements1:\n # print(element1)\n\n # elements = self.browser.find_elements_by_xpath('//*[@id=\"contents\"]/div[3]/div[2]/div/img')\n # print(elements)\n # for element in elements:\n # print(element)\n # print('test')\n # alt = element.get_attribute('alt')\n # if \"예약완료\" in alt:\n # print(\"예약완료\", alt)\n # else:\n # print(\"예약불가\", alt)\n\n # html = self.browser.find_element_by_xpath('//*').get_attribute('outerHTML')\n # selector = Selector(text=html)\n # rows = selector.xpath('//*[@id=\"contents\"]/div[3]/div[2]/div/img').extract()\n\n # for colum in rows:\n # print(\"------------------------------\")\n # print(colum)\n # # if \"예약완료\" in colum:\n # # print('예약완료')\n # # else:\n # # print('예약가능')\n # # print(colum)\n # print(\"******************************\")\n if len(emptys) == 0:\n self.browser.quit()\n return emptys\n # pass\n\n def search(self):\n emptys = []\n\n html = self.browser.find_element_by_xpath('//*').get_attribute('outerHTML')\n selector = Selector(text=html)\n\n day = selector.xpath('//*[@class=\"select_day\"]/a/text()').extract()\n # print('click =============>', day[0], '<=============')\n # rows = selector.xpath('//*[@id=\"contents\"]/div[3]/div[2]/div/img/@alt').extract()\n rows = selector.xpath('//*[@id=\"contents\"]/div[3]/div[2]/div/img[contains(@class,\"product_box\")]')\n # print(rows)\n for row in rows:\n alt = row.xpath('@alt').extract()\n id = row.xpath('@id').extract()\n print(alt, \":\", id)\n if \"시설 약도\" not in alt[0] and \"예약완료\" not in alt[0]: \n # if \"시설 약도\" not in row and \"예약완료\" not in row:\n # print(row)\n # emptys.append({'day':day, 'row': row})\n\n empty = CampingItem()\n empty['day']=day\n empty['row']=alt\n emptys.append(empty)\n\n self.browser.find_element_by_xpath('//*[@id=\"'+id[0]+'\"]').click()\n self.browser.find_element_by_xpath('//*[@id=\"contents\"]/div[1]/div/p/a').click()\n \n return emptys\n\n return emptys","sub_path":"camping/camping/spiders/gangdong_spider.py","file_name":"gangdong_spider.py","file_ext":"py","file_size_in_byte":9270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"31388345","text":"#!/usr/bin/python\n# coding=utf8\n\n__author__ = 'peng lin'\n\nimport cx_Oracle as oracle\nfrom src.module.config.conf import Config\n\nclass bi_connector(object):\n\n def __init__(self):\n super().__init__()\n self.bi_cybersource_name = Config.DataBase.bi_cybersource_name\n self.bi_paypal_name = Config.DataBase.bi_paypal_name\n self.bi_paypalwpp_name = Config.DataBase.bi_paypalwpp_name\n self.bi_realtimebank_name = Config.DataBase.bi_realtimebank_name\n self.bi_astropay_credit_name = Config.DataBase.bi_astropay_credit_name\n self.bi_astropay_installment_name = Config.DataBase.bi_astropay_installment_name\n self.bi_globalcollect_name = Config.DataBase.bi_globalcollect_name\n self.bi_cod_name = Config.DataBase.bi_cod_name\n self.bi_payubiz_name 
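getSaturday in the gangdong_spider.py record above walks calendar.monthcalendar rows and skips the zero padding for days outside the month; the core idea isolated as a reusable sketch:

    import calendar

    def saturdays(year, month):
        # monthcalendar() pads days outside the month with 0, so filter those out.
        return [week[calendar.SATURDAY]
                for week in calendar.monthcalendar(year, month)
                if week[calendar.SATURDAY]]

    # saturdays(2024, 5) -> [4, 11, 18, 25]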
= Config.DataBase.bi_payubiz_name\n self.bi_ebanx_credit_name = Config.DataBase.bi_ebanx_credit_name\n self.bi_gc_westernunion_name = Config.DataBase.bi_gc_westernunion_name\n self.bi_wiretransfer_name = Config.DataBase.bi_wiretransfer_name\n self.bi_storecredit_name = Config.DataBase.bi_storecredit_name\n #self.bi_astropay_oxxo_boleto_name = Config.DataBase.bi_astropay_oxxo_boleto_name\n self.bi_ebanx_boleto_name = Config.DataBase.bi_ebanx_boleto_name\n self.bi_paytm_name = Config.DataBase.bi_paytm_name\n\n def set_payment_sql(self, start_date, end_date):\n sql1 = \"\"\"SELECT\n TO_CHAR (STAT_TIME, 'yyyy-MM-dd'),\n APPLICATION_TYPE,\n PAYMENT_MODULE_CODE,\n TOTAL_SUCC\n FROM\n bisd.rh_checkout_detail_ma\n WHERE\n TO_CHAR (STAT_TIME,'yyyy-MM-dd HH24:mi:ss')\n IN (\"\"\"\n sql2 = \"\"\")\n AND\n DELIVERY_COUNTRY = 'Total'\n AND\n 'Total' not in PAYMENT_MODULE_CODE\n AND\n APPLICATION_TYPE = :platform\n AND\n MERCHANT_ID = :merchant_id\n ORDER BY\n TOTAL_SUCC DESC\"\"\"\n sql = sql1 + \"'\" + start_date + \" 23:00:00\" + \"'\" + sql2\n return sql\n\n def set_pc_sql(self, start_date, end_date):\n sql1 = \"\"\"SELECT\n TO_CHAR (STAT_TIME, 'yyyy-MM-dd'),\n APPLICATION_TYPE,\n 'offline' PAYMENT_MODULE_CODE,\n sum(TOTAL_SUCC)\n FROM\n bisd.rh_checkout_detail_ma\n WHERE\n TO_CHAR (\n STAT_TIME,\n 'yyyy-MM-dd HH24:mi:ss'\n ) in (\"\"\"\n\n sql2 = \"\"\")AND DELIVERY_COUNTRY = 'Total'\n AND APPLICATION_TYPE = :platform\n AND PAYMENT_MODULE_CODE in ('cod', 'ebanx_boleto', 'astropay_boleto', 'gc_westernunion', 'astropay_oxxo', 'wiretransfer')\n AND MERCHANT_ID = :merchant_id\n GROUP BY TO_CHAR (STAT_TIME, 'yyyy-MM-dd'), APPLICATION_TYPE\n UNION\n SELECT\n TO_CHAR (STAT_TIME, 'yyyy-MM-dd'),\n APPLICATION_TYPE,\n 'online' PAYMENT_MODULE_CODE,\n sum(TOTAL_SUCC)\n FROM\n bisd.rh_checkout_detail_ma\n WHERE\n TO_CHAR (\n STAT_TIME,\n 'yyyy-MM-dd HH24:mi:ss'\n ) in (\"\"\"\n\n sql3 = \"\"\")\n AND DELIVERY_COUNTRY = 'Total'\n AND APPLICATION_TYPE = :platform\n AND PAYMENT_MODULE_CODE not in ('cod', 'ebanx_boleto', 'astropay_boleto', 'gc_westernunion', 'astropay_oxxo', 'wiretransfer', 'Total')\n AND MERCHANT_ID = :merchant_id\n GROUP BY TO_CHAR (STAT_TIME, 'yyyy-MM-dd'), APPLICATION_TYPE\"\"\"\n\n sql_date = \"'\" + start_date + \" 23:00:00\" + \"'\"\n sql = sql1 + sql_date + sql2 + sql_date + sql3\n return sql\n\n def get_bi_data(self, start_date, end_date, platform, merchant_id):\n sql = ''''''\n # connect oracle database\n db = oracle.connect('waapp/WazzZZ@172.16.0.107:1521/dw01')\n\n # create cursor\n cursor = db.cursor()\n if platform == Config.DataBase.bi_pc_platform_name:\n sql = self.set_pc_sql(start_date, end_date)\n else:\n sql = self.set_payment_sql(start_date, end_date)\n merchant_id = int(merchant_id)\n parm = {'platform': platform,\n 'merchant_id': merchant_id}\n\n #print('执行sql:[{}],参数:[{}]'.format(sql, parm))\n # execute sql\n cursor.execute(sql, parm)\n # fetch data\n data = cursor.fetchall()\n # close cursor and oracle\n cursor.close()\n db.close()\n return data\n\n def parse_bi_data(self, data):\n dict_tmp = {}\n if len(data) > 0:\n for item in data:\n if list(item)[2] == self.bi_cybersource_name:\n dict_tmp[self.bi_cybersource_name] = list(item)\n elif list(item)[2] == self.bi_paypal_name:\n dict_tmp[self.bi_paypal_name] = list(item)\n elif list(item)[2] == self.bi_paypalwpp_name:\n dict_tmp[self.bi_paypalwpp_name] = list(item)\n elif list(item)[2] == self.bi_realtimebank_name:\n dict_tmp[self.bi_realtimebank_name] = list(item)\n elif list(item)[2] == self.bi_astropay_credit_name:\n 
dict_tmp[self.bi_astropay_credit_name] = list(item)\n elif list(item)[2] == self.bi_astropay_installment_name:\n dict_tmp[self.bi_astropay_installment_name] = list(item)\n elif list(item)[2] == self.bi_globalcollect_name:\n dict_tmp[self.bi_globalcollect_name] = list(item)\n elif list(item)[2] == self.bi_cod_name:\n dict_tmp[self.bi_cod_name] = list(item)\n elif list(item)[2] == self.bi_payubiz_name:\n dict_tmp[self.bi_payubiz_name] = list(item)\n elif list(item)[2] == self.bi_ebanx_credit_name:\n dict_tmp[self.bi_ebanx_credit_name] = list(item)\n elif list(item)[2] == self.bi_gc_westernunion_name:\n dict_tmp[self.bi_gc_westernunion_name] = list(item)\n elif list(item)[2] == self.bi_wiretransfer_name:\n dict_tmp[self.bi_wiretransfer_name] = list(item)\n elif list(item)[2] == self.bi_storecredit_name:\n dict_tmp[self.bi_storecredit_name] = list(item)\n #elif list(item)[2] == self.bi_astropay_oxxo_boleto_name:\n #dict_tmp[self.bi_astropay_oxxo_boleto_name] = list(item)\n elif list(item)[2] == self.bi_ebanx_boleto_name:\n dict_tmp[self.bi_ebanx_boleto_name] = list(item)\n elif list(item)[2] == self.bi_paytm_name:\n dict_tmp[self.bi_paytm_name] = list(item)\n return dict_tmp\n\nif __name__ == '__main__':\n Config.init(\"./module/config/config.ini\")\n start_date = '2018-01-29'\n end_date = '2018-01-29'\n\n litb_merchant = Config.DataBase.litb_merchant\n mini_merchant = Config.DataBase.mini_merchant\n\n platform = Config.DataBase.bi_jupiter_platorm_name\n my_connector = bi_connector(start_date, end_date)\n jupiter_litb_data = my_connector.get_bi_data(platform, litb_merchant)\n jupiter_litb_result = my_connector.parse_bi_data(jupiter_litb_data)\n jupiter_mini_data = my_connector.get_bi_data(platform, mini_merchant)\n jupiter_mini_result = my_connector.parse_bi_data(jupiter_mini_data)\n\n #get mobile_app_android bi data\n platform = Config.DataBase.bi_mobile_app_android_platorm_name\n mobile_app_android_litb_data = my_connector.get_bi_data(platform, litb_merchant)\n mobile_app_android_litb_result = my_connector.parse_bi_data(mobile_app_android_litb_data)\n mobile_app_android_mini_data = my_connector.get_bi_data(platform, mini_merchant)\n mobile_app_android_mini_result = my_connector.parse_bi_data(mobile_app_android_mini_data)\n\n #get mobile_app_iphone bi data\n platform = Config.DataBase.bi_mobile_app_iphone_platorm_name\n mobile_app_iphone_litb_data = my_connector.get_bi_data(platform, litb_merchant)\n mobile_app_iphone_litb_result = my_connector.parse_bi_data(mobile_app_iphone_litb_data)\n mobile_app_iphone_mini_data = my_connector.get_bi_data(platform, mini_merchant)\n mobile_app_iphone_mini_result = my_connector.parse_bi_data(mobile_app_iphone_mini_data)\n\n result_data = {\n Config.DataBase.bi_jupiter_platorm_name:\n {\"LITB\": jupiter_litb_result,\n \"MINI\": jupiter_mini_result},\n Config.DataBase.bi_mobile_app_android_platorm_name:\n {\"LITB\": mobile_app_android_litb_result,\n \"MINI\": mobile_app_android_mini_result},\n Config.DataBase.bi_mobile_app_iphone_platorm_name:\n {\"LITB\": mobile_app_iphone_litb_result,\n \"MINI\": mobile_app_iphone_mini_result}\n }\n print(result_data)","sub_path":"src/bi_data_collector.py","file_name":"bi_data_collector.py","file_ext":"py","file_size_in_byte":9377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"224019996","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import cm\n\nfrom loocius.tmp.tools.paths import colourwheel_path\n\nif 
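parse_bi_data in the bi_data_collector.py record above repeats one elif branch per payment-module name, and every branch performs the same assignment. A membership test over a set of configured names collapses the chain; a sketch, with known_codes standing in for the record's configured name constants:

    def parse_bi_data(rows, known_codes):
        # Keep the last row seen for each recognized payment-module code (column 2).
        result = {}
        for row in rows:
            code = row[2]
            if code in known_codes:
                result[code] = list(row)
        return result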
__name__ == '__main__':\n\n dpi = 100\n fig = plt.figure(figsize=(2.56, 2.56), dpi=dpi)\n\n display_axes = fig.add_axes([0.1,0.1,0.8,0.8], projection='polar')\n display_axes._direction = 2*np.pi ## This is a nasty hack - using the hidden field to\n ## multiply the values such that 1 become 2*pi\n ## this field is supposed to take values 1 or -1 only!!\n\n norm = mpl.colors.Normalize(0.0, 2*np.pi)\n display_axes.set_theta_zero_location('S')\n\n # Plot the colorbar onto the polar axis\n # note - use orientation horizontal so that the gradient goes around\n # the wheel rather than centre out\n quant_steps = 2056\n cb = mpl.colorbar.ColorbarBase(display_axes, cmap=cm.get_cmap('hsv_r',quant_steps),\n norm=norm,\n orientation='horizontal')\n\n # aesthetics - get rid of border and axis labels\n cb.outline.set_visible(False)\n display_axes.set_axis_off()\n plt.savefig(colourwheel_path, transparent=True)\n","sub_path":"loocius/tmp/misc/matplotlib_colorwheel.py","file_name":"matplotlib_colorwheel.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"396533091","text":"# 정수 삼각형\n\ndef solution(triangle):\n len_t = len(triangle)\n if len_t == 1: return triangle[0][0]\n dp = [[0 for _ in range(i+1)] for i in range(len_t)]\n dp[0][0] = triangle[0][0]\n # dp 구하기\n for i in range(1, len_t):\n dp[i][0] = triangle[i][0] + dp[i-1][0]\n dp[i][-1] = triangle[i][-1] + dp[i-1][-1]\n if i >= 2: # 사이 값들도 계산\n for j in range(1, i):\n dp[i][j] = max(dp[i-1][j-1], dp[i-1][j]) + triangle[i][j] \n \n return max(dp[-1])","sub_path":"prev/programmers/14주차/정수 삼각형/정수삼각형_jy.py","file_name":"정수삼각형_jy.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"173977084","text":"#conding=utf-8\n\nimport random\nclass MySort:\n #初始化\n def __init__(self,start,end,count):\n self.start=start\n self.end=end\n self.count=count\n \n def mysort(self):\n #对参数的数据类型做检查\n if not isinstance(self.start,(int))&isinstance(self.end,(int))&isinstance(self.count,(int,float)):\n raise TypeError('参数必须是整数')\n #生成随机数列表\n temp=[]\n for i in range(self.count):\n ran_data=random.randint(self.start,self.end)\n temp.append(ran_data)\n #从小到大排序列表\n for i in range(self.count):\n for j in range(i):\n if temp[j]>temp[j+1]:\n temp[j],temp[j+1]=temp[j+1],temp[j]\n return temp \n \n#使用示例\nif __name__ == '__main__':\n sorted_Data = MySort(10,1000,100).mysort()\n #打印排序后的结果\n print(sorted_Data)\n\n\n\n","sub_path":"第一期/广州-番茄/task1/MySort.py","file_name":"MySort.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"279710475","text":"\"\"\"\nImplements analysis to look for free registers\n\"\"\"\n\nimport copy\nfrom collections import defaultdict\n\nfrom archinfo import ArchAMD64, Register\n\n\nclass RegisterAnalysis(object):\n KEY = 'free_registers'\n\n def __init__(self):\n self.regmap = self._init_reg_pool()\n self.reg_pool = frozenset(self.regmap.keys())\n\n # registers that can be safely used (i.e. 
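The MySort.py record above (its comments are in Chinese: generate random integers in a range, then sort ascending) has a broken inner loop: for j in range(i) only bubbles within the already-seen prefix, so for example [3, 2, 1] comes out as [2, 1, 3]. A corrected bubble sort sketch:

    def bubble_sort(items):
        # Each outer pass floats the largest remaining value to the end.
        n = len(items)
        for i in range(n):
            for j in range(n - 1 - i):
                if items[j] > items[j + 1]:
                    items[j], items[j + 1] = items[j + 1], items[j]
        return items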
their content is not used)\n self.free_regs = defaultdict(set)\n\n # registers whose content is actively in use (\n self.used_regs = defaultdict(lambda: copy.copy(self.reg_pool))\n\n # registers who are not actively used in the current function\n # these are safe to use if they are pushed/popped at beginning/end of function\n # clobber = overwrite content\n # initially, these are all registers\n self.clobber_registers = set([\n \"rbx\", \"rsp\", \"rbp\", \"r12\", \"r13\", \"r14\", \"r15\",\n \"rax\", \"rdx\", \"r10\", \"r11\", \"r8\", \"r9\", \"rcx\", \"rdi\", \"rsi\"])\n\n self.subregs = dict()\n\n self._init_subregisters()\n self.closure_list = self._init_closure_list()\n\n # Caller saved register list, These are registers that cannot be\n # clobbered and therefore are 'used'.\n self.used_regs['ret'] = set([\n \"rbx\", \"rsp\", \"rbp\", \"r12\", \"r13\", \"r14\", \"r15\",\n \"rax\", \"rdx\", \"r10\", \"r11\", \"r8\", \"r9\", \"rcx\", \"rdi\", \"rsi\"])\n self.used_regs['call'] = set([\n \"rbx\", \"rsp\", \"rbp\", \"r12\", \"r13\", \"r14\", \"r15\",\n \"rdi\", \"rsi\", \"rdx\", \"rcx\", \"r8\", \"r9\", \"rax\"])\n\n def _init_reg_pool(self):\n # Possible extension: add xmm registers into the pool\n amd64 = ArchAMD64()\n regmap = dict()\n for reg in amd64.register_list:\n if reg.general_purpose:\n regmap[reg.name] = reg\n\n # Remove rip, rsp from regpool\n del regmap[\"rip\"]\n del regmap[\"rsp\"]\n\n # Add a fake register for rflags\n rflags = Register(\"rflags\", 64)\n regmap[\"rflags\"] = rflags\n\n return regmap\n\n def _init_closure_list(self):\n closure_list = defaultdict(lambda: [\"\", \"\", \"\", \"\"])\n\n for wrn, wr in self.regmap.items():\n subreg_list = list(enumerate(wr.subregisters))\n # 64-bit register rules\n for idx, subreg in subreg_list:\n closure_list[wrn][idx] = subreg[0]\n\n # 32-bit register rules\n reg32 = closure_list[wrn][0]\n if reg32:\n closure_list[reg32] = copy.copy(closure_list[wrn])\n closure_list[reg32][0] = wrn\n\n # 16-bit register rules\n reg16 = closure_list[wrn][1]\n if reg16:\n closure_list[reg16] = copy.copy(closure_list[wrn][2:])\n\n # 8l-bit register rules\n reg8l = closure_list[wrn][2]\n if reg8l:\n closure_list[reg8l] = []\n\n # 8h-bit register rules\n reg8h = closure_list[wrn][3]\n if reg8h:\n closure_list[reg8h] = []\n\n # Cleanup\n for k, items in closure_list.items():\n closure_list[k] = frozenset([x for x in items if x])\n\n return closure_list\n\n def _init_subregisters(self):\n for rn, reg in self.regmap.items():\n self.subregs[rn] = rn\n\n if reg.name in [\"r8\", \"r9\", \"r10\", \"r11\",\n \"r12\", \"r13\", \"r14\", \"r15\"]:\n\n reg.subregisters = [\n (reg.name + \"d\", 0, 4),\n (reg.name + \"w\", 0, 2),\n (reg.name + \"b\", 0, 1)]\n\n if reg.name == \"rbp\":\n reg.subregisters = [\n (\"ebp\", 0, 4),\n (\"bp\", 0, 2),\n (\"bpl\", 0, 1)]\n\n for subr in reg.subregisters:\n self.subregs[subr[0]] = rn\n\n def compute_reg_set_closure(self, regl):\n regset = set(regl)\n for item in regl:\n clist = self.closure_list[item]\n regset.update(clist)\n return regset\n\n def full_register_of(self, regname):\n return self.subregs.get(regname, None)\n\n @staticmethod\n def analyze(container):\n for addr, function in container.functions.items():\n ra = RegisterAnalysis()\n ra.analyze_function(function)\n function.analysis[RegisterAnalysis.KEY] = ra.free_regs\n function.analysis['clobber_registers'] = ra.clobber_registers\n function.analysis['used_registers'] = ra.used_regs\n\n def analyze_function(self, function):\n change = True\n iter = 0\n while 
change and iter < 8192:\n change = False\n for idx, _ in enumerate(function.cache):\n change = change or self.analyze_instruction(function, idx)\n iter += 1\n self.finalize()\n\n def analyze_instruction(self, function, instruction_idx):\n current_instruction = function.cache[instruction_idx]\n\n # if a register is written or read at some point it is no longer a clobber register\n self.clobber_registers.difference_update(set(current_instruction.reg_reads()))\n self.clobber_registers.difference_update(set(current_instruction.reg_writes()))\n\n nexts = function.next_of(instruction_idx)\n\n reguses = self.reg_pool.intersection(\n [self.full_register_of(x) for x in current_instruction.reg_reads()]\n )\n\n regwrites = self.compute_reg_set_closure(\n current_instruction.reg_writes()\n ).difference(reguses)\n\n for nexti in nexts:\n reguses = reguses.union(\n self.used_regs[nexti].difference(regwrites))\n reguses = self.compute_reg_set_closure(reguses)\n if reguses != self.used_regs[instruction_idx]:\n self.used_regs[instruction_idx] = reguses\n return True\n\n return False\n\n def debug(self, function):\n print(\"==== DEBUG\")\n for instruction_idx, inst in enumerate(function.cache):\n print(inst, \"Used:\", sorted(self.used_regs[instruction_idx]))\n\n def finalize(self):\n for idx, ent in self.used_regs.items():\n self.free_regs[idx] = self.reg_pool.difference(ent)\n","sub_path":"retrowrite/librw/analysis/register.py","file_name":"register.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"115950893","text":"import discord\nimport asyncio\nimport time\nimport sys\nimport json\nfrom channel_list_dict import academics\nfrom sqlite_gen import *\n\nconfig_data = json.load(open('token.json'))\n\nclient = discord.Client()\n\nrecentUsers = dict([])\nallUsers = set([])\n\n@client.event\nasync def on_ready():\n print(\"Bot is ready\")\n\n#datestamp = str(datetime.datetime.fromtimestamp(unix) .strftime('%Y-%m-%d %H:%M:%S'))\n\n@client.event\nasync def on_message(message):\n if message.author.id not in allUsers:\n unix = int(time.time())\n allUsers.add(message.author.id)\n recentUsers[message.author.id] = unix\n name = trim_name(message.author)\n data_entry(message.author.id, name)\n else:\n unix = int(time.time())\n if recentUsers[message.author.id]+30 < unix and message.channel in academics:\n recentUsers[message.author.id] = unix\n add_points(message.author.id)\n elif recentUsers[message.author.id]+30 < unix:\n recentUsers[message.author.id] = unix\n add_points_half(message.author.id)\n if message.content == \"*points\" and len(str(message.content)) < 8:\n point_message = \"Your current point value is: \" + str(current_points(message.author.id))\n await client.send_message(message.channel, point_message)\n elif message.content.startswith(\"*points\"):\n msg = str(message.content)\n id = msg[10:28]\n print (id)\n point_message = trim_name(message.author) + \"'s current point value is: \" + str(current_points(id))\n await client.send_message(message.channel, point_message)\n elif message.author.id == \"142425531801927680\":\n if message.content == \"*restart\":\n await client.close()\n sys.exit(0)\n elif message.content.startswith(\"*set_points\"):\n msg = str(message.content)\n id = msg[14:32]\n pointsToAdd = msg[34:len(msg)]\n set_points(id,pointsToAdd)\n\n\ndef trim_name(name):\n l = []\n name = str(name)\n for x in name:\n if x == '#':\n break\n else:\n l.append(x)\n l = ''.join(l)\n return 
l\n\nclient.run(config_data['access_token'])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"255666165","text":"import glob\nimport os\nimport argparse\n\nap = argparse.ArgumentParser()\nap.add_argument('-i', '--input_folder',\n required=True,\n help='Folder containing scans')\nargs = vars(ap.parse_args())\ninput_dir = args['input_folder']\nif input_dir[-1] != '/':\n input_dir += '/'\nfor filename in glob.glob(input_dir+'*.png'):\n if os.path.isfile(filename[:-4]+'.knots'):\n continue\n if os.path.isfile(filename+'.knots'):\n old = filename+'.knots'\n new = filename[:-4]+'.knots'\n print(\"Renaming {} to {}\".format(old, new))\n os.rename(old, new)\n","sub_path":"File_utilities/knots_file_extension_converter.py","file_name":"knots_file_extension_converter.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"589657074","text":"\"\"\"\nThis spider is a HodesDigitalCablevision spider created on top of the HodesDigital\nscrapy crawl hodesdigital_cablevision -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://cablevision.sc.hodesdigital.com/jobs/job-search-result?division=-1&area-of-talent=-1&position-type=-1&jobtype=-1&state=-1&keywords=\"\n\nsample url:\nhttp://cablevision.sc.hodesdigital.com/jobs/job-search-result?division=-1&area-of-talent=-1&position-type=-1&jobtype=-1&state=-1&keywords=\n\"\"\"\n\nfrom re import compile\nfrom urlparse import urljoin\nfrom scrapy.http import FormRequest, Request\nfrom scrapy.selector import Selector\nfrom brightcorp.lib.utils import get_hidden_inputs\nfrom brightcorp.spiders.hodesdigital import HodesDigital\n\n\nclass HodesDigitalCablevision(HodesDigital):\n\n name = \"hodesdigital_cablevision\"\n job_count_re = compile(r\"(\\d+)\\xa0 Found\")\n\n def parse(self, response):\n sel = Selector(response)\n if not self.expected_job_count_set:\n job_count = sel.xpath(\n '//div[@id=\"phcontent_0_phsearchresult_0_jobsfound\"]/text()'\n ).re(self.job_count_re)\n if job_count:\n self.expected_job_count = job_count\n\n if not self.logo_url:\n logo_url = sel.xpath(\n '//div[@id=\"HeaderLeft\"]/a/img/@src').extract()\n if logo_url:\n self.logo_url = urljoin(response.url, logo_url[0])\n\n jobs = sel.xpath(\n '//table[contains(@id, \"phcontent_0_phsearchresult_0_\")]//tr'\n )\n for job in jobs:\n job_url = job.xpath('./td[1]/a/@href').extract()\n if job_url:\n job_url = urljoin(response.url, job_url[0])\n meta = {\n 'title': job.xpath('./td[1]/a/text()').extract(),\n 'location': job.xpath('./td[2]/text()').extract(),\n 'jobcategory': job.xpath('./td[3]/text()').extract(),\n }\n yield Request(\n job_url, callback=self.parse_job_callback(), meta=meta\n )\n\n next_page = sel.xpath(\n '//a[@id=\"phcontent_0_phsearchresult_0_next_page\"]/@href').re(self.next_url_re)\n if next_page:\n form_data = get_hidden_inputs(response)\n form_data['__EVENTTARGET'] = next_page[0]\n yield FormRequest(\n response.url, callback=self.parse, formdata=form_data\n )\n","sub_path":"brightcorp/brightcorp/spiders/hodesdigital_cablevision.py","file_name":"hodesdigital_cablevision.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"626762655","text":"# coding: utf-8\n\n\"\"\"\n ORY Hydra\n\n Welcome to the ORY Hydra HTTP API documentation. 
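\n\n    Editor's note (hedged illustration, not part of the generated client): the\n    JSONWebKey model below mirrors RFC 7517. A public RSA signing key, rendered\n    as a plain dict, typically looks like::\n\n        {\n            \"kty\": \"RSA\",            # key family, IANA \"JSON Web Key Types\" registry\n            \"use\": \"sig\",            # \"sig\" (signature) or \"enc\" (encryption)\n            \"kid\": \"example-key-1\",  # illustrative key id\n            \"alg\": \"RS256\",\n            \"n\": \"0vx7...\",          # base64url RSA modulus, truncated here\n            \"e\": \"AQAB\",\n        }\n\n    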
You will find documentation for all HTTP APIs here. # noqa: E501\n\n The version of the OpenAPI document: v1.7.0\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom ory_hydra_client.configuration import Configuration\n\n\nclass JSONWebKey(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'alg': 'str',\n 'crv': 'str',\n 'd': 'str',\n 'dp': 'str',\n 'dq': 'str',\n 'e': 'str',\n 'k': 'str',\n 'kid': 'str',\n 'kty': 'str',\n 'n': 'str',\n 'p': 'str',\n 'q': 'str',\n 'qi': 'str',\n 'use': 'str',\n 'x': 'str',\n 'x5c': 'list[str]',\n 'y': 'str'\n }\n\n attribute_map = {\n 'alg': 'alg',\n 'crv': 'crv',\n 'd': 'd',\n 'dp': 'dp',\n 'dq': 'dq',\n 'e': 'e',\n 'k': 'k',\n 'kid': 'kid',\n 'kty': 'kty',\n 'n': 'n',\n 'p': 'p',\n 'q': 'q',\n 'qi': 'qi',\n 'use': 'use',\n 'x': 'x',\n 'x5c': 'x5c',\n 'y': 'y'\n }\n\n def __init__(self, alg=None, crv=None, d=None, dp=None, dq=None, e=None, k=None, kid=None, kty=None, n=None, p=None, q=None, qi=None, use=None, x=None, x5c=None, y=None, local_vars_configuration=None): # noqa: E501\n \"\"\"JSONWebKey - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._alg = None\n self._crv = None\n self._d = None\n self._dp = None\n self._dq = None\n self._e = None\n self._k = None\n self._kid = None\n self._kty = None\n self._n = None\n self._p = None\n self._q = None\n self._qi = None\n self._use = None\n self._x = None\n self._x5c = None\n self._y = None\n self.discriminator = None\n\n self.alg = alg\n if crv is not None:\n self.crv = crv\n if d is not None:\n self.d = d\n if dp is not None:\n self.dp = dp\n if dq is not None:\n self.dq = dq\n if e is not None:\n self.e = e\n if k is not None:\n self.k = k\n self.kid = kid\n self.kty = kty\n if n is not None:\n self.n = n\n if p is not None:\n self.p = p\n if q is not None:\n self.q = q\n if qi is not None:\n self.qi = qi\n self.use = use\n if x is not None:\n self.x = x\n if x5c is not None:\n self.x5c = x5c\n if y is not None:\n self.y = y\n\n @property\n def alg(self):\n \"\"\"Gets the alg of this JSONWebKey. # noqa: E501\n\n The \\\"alg\\\" (algorithm) parameter identifies the algorithm intended for use with the key. The values used should either be registered in the IANA \\\"JSON Web Signature and Encryption Algorithms\\\" registry established by [JWA] or be a value that contains a Collision- Resistant Name. # noqa: E501\n\n :return: The alg of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._alg\n\n @alg.setter\n def alg(self, alg):\n \"\"\"Sets the alg of this JSONWebKey.\n\n The \\\"alg\\\" (algorithm) parameter identifies the algorithm intended for use with the key. The values used should either be registered in the IANA \\\"JSON Web Signature and Encryption Algorithms\\\" registry established by [JWA] or be a value that contains a Collision- Resistant Name. # noqa: E501\n\n :param alg: The alg of this JSONWebKey. 
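\n\n        Editor's note (hedged sketch): the required members (alg, kid, kty, use)\n        all share this setter pattern; when client-side validation is enabled,\n        assigning None raises::\n\n            key.alg = None   # ValueError: Invalid value for `alg`, must not be `None`\n\n        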
# noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and alg is None: # noqa: E501\n raise ValueError(\"Invalid value for `alg`, must not be `None`\") # noqa: E501\n\n self._alg = alg\n\n @property\n def crv(self):\n \"\"\"Gets the crv of this JSONWebKey. # noqa: E501\n\n\n :return: The crv of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._crv\n\n @crv.setter\n def crv(self, crv):\n \"\"\"Sets the crv of this JSONWebKey.\n\n\n :param crv: The crv of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._crv = crv\n\n @property\n def d(self):\n \"\"\"Gets the d of this JSONWebKey. # noqa: E501\n\n\n :return: The d of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._d\n\n @d.setter\n def d(self, d):\n \"\"\"Sets the d of this JSONWebKey.\n\n\n :param d: The d of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._d = d\n\n @property\n def dp(self):\n \"\"\"Gets the dp of this JSONWebKey. # noqa: E501\n\n\n :return: The dp of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._dp\n\n @dp.setter\n def dp(self, dp):\n \"\"\"Sets the dp of this JSONWebKey.\n\n\n :param dp: The dp of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._dp = dp\n\n @property\n def dq(self):\n \"\"\"Gets the dq of this JSONWebKey. # noqa: E501\n\n\n :return: The dq of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._dq\n\n @dq.setter\n def dq(self, dq):\n \"\"\"Sets the dq of this JSONWebKey.\n\n\n :param dq: The dq of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._dq = dq\n\n @property\n def e(self):\n \"\"\"Gets the e of this JSONWebKey. # noqa: E501\n\n\n :return: The e of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._e\n\n @e.setter\n def e(self, e):\n \"\"\"Sets the e of this JSONWebKey.\n\n\n :param e: The e of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._e = e\n\n @property\n def k(self):\n \"\"\"Gets the k of this JSONWebKey. # noqa: E501\n\n\n :return: The k of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._k\n\n @k.setter\n def k(self, k):\n \"\"\"Sets the k of this JSONWebKey.\n\n\n :param k: The k of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._k = k\n\n @property\n def kid(self):\n \"\"\"Gets the kid of this JSONWebKey. # noqa: E501\n\n The \\\"kid\\\" (key ID) parameter is used to match a specific key. This is used, for instance, to choose among a set of keys within a JWK Set during key rollover. The structure of the \\\"kid\\\" value is unspecified. When \\\"kid\\\" values are used within a JWK Set, different keys within the JWK Set SHOULD use distinct \\\"kid\\\" values. (One example in which different keys might use the same \\\"kid\\\" value is if they have different \\\"kty\\\" (key type) values but are considered to be equivalent alternatives by the application using them.) The \\\"kid\\\" value is a case-sensitive string. # noqa: E501\n\n :return: The kid of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._kid\n\n @kid.setter\n def kid(self, kid):\n \"\"\"Sets the kid of this JSONWebKey.\n\n The \\\"kid\\\" (key ID) parameter is used to match a specific key. This is used, for instance, to choose among a set of keys within a JWK Set during key rollover. The structure of the \\\"kid\\\" value is unspecified. When \\\"kid\\\" values are used within a JWK Set, different keys within the JWK Set SHOULD use distinct \\\"kid\\\" values. 
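\n\n        Editor's note (hedged usage sketch): \"kid\" exists so a verifier can pick\n        one key out of a JWK Set during rollover, e.g. over plain dicts::\n\n            next((k for k in jwks[\"keys\"] if k.get(\"kid\") == kid), None)\n\n        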
(One example in which different keys might use the same \\\"kid\\\" value is if they have different \\\"kty\\\" (key type) values but are considered to be equivalent alternatives by the application using them.) The \\\"kid\\\" value is a case-sensitive string. # noqa: E501\n\n :param kid: The kid of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and kid is None: # noqa: E501\n raise ValueError(\"Invalid value for `kid`, must not be `None`\") # noqa: E501\n\n self._kid = kid\n\n @property\n def kty(self):\n \"\"\"Gets the kty of this JSONWebKey. # noqa: E501\n\n The \\\"kty\\\" (key type) parameter identifies the cryptographic algorithm family used with the key, such as \\\"RSA\\\" or \\\"EC\\\". \\\"kty\\\" values should either be registered in the IANA \\\"JSON Web Key Types\\\" registry established by [JWA] or be a value that contains a Collision- Resistant Name. The \\\"kty\\\" value is a case-sensitive string. # noqa: E501\n\n :return: The kty of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._kty\n\n @kty.setter\n def kty(self, kty):\n \"\"\"Sets the kty of this JSONWebKey.\n\n The \\\"kty\\\" (key type) parameter identifies the cryptographic algorithm family used with the key, such as \\\"RSA\\\" or \\\"EC\\\". \\\"kty\\\" values should either be registered in the IANA \\\"JSON Web Key Types\\\" registry established by [JWA] or be a value that contains a Collision- Resistant Name. The \\\"kty\\\" value is a case-sensitive string. # noqa: E501\n\n :param kty: The kty of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and kty is None: # noqa: E501\n raise ValueError(\"Invalid value for `kty`, must not be `None`\") # noqa: E501\n\n self._kty = kty\n\n @property\n def n(self):\n \"\"\"Gets the n of this JSONWebKey. # noqa: E501\n\n\n :return: The n of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._n\n\n @n.setter\n def n(self, n):\n \"\"\"Sets the n of this JSONWebKey.\n\n\n :param n: The n of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._n = n\n\n @property\n def p(self):\n \"\"\"Gets the p of this JSONWebKey. # noqa: E501\n\n\n :return: The p of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._p\n\n @p.setter\n def p(self, p):\n \"\"\"Sets the p of this JSONWebKey.\n\n\n :param p: The p of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._p = p\n\n @property\n def q(self):\n \"\"\"Gets the q of this JSONWebKey. # noqa: E501\n\n\n :return: The q of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._q\n\n @q.setter\n def q(self, q):\n \"\"\"Sets the q of this JSONWebKey.\n\n\n :param q: The q of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._q = q\n\n @property\n def qi(self):\n \"\"\"Gets the qi of this JSONWebKey. # noqa: E501\n\n\n :return: The qi of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._qi\n\n @qi.setter\n def qi(self, qi):\n \"\"\"Sets the qi of this JSONWebKey.\n\n\n :param qi: The qi of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._qi = qi\n\n @property\n def use(self):\n \"\"\"Gets the use of this JSONWebKey. # noqa: E501\n\n Use (\\\"public key use\\\") identifies the intended use of the public key. The \\\"use\\\" parameter is employed to indicate whether a public key is used for encrypting data or verifying the signature on data. 
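\n\n        Editor's note (hedged illustration): filtering the signing keys out of a\n        JWK Set of plain dicts is then a one-liner::\n\n            [k for k in jwks[\"keys\"] if k.get(\"use\") == \"sig\"]\n\n        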
Values are commonly \\\"sig\\\" (signature) or \\\"enc\\\" (encryption). # noqa: E501\n\n :return: The use of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._use\n\n @use.setter\n def use(self, use):\n \"\"\"Sets the use of this JSONWebKey.\n\n Use (\\\"public key use\\\") identifies the intended use of the public key. The \\\"use\\\" parameter is employed to indicate whether a public key is used for encrypting data or verifying the signature on data. Values are commonly \\\"sig\\\" (signature) or \\\"enc\\\" (encryption). # noqa: E501\n\n :param use: The use of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n if self.local_vars_configuration.client_side_validation and use is None: # noqa: E501\n raise ValueError(\"Invalid value for `use`, must not be `None`\") # noqa: E501\n\n self._use = use\n\n @property\n def x(self):\n \"\"\"Gets the x of this JSONWebKey. # noqa: E501\n\n\n :return: The x of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._x\n\n @x.setter\n def x(self, x):\n \"\"\"Sets the x of this JSONWebKey.\n\n\n :param x: The x of this JSONWebKey. # noqa: E501\n :type: str\n \"\"\"\n\n self._x = x\n\n @property\n def x5c(self):\n \"\"\"Gets the x5c of this JSONWebKey. # noqa: E501\n\n The \\\"x5c\\\" (X.509 certificate chain) parameter contains a chain of one or more PKIX certificates [RFC5280]. The certificate chain is represented as a JSON array of certificate value strings. Each string in the array is a base64-encoded (Section 4 of [RFC4648] -- not base64url-encoded) DER [ITU.X690.1994] PKIX certificate value. The PKIX certificate containing the key value MUST be the first certificate. # noqa: E501\n\n :return: The x5c of this JSONWebKey. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._x5c\n\n @x5c.setter\n def x5c(self, x5c):\n \"\"\"Sets the x5c of this JSONWebKey.\n\n The \\\"x5c\\\" (X.509 certificate chain) parameter contains a chain of one or more PKIX certificates [RFC5280]. The certificate chain is represented as a JSON array of certificate value strings. Each string in the array is a base64-encoded (Section 4 of [RFC4648] -- not base64url-encoded) DER [ITU.X690.1994] PKIX certificate value. The PKIX certificate containing the key value MUST be the first certificate. # noqa: E501\n\n :param x5c: The x5c of this JSONWebKey. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._x5c = x5c\n\n @property\n def y(self):\n \"\"\"Gets the y of this JSONWebKey. # noqa: E501\n\n\n :return: The y of this JSONWebKey. # noqa: E501\n :rtype: str\n \"\"\"\n return self._y\n\n @y.setter\n def y(self, y):\n \"\"\"Sets the y of this JSONWebKey.\n\n\n :param y: The y of this JSONWebKey. 
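# noqa: E501\n        :type: str\n        \"\"\"\n\n        self._y = y\n\n    # --- Editor's note: hedged illustration, not part of the generated client.\n    # The __eq__/__ne__ methods below compare instances via to_dict(), so two\n    # JSONWebKey objects holding identical member values compare equal even\n    # though they are distinct objects.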
\n\n    def to_dict(self):\n        \"\"\"Returns the model properties as a dict\"\"\"\n        result = {}\n\n        for attr, _ in six.iteritems(self.openapi_types):\n            value = getattr(self, attr)\n            if isinstance(value, list):\n                result[attr] = list(map(\n                    lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n                    value\n                ))\n            elif hasattr(value, \"to_dict\"):\n                result[attr] = value.to_dict()\n            elif isinstance(value, dict):\n                result[attr] = dict(map(\n                    lambda item: (item[0], item[1].to_dict())\n                    if hasattr(item[1], \"to_dict\") else item,\n                    value.items()\n                ))\n            else:\n                result[attr] = value\n\n        return result\n\n    def to_str(self):\n        \"\"\"Returns the string representation of the model\"\"\"\n        return pprint.pformat(self.to_dict())\n\n    def __repr__(self):\n        \"\"\"For `print` and `pprint`\"\"\"\n        return self.to_str()\n\n    def __eq__(self, other):\n        \"\"\"Returns true if both objects are equal\"\"\"\n        if not isinstance(other, JSONWebKey):\n            return False\n\n        return self.to_dict() == other.to_dict()\n\n    def __ne__(self, other):\n        \"\"\"Returns true if both objects are not equal\"\"\"\n        if not isinstance(other, JSONWebKey):\n            return True\n\n        return self.to_dict() != other.to_dict()\n","sub_path":"clients/hydra/python/ory_hydra_client/models/json_web_key.py","file_name":"json_web_key.py","file_ext":"py","file_size_in_byte":16232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"232443829","text":"\"\"\"\nProvides an APIView class that is the base of all views in REST framework.\n\"\"\"\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import connection, models, transaction\nfrom django.http import Http404\nfrom django.http.response import HttpResponseBase\nfrom django.utils.cache import cc_delim_re, patch_vary_headers\nfrom django.utils.encoding import smart_text\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic import View\n\nfrom rest_framework import exceptions, status\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.schemas import DefaultSchema\nfrom rest_framework.settings import api_settings\nfrom rest_framework.utils import formatting\n\n\ndef get_view_name(view):\n    \"\"\"\n    Given a view instance, return a textual name to represent the view.\n    This name is used in the browsable API, and in OPTIONS responses.\n    This function is the default for the VIEW_NAME_FUNCTION setting.\n    \"\"\"\n    # The name may be set by some views, such as a ViewSet.\n    name = getattr(view, 'name', None)\n    if name is not None:\n        return name\n\n    name = view.__class__.__name__\n    name = formatting.remove_trailing_string(name, 'View')\n    name = formatting.remove_trailing_string(name, 'ViewSet')\n    name = formatting.camelcase_to_spaces(name)\n\n    # The suffix may be set by some views, such as a ViewSet.\n    suffix = getattr(view, 'suffix', None)\n    if suffix:\n        name += ' ' + suffix\n\n    return name\n\n\ndef get_view_description(view, html=False):\n    \"\"\"\n    Given a view instance, return a textual description to represent the view.\n    This description is used in the browsable API, and in OPTIONS responses.\n    This function is the default for the VIEW_DESCRIPTION_FUNCTION setting.\n    \"\"\"\n    # The description may be set by some views, such as a ViewSet.\n    description = getattr(view, 'description', None)\n    if description is None:\n        description = view.__class__.__doc__ or ''\n\n    description = formatting.dedent(smart_text(description))\n    if html:\n        return formatting.markup_description(description)\n    return description\n\n\ndef set_rollback():\n    atomic_requests = connection.settings_dict.get('ATOMIC_REQUESTS', False)\n    if atomic_requests and connection.in_atomic_block:\n        transaction.set_rollback(True)\n\n\ndef exception_handler(exc, context):\n    \"\"\"\n    
Returns the response that should be used for any given exception. By default\n    we handle the REST framework APIException, and also Django's built-in\n    Http404 and PermissionDenied exceptions.\n    Any unhandled exceptions may return `None`, which will cause a 500 error\n    to be raised.\n    \"\"\"\n    if isinstance(exc, Http404):\n        exc = exceptions.NotFound()\n    elif isinstance(exc, PermissionDenied):\n        exc = exceptions.PermissionDenied()\n\n    if isinstance(exc, exceptions.APIException):\n        headers = {}\n        if getattr(exc, 'auth_header', None):\n            headers['WWW-Authenticate'] = exc.auth_header\n        if getattr(exc, 'wait', None):\n            headers['Retry-After'] = '%d' % exc.wait\n\n        if isinstance(exc.detail, (list, dict)):\n            data = exc.detail\n        else:\n            data = {'detail': exc.detail}\n\n        set_rollback()  # invoke the rollback helper defined above\n        return Response(data, status=exc.status_code, headers=headers)\n\n    return None\n\n\nclass APIView(View):\n    # The following policies may be set either globally, or per-view.\n    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES\n    parser_classes = api_settings.DEFAULT_PARSER_CLASSES\n    authentication_classes = api_settings.DEFAULT_AUTHENTICATION_CLASSES\n    throttle_classes = api_settings.DEFAULT_THROTTLE_CLASSES\n    permission_classes = api_settings.DEFAULT_PERMISSION_CLASSES\n    content_negotiation_class = api_settings.DEFAULT_CONTENT_NEGOTIATION_CLASS\n    metadata_class = api_settings.DEFAULT_METADATA_CLASS\n    versioning_class = api_settings.DEFAULT_VERSIONING_CLASS\n\n    # Allow dependency injection of other settings to make testing easier.\n    settings = api_settings\n\n    schema = DefaultSchema()\n\n    @classmethod\n    def as_view(cls, **initkwargs):\n        \"\"\"\n        Store the original class on the view function. This allows us to\n        discover information about the view when we do URL reverse lookups.\n        Used for breadcrumb generation.\n        \"\"\"\n        if isinstance(getattr(cls, 'queryset', None), models.query.QuerySet):\n            def force_evaluation():\n                raise RuntimeError(\n                    'Do not evaluate the `.queryset` attribute directly, '\n                    'as the result will be cached and reused between requests. '\n                    'Use `.all()` or call `.get_queryset()` instead.'\n                )\n\n            cls.queryset._fetch_all = force_evaluation\n\n        view = super().as_view(**initkwargs)  # call the parent class's as_view\n        view.cls = cls\n        view.initkwargs = initkwargs\n\n        # Note: session based authentication is explicitly CSRF validated,\n        # all other authentication is CSRF exempt.\n        return csrf_exempt(view)\n\n    @property\n    def allowed_methods(self):\n        \"\"\"\n        Wrap Django's private `_allowed_methods` interface in a public property.\n        \"\"\"\n        return self._allowed_methods()\n\n    @property\n    def default_response_headers(self):  # build the default response headers\n        headers = {\n            'Allow': ', '.join(self.allowed_methods),\n        }\n        if len(self.renderer_classes) > 1:\n            headers['Vary'] = 'Accept'\n        return headers\n\n    def http_method_not_allowed(self, request, *args, **kwargs):\n        \"\"\"\n        If `request.method` does not correspond to a handler method,\n        determine what kind of exception to raise.\n        \"\"\"\n        raise exceptions.MethodNotAllowed(request.method)\n\n    def permission_denied(self, request, message=None):\n        \"\"\"\n        If the request is not permitted, determine what kind of exception to raise.\n        \"\"\"\n        if request.authenticators and not request.successful_authenticator:\n            raise exceptions.NotAuthenticated()\n        raise exceptions.PermissionDenied(detail=message)\n\n    def throttled(self, request, wait):\n        \"\"\"\n        If the request is throttled, determine what kind of exception to raise.\n        \"\"\"\n        raise exceptions.Throttled(wait)\n\n    def get_authenticate_header(self, request):\n        \"\"\"\n        If a request is unauthenticated, determine the WWW-Authenticate\n        header to use for 401 responses, if any.\n        \"\"\"\n        authenticators = self.get_authenticators()\n        if authenticators:\n            return authenticators[0].authenticate_header(request)\n\n    def get_parser_context(self, http_request):\n        \"\"\"\n        Returns a dict that is passed through to Parser.parse(),\n        as the `parser_context` keyword argument.\n        \"\"\"\n        # Note: Additionally `request` and `encoding` will also be added\n        #       to the context by the Request object.\n        return {\n            'view': self,\n            'args': getattr(self, 'args', ()),\n            'kwargs': getattr(self, 'kwargs', {})\n        }\n\n    def get_renderer_context(self):\n        \"\"\"\n        Returns a dict that is passed through to Renderer.render(),\n        as the `renderer_context` keyword argument.\n        \"\"\"\n        # Note: Additionally `response` will also be added to the context\n        #       by the Response object.\n        return {\n            'view': 
self,\n            'args': getattr(self, 'args', ()),\n            'kwargs': getattr(self, 'kwargs', {}),\n            'request': getattr(self, 'request', None)\n        }\n\n    def get_exception_handler_context(self):\n        \"\"\"\n        Returns a dict that is passed through to EXCEPTION_HANDLER,\n        as the `context` argument.\n        \"\"\"\n        return {\n            'view': self,\n            'args': getattr(self, 'args', ()),\n            'kwargs': getattr(self, 'kwargs', {}),\n            'request': getattr(self, 'request', None)\n        }\n\n    def get_view_name(self):\n        \"\"\"\n        Return the view name, as used in OPTIONS responses and in the\n        browsable API.\n        \"\"\"\n        func = self.settings.VIEW_NAME_FUNCTION\n        return func(self)\n\n    def get_view_description(self, html=False):\n        \"\"\"\n        Return some descriptive text for the view, as used in OPTIONS\n        responses and in the browsable API.\n        \"\"\"\n        func = self.settings.VIEW_DESCRIPTION_FUNCTION\n        return func(self, html)\n\n    # API policy instantiation methods\n    def get_format_suffix(self, **kwargs):\n        \"\"\"\n        Determine if the request includes a '.json' style format suffix\n        \"\"\"\n        if self.settings.FORMAT_SUFFIX_KWARG:\n            return kwargs.get(self.settings.FORMAT_SUFFIX_KWARG)\n\n    def get_renderers(self):\n        \"\"\"\n        Instantiates and returns the list of renderers that this view can use.\n        \"\"\"\n        return [renderer() for renderer in self.renderer_classes]\n\n    def get_parsers(self):\n        \"\"\"\n        Instantiates and returns the list of parsers that this view can use.\n        \"\"\"\n        return [parser() for parser in self.parser_classes]\n\n    def get_authenticators(self):\n        \"\"\"\n        Instantiates and returns the list of authenticators that this view can use.\n        \"\"\"\n        return [auth() for auth in self.authentication_classes]\n\n    def get_permissions(self):\n        \"\"\"\n        Instantiates and returns the list of permissions that this view requires.\n        \"\"\"\n        return [permission() for permission in self.permission_classes]\n\n    def get_throttles(self):\n        \"\"\"\n        Instantiates and returns the list of throttles that this view uses.\n        \"\"\"\n        return [throttle() for throttle in self.throttle_classes]\n\n    def get_content_negotiator(self):\n        \"\"\"\n        Instantiate and return the content negotiation class to use.\n        \"\"\"\n        if not getattr(self, '_negotiator', None):\n            self._negotiator = self.content_negotiation_class()\n        return self._negotiator\n\n    def get_exception_handler(self):\n        \"\"\"\n        Returns the exception handler that this view uses.\n        \"\"\"\n        return self.settings.EXCEPTION_HANDLER\n\n    # API policy implementation methods\n\n    def perform_content_negotiation(self, request, force=False):\n        \"\"\"\n        Determine which renderer and media type to use to render the response.\n        \"\"\"\n        renderers = self.get_renderers()\n        conneg = self.get_content_negotiator()\n\n        try:\n            return conneg.select_renderer(request, renderers, self.format_kwarg)\n        except Exception:\n            if force:\n                return (renderers[0], renderers[0].media_type)\n            raise\n\n    def perform_authentication(self, request):\n        \"\"\"\n        Perform authentication on the incoming request.\n\n        Note that if you override this and simply 'pass', then authentication\n        will instead be performed lazily, the first time either\n        `request.user` or `request.auth` is accessed.\n        \"\"\"\n        request.user\n\n    def check_permissions(self, request):\n        \"\"\"\n        Check if the request should be permitted.\n        Raises an appropriate exception if the request is not permitted.\n        \"\"\"\n        for permission in self.get_permissions():\n            if not permission.has_permission(request, self):\n                self.permission_denied(\n                    request, message=getattr(permission, 'message', None)\n                )\n\n    def check_object_permissions(self, request, obj):\n        \"\"\"\n        Check if the request should be permitted for a given object.\n        Raises an appropriate exception if the request is not permitted.\n        \"\"\"\n        for permission in self.get_permissions():\n            if not permission.has_object_permission(request, self, obj):\n                self.permission_denied(\n                    request, message=getattr(permission, 'message', None)\n                )\n\n    def check_throttles(self, request):\n        \"\"\"\n        Check if the request should be throttled.\n        Raises an appropriate exception if the request is throttled.\n        \"\"\"\n        throttle_durations = []\n        for throttle in self.get_throttles():\n            if not throttle.allow_request(request, self):\n                throttle_durations.append(throttle.wait())\n\n        if throttle_durations:\n            # Filter out `None` values which may happen in case of config /\n            # rate changes, see #1438\n            durations = [\n                duration for duration in throttle_durations\n                if duration is not None\n            ]\n\n            duration = max(durations, default=None)\n            self.throttled(request, duration)\n
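\n    # --- Editor's note: hedged usage sketch, not part of rest_framework itself.\n    # Projects typically customize error payloads by wrapping the module-level\n    # exception_handler defined earlier in this file and pointing the\n    # REST_FRAMEWORK[\"EXCEPTION_HANDLER\"] setting at the wrapper:\n    #\n    #     def custom_exception_handler(exc, context):\n    #         response = exception_handler(exc, context)  # default handler above\n    #         if response is not None:\n    #             # annotate the body with the status code for API clients\n    #             response.data['status_code'] = response.status_code\n    #         return response\n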
\n    def determine_version(self, request, *args, **kwargs):\n        \"\"\"\n        If versioning is being used, then determine any API version for the\n        incoming request. Returns a two-tuple of (version, versioning_scheme).\n        \"\"\"\n        if self.versioning_class is None:\n            return (None, None)\n        scheme = self.versioning_class()\n        return (scheme.determine_version(request, *args, **kwargs), scheme)\n\n    # Dispatch methods\n\n    def initialize_request(self, request, *args, **kwargs):\n        \"\"\"\n        Returns the initial request object.\n        \"\"\"\n        parser_context = self.get_parser_context(request)\n\n        return Request(\n            request,\n            parsers=self.get_parsers(),\n            authenticators=self.get_authenticators(),\n            negotiator=self.get_content_negotiator(),\n            parser_context=parser_context\n        )\n\n    def initial(self, request, *args, **kwargs):\n        \"\"\"\n        Runs anything that needs to occur prior to calling the method handler.\n        \"\"\"\n        self.format_kwarg = self.get_format_suffix(**kwargs)\n\n        # Perform content negotiation and store the accepted info on the request\n        neg = self.perform_content_negotiation(request)\n        request.accepted_renderer, request.accepted_media_type = neg\n\n        # Determine the API version, if versioning is in use.\n        version, scheme = self.determine_version(request, *args, **kwargs)\n        request.version, request.versioning_scheme = version, scheme\n\n        # Ensure that the incoming request is permitted\n        self.perform_authentication(request)\n        self.check_permissions(request)\n        self.check_throttles(request)\n\n    def finalize_response(self, request, response, *args, **kwargs):\n        \"\"\"\n        Returns the final response object.\n        \"\"\"\n        # Make the error obvious if a proper response is not returned\n        assert isinstance(response, HttpResponseBase), (\n            'Expected a `Response`, `HttpResponse` or `HttpStreamingResponse` '\n            'to be returned from the view, but received a `%s`'\n            % type(response)\n        )\n\n        if isinstance(response, Response):\n            if not getattr(request, 'accepted_renderer', None):\n                neg = self.perform_content_negotiation(request, force=True)\n                request.accepted_renderer, request.accepted_media_type = neg\n\n            response.accepted_renderer = request.accepted_renderer\n            response.accepted_media_type = request.accepted_media_type\n            response.renderer_context = self.get_renderer_context()\n\n        # Add new vary headers to the response instead of overwriting.\n        vary_headers = self.headers.pop('Vary', None)\n        if vary_headers is not None:\n            patch_vary_headers(response, cc_delim_re.split(vary_headers))\n\n        for key, value in self.headers.items():\n            response[key] = value\n\n        return response\n\n    def handle_exception(self, exc):\n        \"\"\"\n        Handle any exception that occurs, by returning an appropriate response,\n        or re-raising the error.\n        \"\"\"\n        if isinstance(exc, (exceptions.NotAuthenticated,\n                            exceptions.AuthenticationFailed)):\n            # WWW-Authenticate header for 401 responses, else coerce to 403\n            auth_header = self.get_authenticate_header(self.request)\n\n            if auth_header:\n                exc.auth_header = auth_header\n            else:\n                exc.status_code = status.HTTP_403_FORBIDDEN\n\n        exception_handler = self.get_exception_handler()\n\n        context = self.get_exception_handler_context()\n        response = exception_handler(exc, context)\n\n        if response is None:\n            self.raise_uncaught_exception(exc)\n\n        response.exception = True\n        return response\n\n    def raise_uncaught_exception(self, exc):\n        if settings.DEBUG:\n            request = self.request\n            renderer_format = getattr(request.accepted_renderer, 'format')\n            use_plaintext_traceback = renderer_format not in ('html', 'api', 'admin')\n            request.force_plaintext_errors(use_plaintext_traceback)\n        raise exc\n\n    # Note: Views are made CSRF exempt from within `as_view` so as to prevent\n    # accidental removal of this exemption in cases where `dispatch` needs to\n    # be overridden.\n    def dispatch(self, request, *args, **kwargs):\n        \"\"\"\n        `.dispatch()` is pretty much the same as Django's regular dispatch,\n        but with extra hooks for startup, finalize, and exception handling.\n        \"\"\"\n        self.args = args\n        self.kwargs = kwargs\n        request = self.initialize_request(request, *args, **kwargs)\n        self.request = request\n        self.headers = self.default_response_headers  # deprecate?\n\n        try:\n            self.initial(request, *args, **kwargs)\n\n            # Get the appropriate handler method\n            if request.method.lower() in 
self.http_method_names:\n                handler = getattr(self, request.method.lower(),\n                                  self.http_method_not_allowed)\n            else:\n                handler = self.http_method_not_allowed\n\n            response = handler(request, *args, **kwargs)\n\n        except Exception as exc:\n            response = self.handle_exception(exc)\n\n        self.response = self.finalize_response(request, response, *args, **kwargs)\n        return self.response\n\n    def options(self, request, *args, **kwargs):\n        \"\"\"\n        Handler method for HTTP 'OPTIONS' requests.\n        \"\"\"\n        if self.metadata_class is None:\n            return self.http_method_not_allowed(request, *args, **kwargs)\n        data = self.metadata_class().determine_metadata(request, self)\n        return Response(data, status=status.HTTP_200_OK)\n","sub_path":"SourceCode/rest_framework/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"458349689","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 19 15:15:13 2019\n\n@author: rpira\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nimport numpy as np\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.models import load_model\n\nimport time\nstart_time = time.time()\n\n## Load the dataset\ndataset = pd.read_csv('dataset.csv')  # read the data set using pandas\nInputData = dataset.copy()\n\n# Split features from labels\nInputData.pop('P')\nInputData.pop('D')\nInputData.pop('Time')\nInputData.pop('Error')\nOutputData = InputData.pop('Energy')\n\ntrain_dataset = InputData.sample(frac=.80, random_state=0)\ntest_dataset = InputData.drop(train_dataset.index)\n\ntrain_labels = OutputData.sample(frac=.80, random_state=0)\ntest_labels = OutputData.drop(train_dataset.index)\n\n# Look at the overall statistics\nData_stats = dataset.describe()\nData_stats = Data_stats.transpose()\ntrain_stats = train_dataset.describe()\nOutput_stats = test_labels.describe()\ntrain_stats = train_stats.transpose()\nOutput_stats = Output_stats.transpose()\n\nprint(train_stats)\nprint(Output_stats)\nprint(Data_stats)\n\n## Normalization of the training data\ndef norm(x):\n    return (x - train_stats['mean']) / train_stats['std']\nnormed_train_data = norm(train_dataset)\nnormed_test_data = norm(test_dataset)\n\n\nN0 = 2560\nk0 = layers.Dense(N0, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())])\nN1 = 160\nk1 = layers.Dense(N1, activation=tf.nn.relu)\nN2 = 160\nk2 = layers.Dense(N2, activation=tf.nn.relu)\nkend = layers.Dense(1)\nk = [k1] + [k2]\nm = [k0] + k + [kend]\n\n## Build the model\ndef build_model():\n    model = keras.Sequential(m)\n\n    optimizer = tf.keras.optimizers.RMSprop(0.001)\n    model.compile(loss='mean_absolute_percentage_error',\n                  optimizer=optimizer,\n                  metrics=['mean_squared_error', 'mean_absolute_percentage_error'])\n    return model\nmodel = build_model()\n\n## Using the epoch approach\nEPOCHS = 100\n\nmodel = build_model()\n\n## Using the cross validation approach\n# The patience parameter is the number of epochs to wait for improvement\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=100)\n#mc = ModelCheckpoint(filepath='best_model.h5', monitor='val_loss', verbose=0, save_best_only=True)\nhistory = model.fit(normed_train_data, train_labels, epochs=EPOCHS,\n                    validation_split=0.2, verbose=1, callbacks=[early_stop])\n## saving the model\n#MatrixModelAll.append(model)\n
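# --- Editor's note: hedged sketch, not part of the original script. The\n# commented-out ModelCheckpoint above was one way to keep the best model;\n# recent tf.keras versions expose the same effect directly on the callback\n# used above:\n#\n#     early_stop = keras.callbacks.EarlyStopping(\n#         monitor='val_loss', patience=100, restore_best_weights=True)\n#\n## Testing 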
error\n#saved_model = load_model('best_model.h5')\n#loss, mse, mape = saved_model.evaluate(normed_test_data, test_labels, verbose=0)\n#print(mape)\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n","sub_path":"OptimalCoresTest/energy.py","file_name":"energy.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"158524697","text":"import sys\r\nnum = int(input())\r\nli = []\r\nitmp=0\r\nfor _ in range(num):\r\n li.append(str(sys.stdin.readline()).strip())\r\nfor i in range(len(li[0])):\r\n litmp=[]\r\n for j in range(num):\r\n litmp.append(li[j][len(li[0])-1-i:])\r\n liset = set(litmp)\r\n if len(litmp)==len(liset):\r\n itmp=i+1\r\n break\r\nprint(itmp)","sub_path":"powerful104/1235.py","file_name":"1235.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"346596296","text":"from typing import List\n\n\nboss_stats = \"\"\"Hit Points: 104\nDamage: 8\nArmor: 1\"\"\"\n\nclass Character:\n def __init__(self, health: int, damage: int, armor: int):\n self.health = health\n self.damage = damage\n self.armor = armor\n \n def attack(self, other: \"Character\") -> None:\n other.health -= max(self.damage - other.armor, 1)\n\n\nclass Player(Character):\n def __init__(self):\n super().__init__(100, 0, 0)\n self._inventory = []\n self.total_spent = 0\n \n def equip(self, item: List[int]) -> None:\n cost, dam, arm = item\n self.total_spent += cost\n self.damage += dam\n self.armor += arm\n self._inventory.append(item)\n \n\nweapons = [ # pick one\n [8, 4, 0],\n [10, 5, 0],\n [25, 6, 0],\n [40, 7, 0],\n [74, 8, 0]\n]\n\narmors = [ # pick 0 or one\n [0, 0, 0],\n [13, 0, 1],\n [31, 0, 2],\n [53, 0, 3],\n [75, 0, 4],\n [102, 0, 5]\n]\n\nrings = [ # pick 0-2\n [0, 0, 0],\n [0, 0, 0],\n [25, 1, 0],\n [50, 2, 0],\n [100, 3, 0],\n [20, 0, 1],\n [40, 0, 2],\n [80, 0, 3]\n]\n\nlowest_cost = None\nfor weapon in weapons:\n for armor in armors:\n for i, first_ring in enumerate(rings):\n for second_ring in rings[i+1:]:\n boss = Character(104, 8, 1)\n player = Player()\n player.equip(weapon)\n player.equip(armor)\n player.equip(first_ring)\n player.equip(second_ring)\n\n if lowest_cost is not None and lowest_cost < player.total_spent:\n continue\n \n # do the battle\n while True:\n player.attack(boss)\n if boss.health < 1: # victory\n if lowest_cost is None or player.total_spent < lowest_cost:\n lowest_cost = player.total_spent\n break\n \n boss.attack(player)\n if player.health < 1: # defeat\n break\n\nprint(lowest_cost)\n\n# answer: 78","sub_path":"2015/Day 21/day_21_part1.py","file_name":"day_21_part1.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"59881035","text":"# -*- coding:utf-8 -*- ?\nimport telebot\nfrom math import acos, cos, sin, radians, pi , atan2, sqrt \n\n#distance between points (metrs)\ndef dist(latitude1, longitude1, latitude2, longitude2):\n \n latitude1 = float(latitude1)\n latitude2 = float(latitude2)\n longitude1 = float(longitude1)\n longitude2 = float(longitude2)\n \n latitude1 = radians(latitude1)\n longitude1 = radians(longitude1)\n latitude2 = radians(latitude2)\n longitude2 = radians(longitude2)\n r = 6372.797\n \n delta_latitude = latitude1-latitude2\n delta_longitude = longitude1-longitude2\n \n a = sin(delta_latitude / 2) * sin(delta_latitude / 2) + cos(latitude1) * cos(latitude2) * 
sin(delta_longitude / 2) * sin(delta_longitude / 2)\n    c = 2 * atan2(sqrt(a), sqrt(1 - a))\n    dist_metrs = int(r * c * 1000)\n    return dist_metrs\n\n# generate a reply-markup keyboard from a list of button labels\n# a \"-p\" flag in a label marks it as a request_location button\ndef generateMarkup(keypad):\n    markup = telebot.types.ReplyKeyboardMarkup(resize_keyboard=True)\n    for text in keypad:\n        if text.count(\"-p\"):\n            text = text.replace(\"-p\", \"\")\n            request_location = True\n        else: request_location = False\n        button = telebot.types.KeyboardButton(text, request_location = request_location)\n        markup.add(button)\n    return markup","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"36531443","text":"# Classify image files\nimport os\nimport shutil\nimport glob\nimport re\nimport pandas as pd\n# csv file to read\ncsv = '/Muffin_end.csv'\n# folder number (company)\nnum = '0'\n# folder name (genre)\ngenre = 'Muffin'\n# ground-truth class\ntrue_class = genre\ndf = pd.read_csv(csv)\nclass_list = []\nclass_obj = []\nfor index, row in df.iterrows():\n    point = row['index'].find('/00')+1\n    if class_list.count(row['class']) == 0:\n        class_list.append(row['class'])\nfor item in range(len(class_list)):\n    tmp = []\n    for index, row in df.iterrows():\n        if row['class'] == class_list[item]:\n            tmp.append(row['index'][point:])\n    class_obj.append(tmp)\npath1 = 'mydata/'+num+'/'+genre+'/正解'\nos.makedirs(path1, exist_ok=True)\npath2 = 'mydata/'+num+'/'+genre+'/不正解'\nos.makedirs(path2, exist_ok=True)\nfor i in range(len(class_obj)):\n    new_dir_path_recursive = 'mydata/'+num+'/'+genre+'/'+class_list[i]\n    for item in class_obj[i]:\n        image_path = 'drive/My Drive/Colab Notebooks/dataset/'+num+'/'+genre+'/'+item\n        files = glob.glob(image_path)\n        for file in files:\n            if new_dir_path_recursive == 'mydata/'+num+'/'+genre+'/'+true_class:\n                new_file_path = 'mydata/'+num+'/'+genre+'/正解'\n                shutil.copy(file, new_file_path)\n            else:\n                new_file_path = 'mydata/'+num+'/'+genre+'/不正解'\n                shutil.copy(file, new_file_path)","sub_path":"cd.py","file_name":"cd.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"437639482","text":"from casbin import persist\nfrom sqlobject import SQLObject, StringCol, sqlhub, connectionForURI\n\n\nclass CasbinRule(SQLObject):\n    class sqlmeta:\n\n        table = \"casbin_rule\"\n\n    ptype = StringCol(length=255)\n    v0 = StringCol(length=255, default=None)\n    v1 = StringCol(length=255, default=None)\n    v2 = StringCol(length=255, default=None)\n    v3 = StringCol(length=255, default=None)\n    v4 = StringCol(length=255, default=None)\n    v5 = StringCol(length=255, default=None)\n\n    def __str__(self):\n        arr = [self.ptype]\n        for v in (self.v0, self.v1, self.v2, self.v3, self.v4, self.v5):\n            if v is None:\n                break\n            arr.append(v)\n        return \", \".join(arr)\n\n    def __repr__(self):\n        return '<CasbinRule {}: \"{}\">'.format(self.id, str(self))\n\n\nclass Adapter(persist.Adapter):\n    \"\"\"The interface for Casbin adapters.\"\"\"\n\n    def __init__(self, connection_string):\n        self._conhandler = connectionForURI(connection_string)\n        sqlhub.processConnection = self._conhandler\n\n    def load_policy(self, model):\n        \"\"\"loads all policy rules from the storage.\"\"\"\n        # iterate the rows directly rather than assuming contiguous ids\n        for line in CasbinRule.select():\n            persist.load_policy_line(str(line), model)\n\n    def _save_policy_line(self, ptype, rule):\n        # create a new row; SQLObject persists each attribute assignment below\n        line = CasbinRule(ptype=ptype)\n        for i, v in 
enumerate(rule):\n setattr(line, \"v{}\".format(i), v)\n\n def save_policy(self, model):\n \"\"\"saves all policy rules to the storage.\"\"\"\n for sec in [\"p\", \"g\"]:\n if sec not in model.model.keys():\n continue\n for ptype, ast in model.model[sec].items():\n for rule in ast.policy:\n self._save_policy_line(ptype, rule)\n\n return True\n\n def add_policy(self, sec, ptype, rule):\n \"\"\"adds a policy rule to the storage.\"\"\"\n self._save_policy_line(ptype, rule)\n\n def remove_policy(self, sec, ptype, rule):\n \"\"\"removes a policy rule from the storage.\"\"\"\n pass\n\n def remove_filtered_policy(self, sec, ptype, field_index, *field_values):\n \"\"\"removes policy rules that match the filter from the storage.\n This is part of the Auto-Save feature.\n \"\"\"\n pass\n","sub_path":"casbin_sqlobject_adapter/adapter.py","file_name":"adapter.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"24323512","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 31 21:55:36 2017\n\n@author: Nicholas\n\"\"\"\nimport numpy as np\nimport my_quat_ops as quat\nimport my_conversions as rbm_conv\nimport my_sigma_points as spoints\nimport euler_short as euler\nimport matplotlib.pyplot as plt\n\ndata_A = data_imu[0:3,:]\ndata_omega = data_imu[3:6,:]\n\n#saving data elsewhere\n#np.savetxt(\"data_imu.csv\", data_imu, delimiter=\",\")\n#np.savetxt(\"t_imu.csv\", t_imu, delimiter=\",\")\n\n#attempting to use other data\n#data_imu = np.genfromtxt('my_file.csv', delimiter=',')\n#t_imu = \n#%% UKF\n\nQ = .001*np.eye(3) #.002 .05 . even better: .001 and .01\nR = .01*np.eye(3) \n\ng_quat = np.array([0, 0, 0, 1])\n\n#old weights\n#alpha_mu = 1/2.0 #1/2.0 #weighting for mean\n#alpha_cov = 1/2.0\n\nalpha_mu = 0 #1/2.0 #weighting for mean\nalpha_cov = 2.0\n\nn_steps = t_imu.shape[1]\n\nx_hat = np.zeros((4,n_steps))\nx_hat[:,0] = np.array([1,0,0,0]) \nP_hat = np.zeros((3,3,n_steps))\nP_hat[:,:,0] = .0001*np.eye(3)\n\nx_ap = np.zeros((4,n_steps))\nP_ap = np.zeros((3,3,n_steps))\nz_ap = np.zeros((3,n_steps))\nP_zz = np.zeros((3,3,n_steps))\n\nnu = np.zeros((3,n_steps))\nP_nu = np.zeros((3,3,n_steps))\n\nP_xz = np.zeros((3,3,n_steps))\n\nK = np.zeros((3,3,n_steps)) \n\nX = np.zeros((4,7)) # ~ sigma points for x_hat[i]\nY = np.zeros((4,7)) # ~ sigma points for x_ap[i+1]\nZ = np.zeros((3,7)) # ~ sigma points for z_ap[i+1]\n\nupdate = True\n\nfor i_ukf in range(data_imu.shape[1]-1): #for every sample in data_imu \n \n dt = t_imu[0,i_ukf+1] - t_imu[0,i_ukf]\n \n #prediction step\n \n #describe x_hat in sigma points \n X = spoints.gen_sigma_points(x_hat[:,i_ukf],P_hat[:,:,i_ukf],Q)\n \n #propogate sigma points through process model A\n for i_spoints in range(X.shape[1]):\n Y[:,i_spoints] = quat.multiply(X[:,i_spoints], rbm_conv.w2q_exp(data_omega[:,i_ukf]*dt))\n \n #determine a priori estimates by analyzing sigma points\n #breaks when guess is 0\n #w_prime is 3x7 matrix containing deviations of each sigma point from mean in vector space\n x_ap[:,i_ukf+1],P_ap[:,:,i_ukf+1],W_prime = spoints.mean_covar_quat(Y,alpha_mu, alpha_cov,x_hat[:,i_ukf])\n \n x_hat[:,i_ukf+1] = x_ap[:,i_ukf+1]#remove this after update step works \n P_hat[:,:,i_ukf+1] = P_ap[:,:,i_ukf+1]\n if update:\n #update step\n \n #propogate sigma points of x_ap through measurement model H\n for i_spoints in range(X.shape[1]):\n Z[:,i_spoints] = quat.multiply( np.hstack((Y[0,i_spoints],-Y[1:4,i_spoints])), quat.multiply(g_quat,Y[:,i_spoints]))[1:4]\n \n #find 
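        # --- Editor's note: hedged sketch, not part of the original script. The\n        # propagation step above leans on rbm_conv.w2q_exp, a rotation-vector to\n        # quaternion exponential map; the standard axis-angle formula it is\n        # assumed to implement (scalar-first, matching x_hat[:,0] = [1,0,0,0]):\n        #\n        #     def w2q_exp(w):\n        #         angle = np.linalg.norm(w)\n        #         if angle < 1e-12:\n        #             return np.array([1.0, 0.0, 0.0, 0.0])  # identity rotation\n        #         axis = w / angle\n        #         return np.hstack((np.cos(angle/2.0), axis*np.sin(angle/2.0)))\n        #\n        #find 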
z_ap and P_zz from Z.\n z_ap[:,i_ukf+1], P_zz[:,:,i_ukf+1] = spoints.mean_covar_vect(Z,alpha_mu, alpha_cov) \n nu[:,i_ukf+1] = data_A[:,i_ukf+1] - z_ap[:,i_ukf+1]\n P_nu[:,:,i_ukf+1] = P_zz[:,:,i_ukf+1] + R \n P_xz[:,:,i_ukf+1] = spoints.P_XZ(W_prime, Z, alpha_mu, alpha_cov) \n K[:,:,i_ukf+1] = P_xz[:,:,i_ukf+1].dot(np.linalg.inv(P_nu[:,:,i_ukf+1]))\n \n x_hat[:,i_ukf+1] = quat.multiply(x_ap[:,i_ukf+1],rbm_conv.w2q_exp(K[:,:,i_ukf+1].dot(nu[:,i_ukf+1])))\n P_hat[:,:,i_ukf+1] = P_ap[:,:,i_ukf+1] - K[:,:,i_ukf+1].dot(P_nu[:,:,i_ukf+1].dot(K[:,:,i_ukf+1].T))\n \n print('timestep = ' + str(i_ukf+1) + '/' +str(data_imu.shape[1]-1))","sub_path":"baselineUKFtrialPython/new_ukf_4d.py","file_name":"new_ukf_4d.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"464571847","text":"import numpy as np\nimport warnings\nimport astropy.units as u\n\nfrom MulensModel.mulensdata import MulensData\nfrom MulensModel.trajectory import Trajectory\nfrom MulensModel.modelparameters import ModelParameters\nfrom MulensModel.utils import Utils\n\n\nclass FitData:\n \"\"\"\n Performs a least squares linear fit for given dataset and model to\n determine the source flux(es) and (optionally) blend flux. After creating\n the object, you must run :py:func:`~update()` to perform the linear fit for\n the fluxes and calculate the chi2. To perform the linear fit without\n calculating chi2, you can run :py:func:`fit_fluxes()`. If you change\n anything in the object, e.g. the model parameters, you *must* re-run\n :py:func:`~update()` or :py:func:`~fit_fluxes()`.\n\n Arguments :\n model: :py:class:`~MulensModel.model.Model` object\n The model to fit to the data.\n\n dataset: :py:class:`~MulensModel.mulensdata.MulensData` object\n A single photometric dataset to be fitted.\n\n fix_blend_flux: *False* or *float*, optional\n Default is *False*, i.e. allow the blend flux to be a free\n parameter. If set to a float, it will fix the blend value to that\n value.\n\n fix_source_flux: *False*, *float*, or *list*, optional\n Default is *False*, i.e. allow the source flux to be a free\n parameter. If set to a float, it will fix the source value to that\n value. For binary source models, a list should be used to set the\n fluxes of the individual sources or fix one and not the other, e.g.\n [2.3, False] would fix source_flux_0 to 2.3 but allow a free fit to\n source_flux_1.\n\n fix_source_flux_ratio: *False* or *float*, optional\n For binary source models, source_flux_ratio is the flux ratio\n between two components, i.e.,\n source_flux_ratio = source_flux_1 / source_flux_0\n Default is *False*, i.e. allow the source flux to be a free\n parameter. If set to a float, it will fix the source value to that\n value.\n\n \"\"\"\n\n def __init__(self, model=None, dataset=None, fix_blend_flux=False,\n fix_source_flux=False, fix_source_flux_ratio=False):\n self.model = model\n self.dataset = dataset\n\n # Setup limb-darkening\n self._gamma = 0.\n if self.model.parameters.is_finite_source():\n if self.dataset.bandpass is not None:\n try:\n self._gamma = self.model.get_limb_coeff_gamma(\n self.dataset.bandpass)\n except KeyError:\n msg = (\n 'Dataset bandpass is {0} but model does not have a ' +\n 'limb-darkening coefficient for {0}. 
Assuming zero.')\n warnings.warn(msg.format(self.dataset.bandpass))\n\n # fit parameters\n self.fix_blend_flux = fix_blend_flux\n self.fix_source_flux_ratio = fix_source_flux_ratio\n if isinstance(fix_source_flux, list) or (fix_source_flux is False):\n self.fix_source_flux = fix_source_flux\n else:\n if self._model.n_sources == 1:\n self.fix_source_flux = [fix_source_flux]\n else:\n msg = (\"you have {0}\".format(self._model.n_sources) +\n \" sources. Thus, fix_source_flux should be a list of\" +\n \"length {0}\".format(self._model.n_sources) +\n \"(or False).\")\n raise ValueError(msg)\n\n # parameters fluxes of various sources\n self._source_fluxes = None\n self._blend_flux = None\n self._source_flux_ratio = None\n\n # chi2 parameters\n self._chi2_per_point = None\n self._chi2 = None\n\n def _check_for_flux_ratio_errors(self):\n \"\"\"\n If combination of settings and models are invalid, raise exceptions.\n \"\"\"\n\n if self.fix_source_flux_ratio is not False:\n if self._model.n_sources != 2:\n msg = ('fix_source_flux_ratio only valid for models with 2' +\n 'sources. n_sources = {0}'.format(\n self._model.n_sources))\n raise ValueError(msg)\n elif self.fix_source_flux is not False:\n msg = ('fix_source_flux_ratio + fixed_source_flux not ' +\n 'implemented. Fix the fluxes for each source ' +\n 'individually instead.')\n raise NotImplementedError(msg)\n\n def update(self, bad=False):\n \"\"\"\n Calculate the best-fit source and blend fluxes as well as the chi2.\n\n Keywords :\n bad: *bool*\n Default is *False*. If *True* recalculates the data\n magnification for each point to ensure that there are values\n even for bad datapoints.\n\n \"\"\"\n self.fit_fluxes()\n\n # Calculate chi2\n model_flux = self.get_model_fluxes(bad=bad)\n diff = self._dataset.flux - model_flux\n self._chi2_per_point = (diff / self._dataset.err_flux)**2\n\n def _calculate_magnifications(self, bad=True):\n \"\"\"\n Calculate the model magnifications for the epochs of the dataset.\n \"\"\"\n if bad:\n select = np.ones(self._dataset.n_epochs, dtype=bool)\n else:\n select = self._dataset.good\n\n if self.dataset.ephemerides_file is None:\n satellite_skycoord = None\n else:\n satellite_skycoord = self.dataset.satellite_skycoord\n\n magnification_kwargs = {\n 'gamma': self.gamma, 'satellite_skycoord': satellite_skycoord}\n\n if self._model.n_sources == 1:\n mag_matrix = self._model.get_magnification(\n time=self._dataset.time[select],\n **magnification_kwargs)\n elif self._model.n_sources == 2:\n mag_matrix = self._model.get_magnification(\n time=self._dataset.time[select], separate=True,\n **magnification_kwargs)\n else:\n msg = (\"{0} \".format(self._model.n_sources) +\n \"sources used. Function model.get_magnification can \" +\n \"only handle <=2 sources\")\n raise NotImplementedError(msg)\n\n if bad:\n self._data_magnification = mag_matrix\n else:\n if self._model.n_sources == 1:\n self._data_magnification = np.zeros(\n self._dataset.n_epochs)\n self._data_magnification[self._dataset.good] = mag_matrix\n else:\n self._data_magnification = [np.zeros(self._dataset.n_epochs)]\n self._data_magnification[0][self._dataset.good] = mag_matrix[0]\n for source in range(1, self.model.n_sources):\n self._data_magnification.append(\n np.zeros(self._dataset.n_epochs))\n self._data_magnification[\n source][self._dataset.good] = mag_matrix[source]\n\n def _get_xy_qflux(self):\n \"\"\" Apply a fixed flux ratio. 
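\n\n        Editor's note (hedged, self-contained sketch with toy numbers): the\n        flux fit performed by this class is linear, flux(t) = f_s * A(t) + f_b,\n        so weighted least squares recovers both fluxes directly::\n\n            A = np.array([1.2, 2.0, 3.5, 1.1])    # toy magnification curve\n            y = 1.5 * A + 0.3                      # fluxes for f_s=1.5, f_b=0.3\n            X = np.vstack((A, np.ones_like(A))).T\n            np.linalg.lstsq(X, y, rcond=-1)[0]     # -> [1.5, 0.3]\n        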
\"\"\"\n y = self._dataset.flux[self._dataset.good]\n x = np.array(\n self._data_magnification[0][self._dataset.good] +\n self.fix_source_flux_ratio *\n self._data_magnification[1][self._dataset.good])\n self.n_fluxes = 1\n\n return (x, y)\n\n def _get_xy_individual_fluxes(self):\n \"\"\" Account for source fluxes individually \"\"\"\n y = self._dataset.flux[self._dataset.good]\n\n if self.fix_source_flux is False:\n x = np.array(self._data_magnification)\n if self.model.n_sources == 1:\n x = x[self._dataset.good]\n else:\n x = x[:, self._dataset.good]\n\n self.n_fluxes = self._model.n_sources\n else:\n x = None\n if self._model.n_sources == 1:\n y -= (self.fix_source_flux[0] *\n self._data_magnification[self._dataset.good])\n else:\n for i in range(self._model.n_sources):\n if self.fix_source_flux[i] is False:\n self.n_fluxes += 1\n if x is None:\n x = self._data_magnification[i][self._dataset.good]\n else:\n x = np.vstack(\n (x, self._data_magnification[i][\n self._dataset.good]))\n\n else:\n y -= (self.fix_source_flux[i] *\n self._data_magnification[i][self._dataset.good])\n\n return (x, y)\n\n def _setup_linalg_arrays(self):\n \"\"\"\n Create xT and y arrays\n \"\"\"\n (x, y) = self._create_arrays()\n xT = self._invert_x_array(x)\n (xT, y) = self._weight_linalg_arrays(xT, y)\n return (xT, y)\n\n def _create_arrays(self):\n # Initializations\n self.n_fluxes = 0\n n_epochs = np.sum(self._dataset.good)\n self._calculate_magnifications(bad=False)\n\n # Account for source fluxes\n if self.fix_source_flux_ratio is not False:\n self._check_for_flux_ratio_errors()\n (x, y) = self._get_xy_qflux()\n else:\n (x, y) = self._get_xy_individual_fluxes()\n\n # Account for free or fixed blending\n # Should do a runtime test to compare with lines 83-94\n if self.fix_blend_flux is False:\n self.n_fluxes += 1\n if x is None:\n x = np.ones((1, n_epochs))\n else:\n x = np.vstack((x, np.ones(n_epochs)))\n\n elif self.fix_blend_flux == 0.:\n pass\n else:\n y -= self.fix_blend_flux\n\n return (x, y)\n\n def _invert_x_array(self, x):\n \"\"\" Take the transpose of x \"\"\"\n n_epochs = np.sum(self._dataset.good)\n xT = np.copy(x).T\n xT.shape = (n_epochs, self.n_fluxes)\n\n return xT\n\n def _weight_linalg_arrays(self, xT, y):\n \"\"\"weight by data uncertainties\"\"\"\n # Take into account uncertainties\n sigma_inverse = 1. / self._dataset.err_flux[self._dataset.good]\n y *= sigma_inverse\n xT *= np.array([sigma_inverse] * self.n_fluxes).T\n\n return (xT, y)\n\n def fit_fluxes(self):\n \"\"\"\n Execute the linear least squares fit to determine the fitted fluxes.\n Sets the values of :py:obj:`~source_fluxes`, :py:obj:`~blend_flux`,\n and (if applicable) :py:obj:`~source_flux`.\n\n Does *not* calculate chi2. To fit for the fluxes and calculate chi2,\n run :py:func:`~update()`.\n \"\"\"\n\n (xT, y) = self._setup_linalg_arrays()\n\n # Solve for the coefficients in y = fs * x + fb (point source)\n # These values are: F_s1, F_s2,..., F_b.\n try:\n results = np.linalg.lstsq(xT, y, rcond=-1)[0]\n except ValueError as e:\n message = (\n \"{0}\\nIf either of these numbers ({1}, {2}) is greater than \"\n \"zero, there is a NaN somewhere, probably in the data. The \"\n \"cause of this error may be the epochs with extreme \"\n \"brightness (e.g., 99.999 mag), which is sometimes used to \"\n \"mark bad data. 
Other possible reason is mistakenly using \"\n \"phot_fmt='flux' instead of 'mag'\")\n args = (e, np.sum(np.isnan(xT)), np.sum(np.isnan(y)))\n raise ValueError(message.format(*args))\n\n # Record the results\n if self.fix_source_flux_ratio is False:\n if self.fix_source_flux is False:\n self._source_fluxes = results[0:self._model.n_sources]\n else:\n self._source_fluxes = []\n index = 0\n for i in range(self._model.n_sources):\n if self.fix_source_flux[i] is False:\n self._source_fluxes.append(results[index])\n index += 1\n else:\n self._source_fluxes.append(self.fix_source_flux[i])\n\n else:\n self._source_fluxes = [results[0],\n results[0] * self.fix_source_flux_ratio]\n\n if self.fix_blend_flux is False:\n self._blend_flux = results[-1]\n else:\n self._blend_flux = self.fix_blend_flux\n\n def get_data_magnification(self, bad=False):\n \"\"\"\n Calculates the model magnification for each data point.\n\n Arguments :\n bad: *boolean*\n If *True*, calculates the magnification for all points.\n If *False*, only calculates the magnification for good data\n points. Values for bad data points are set to 0. Default is\n *False*.\n\n Returns :\n data_magnification: *np.ndarray*\n The model magnification evaluated for each datapoint. If there\n is more than one source, the magnification of each source is\n reported separately.\n \"\"\"\n\n self._calculate_magnifications(bad=bad)\n return self._data_magnification\n\n def get_model_fluxes(self, bad=False):\n \"\"\"\n Calculate model in flux space.\n\n Keywords :\n bad: *bool*\n Default is *False*. If *True* recalculates the data\n magnification for each point to ensure that the values\n for bad datapoints are calculated (otherwise, they are set to\n the magnitude of the blend).\n\n Returns :\n model_flux: *np.ndarray*\n The model flux evaluated for each datapoint.\n \"\"\"\n if self.source_fluxes is None:\n raise AttributeError(\n 'you need to run FitData.fit_fluxes() first to execute the' +\n 'linear fit.')\n\n if bad:\n self._calculate_magnifications(bad=True)\n\n model_flux = np.ones(self._dataset.n_epochs) * self.blend_flux\n if self._model.n_sources == 1:\n model_flux += self.source_flux * self._data_magnification\n else:\n for i in range(self._model.n_sources):\n model_flux += self.source_fluxes[i] \\\n * self._data_magnification[i]\n\n return model_flux\n\n def get_model_magnitudes(self, **kwargs):\n \"\"\"\n Calculate model in magnitude space\n\n Arguments :\n ``**kwargs``:\n see :py:func:`get_model_fluxes()`\n\n Returns :\n model_mag: *np.ndarray*\n The model magnitude evaluated for each datapoint.\n \"\"\"\n model_flux = self.get_model_fluxes(**kwargs)\n model_mag = Utils.get_mag_from_flux(model_flux)\n\n return model_mag\n\n def scale_fluxes(self, source_flux, blend_flux):\n \"\"\"\n Rescale the data fluxes to an arbitrary flux scale:\n flux = source_flux_0 * (data.flux - blend_flux) / source_flux\n flux += blend_flux_0\n err_flux = source_flux_0 * data.err_flux / source_flux\n\n Arguments :\n source_flux: *float*, *list*, *np.array*\n Flux of the source in the desired system. 
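Worked numbers for the rescaling formula in scale_fluxes. The values are illustrative: suppose this dataset was fitted with f_s = 3.0, f_b = 1.5, and we rescale to a reference system with source flux 1 and blend flux 0:

import numpy as np

data_flux = np.array([4.5, 9.0, 25.5])
data_err = np.array([0.21, 0.24, 0.30])
f_s, f_b = 3.0, 1.5        # fitted fluxes for this dataset
f_s0, f_b0 = 1.0, 0.0      # desired reference system

flux = f_s0 * (data_flux - f_b) / f_s + f_b0
err_flux = f_s0 * data_err / f_s
print(flux)                # [1.  2.5 8. ] -- effectively the magnification
print(err_flux)            # [0.07 0.08 0.1 ]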
If n_sources > 1 and\n source_flux has more than one element, the elements are\n summed to produce the overall scaling flux.\n\n blend_flux: *float*\n Flux of the blend in the desired system\n\n Returns :\n flux: *np.ndarray*\n Fluxes from the data rescaled to the desired system.\n\n err_flux: *np.ndarray*\n Uncertainties of fluxes from the data rescaled to the desired\n system.\n \"\"\"\n if self.model.n_sources == 1:\n data_source_flux = self.source_flux\n else:\n data_source_flux = np.sum(self.source_fluxes)\n if len(source_flux) > 1:\n source_flux = np.sum(source_flux)\n\n flux = source_flux * (self._dataset.flux - self.blend_flux)\n flux /= data_source_flux\n flux += blend_flux\n err_flux = source_flux * self._dataset.err_flux / data_source_flux\n\n return (flux, err_flux)\n\n def get_residuals(\n self, phot_fmt=None, source_flux=None, blend_flux=None, bad=False,\n type=None):\n \"\"\"\n Calculate the residuals for each datapoint relative to the model.\n\n Keywords :\n phot_fmt: *str*, optional\n specify whether the residuals should be returned in\n magnitudes ('mag') or in flux ('flux'). Default is\n 'mag'. If 'scaled', will return the residuals in magnitudes\n scaled to source_flux, blend_flux.\n\n source_flux, blend_flux: *float*\n reference source and blend fluxes for scaling the residuals\n\n bad: *bool*\n Default is *False*. If *True* recalculates the data\n magnification for each point to ensure that there are values\n even for bad datapoints.\n\n type:\n DEPRECATED, see \"phot_fmt\" above.\n\n Returns :\n residuals: *np.ndarray*\n the residuals for the corresponding dataset.\n\n errorbars: *np.ndarray*\n the scaled errorbars for each point. For plotting\n errorbars for the residuals.\n \"\"\"\n if type is not None:\n if type == 'mag':\n warnings.warn(\n '\"mag\" returns residuals in the original data flux' +\n 'system. To scale the residuals, use \"scaled\".')\n warnings.warn(\n 'type keyword will be deprecated. Use \"phot_fmt\" instead.',\n FutureWarning)\n phot_fmt = type\n\n if bad:\n self._calculate_magnifications(bad=True)\n\n if phot_fmt == 'mag':\n residuals = self._dataset.mag - self.get_model_magnitudes()\n errorbars = self._dataset.err_mag\n elif phot_fmt == 'flux':\n residuals = self._dataset.flux - self.get_model_fluxes()\n errorbars = self._dataset.err_flux\n elif phot_fmt == 'scaled':\n if source_flux is None or blend_flux is None:\n raise ValueError(\n 'If phot_fmt=scaled, source_flux and blend_flux must ' +\n 'also be specified.')\n\n magnification = self._data_magnification\n if self._model.n_sources == 1:\n model_flux = source_flux * magnification\n else:\n model_flux = source_flux[0] * magnification[0]\n model_flux += source_flux[1] * magnification[1]\n model_flux += blend_flux\n model_mag = Utils.get_mag_from_flux(model_flux)\n (flux, err_flux) = self.scale_fluxes(source_flux, blend_flux)\n (mag, errorbars) = Utils.get_mag_and_err_from_flux(flux, err_flux)\n residuals = mag - model_mag\n else:\n raise ValueError(\n 'phot_fmt must be one of \"mag\", \"flux\", or \"scaled\". 
Your ' +\n 'value: {0}'.format(phot_fmt))\n\n return (residuals, errorbars)\n\n def _check_for_gradient_implementation(self, parameters):\n \"\"\"\n Check that the gradient methods are implemented for the requested\n values.\n \"\"\"\n # Implemented for the requested parameters?\n if not isinstance(parameters, list):\n parameters = [parameters]\n implemented = {'t_0', 't_E', 'u_0', 't_eff', 'pi_E_N', 'pi_E_E'}\n if len(set(parameters) - implemented) > 0:\n raise NotImplementedError((\n \"chi^2 gradient is implemented only for {:}\\nCannot work \" +\n \"with {:}\").format(implemented, parameters))\n\n # Implemented for the number of sources in the model?\n if self.model.n_lenses != 1:\n raise NotImplementedError(\n 'chi2_gradient() only implemented for single lens models')\n\n # Implemented for finite source effects?\n if self.model.parameters.is_finite_source():\n raise NotImplementedError('Event.chi2_gradient() is not working '\n 'for finite source models yet')\n\n def get_chi2_gradient(self, parameters):\n \"\"\"\n Fits fluxes and calculates chi^2 gradient (also called Jacobian), i.e.,\n :math:`d chi^2/d parameter`.\n\n Parameters :\n parameters: *str* or *list*, required\n Parameters with respect to which gradient is calculated.\n Currently accepted parameters are: ``t_0``, ``u_0``, ``t_eff``,\n ``t_E``, ``pi_E_N``, and ``pi_E_E``. The parameters for\n which you request gradient must be defined in py:attr:`~model`.\n\n Returns :\n gradient: *float* or *np.ndarray*\n chi^2 gradient\n \"\"\"\n self.fit_fluxes()\n self.calculate_chi2_gradient(parameters)\n return self.chi2_gradient\n\n def calculate_chi2_gradient(self, parameters):\n \"\"\"\n Calculates chi^2 gradient (also called Jacobian), i.e.,\n :math:`d chi^2/d parameter` WITHOUT refitting for the fluxes. Saves\n computations if, e.g., you want to retrieve both py:attr:`~chi2` and\n py:attr:`~chi2_gradient`.\n\n Parameters :\n parameters: *str* or *list*, required\n Parameters with respect to which gradient is calculated.\n Currently accepted parameters are: ``t_0``, ``u_0``, ``t_eff``,\n ``t_E``, ``pi_E_N``, and ``pi_E_E``. The parameters for\n which you request gradient must be defined in py:attr:`~model`.\n\n Returns :\n gradient: *float* or *np.ndarray*\n chi^2 gradient\n \"\"\"\n self._check_for_gradient_implementation(parameters)\n\n # Calculate factor\n flux_factor = self.get_model_fluxes() - self.dataset.flux\n flux_factor *= 2. * self.source_flux / self.dataset.err_flux**2\n\n gradient = self._get_d_A_d_params_for_point_lens_model(parameters)\n\n for (key, value) in gradient.items():\n gradient[key] = np.sum((flux_factor * value)[self.dataset.good])\n\n if len(parameters) == 1:\n out = gradient[parameters[0]]\n else:\n out = np.array([gradient[p] for p in parameters])\n\n self._chi2_gradient = out\n\n return self._chi2_gradient\n\n def _get_d_A_d_params_for_point_lens_model(self, parameters):\n \"\"\"\n Calculate d A / d parameters for a point lens model.\n\n Returns a *dict*.\n \"\"\"\n gradient = self._get_d_u_d_params(parameters)\n\n d_A_d_u = self._get_d_A_d_u_for_point_lens_model()\n\n for (key, value) in gradient.items():\n gradient[key] *= d_A_d_u\n\n return gradient\n\n def _get_d_A_d_u_for_point_lens_model(self):\n \"\"\"\n Calculate dA/du for PSPL\n \"\"\"\n trajectory = self.model.get_trajectory(self.dataset.time)\n u_2 = trajectory.x**2 + trajectory.y**2\n d_A_d_u = -8. 
/ (u_2 * (u_2 + 4) * np.sqrt(u_2 + 4))\n return d_A_d_u\n\n def _get_d_u_d_params(self, parameters):\n \"\"\"\n Calculate d u / d parameters\n\n Returns a *dict*.\n \"\"\"\n # Setup\n gradient = {param: 0 for param in parameters}\n as_dict = self.model.parameters.as_dict()\n\n # Get source location\n trajectory = self.model.get_trajectory(self.dataset.time)\n u_ = np.sqrt(trajectory.x**2 + trajectory.y**2)\n\n # Calculate derivatives\n d_u_d_x = trajectory.x / u_\n d_u_d_y = trajectory.y / u_\n dt = self.dataset.time - as_dict['t_0']\n\n # Exactly 2 out of (u_0, t_E, t_eff) must be defined and\n # gradient depends on which ones are defined.\n t_E = self.model.parameters.t_E\n t_eff = self.model.parameters.t_eff\n if 't_eff' not in as_dict:\n gradient['t_0'] = -d_u_d_x / t_E\n gradient['u_0'] = d_u_d_y\n gradient['t_E'] = d_u_d_x * -dt / t_E**2\n elif 't_E' not in as_dict:\n gradient['t_0'] = -d_u_d_x * as_dict['u_0'] / t_eff\n gradient['u_0'] = (d_u_d_y + d_u_d_x * dt / t_eff)\n gradient['t_eff'] = (d_u_d_x * -dt * as_dict['u_0'] / t_eff**2)\n elif 'u_0' not in as_dict:\n gradient['t_0'] = -d_u_d_x / t_E\n gradient['t_E'] = (d_u_d_x * dt - d_u_d_y * t_eff) / t_E**2\n gradient['t_eff'] = d_u_d_y / t_E\n else:\n raise KeyError(\n 'Something is wrong with ModelParameters in ' +\n 'FitData.calculate_chi2_gradient():\\n', as_dict)\n\n # Below we deal with parallax only.\n if 'pi_E_N' in parameters or 'pi_E_E' in parameters:\n warnings.warn(\n \"\\n\\nTests indicate that chi2 gradient for models with \"\n \"parallax has BUGS!!!\\n It's better not to use it or contact \"\n \"code authors.\\n\")\n # JCY Not happy about this as it requires importing from other\n # modules. It is inelegant, which in my experience often means it\n # needs to be refactored.\n kwargs = dict()\n if self.dataset.ephemerides_file is not None:\n kwargs['satellite_skycoord'] = self.dataset.satellite_skycoord\n\n parameters_no_piE = {**self.model.parameters.as_dict()}\n parameters_no_piE.pop('pi_E_N')\n parameters_no_piE.pop('pi_E_E')\n\n trajectory_no_piE = Trajectory(\n self.dataset.time, ModelParameters(parameters_no_piE),\n **kwargs)\n dx = trajectory.x - trajectory_no_piE.x\n dy = trajectory.y - trajectory_no_piE.y\n delta_E = dx * as_dict['pi_E_E'] + dy * as_dict['pi_E_N']\n delta_N = dx * as_dict['pi_E_N'] - dy * as_dict['pi_E_E']\n det = as_dict['pi_E_N']**2 + as_dict['pi_E_E']**2\n gradient['pi_E_N'] = (d_u_d_x * delta_N + d_u_d_y * delta_E) / det\n gradient['pi_E_E'] = (d_u_d_x * delta_E - d_u_d_y * delta_N) / det\n\n return gradient\n\n @property\n def chi2_gradient(self):\n \"\"\"\n Return previously calculated chi^2 gradient (also called Jacobian),\n i.e., :math:`d chi^2/d parameter`. See :py:func:`~get_chi2_gradient()`\n and :py:func:`~calculate_chi2_gradient()`.\n\n Returns :\n gradient: *float* or *np.ndarray*\n chi^2 gradient. Will return None if the chi2 gradient was not\n previously calculated using one of the functions mentioned\n above.\n\n \"\"\"\n try:\n return self._chi2_gradient\n except AttributeError:\n return None\n\n @property\n def chi2(self):\n \"\"\"\n Returns :\n chi2: *float*\n the total chi2 for the fitted dataset. Good points only. 
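The closed form used in _get_d_A_d_u_for_point_lens_model, dA/du = -8 / (u^2 (u^2 + 4)^(3/2)), follows from the standard PSPL magnification A(u) = (u^2 + 2) / (u sqrt(u^2 + 4)). A quick numerical cross-check (standalone, NumPy only):

import numpy as np

def pspl_magnification(u):
    return (u**2 + 2.) / (u * np.sqrt(u**2 + 4.))

def d_A_d_u(u):
    u_2 = u**2
    return -8. / (u_2 * (u_2 + 4.) * np.sqrt(u_2 + 4.))

u, eps = 0.7, 1e-6
numeric = (pspl_magnification(u + eps) - pspl_magnification(u - eps)) / (2. * eps)
print(d_A_d_u(u), numeric)   # central difference agrees with the closed form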
See\n :py:obj:`~MulensModel.mulensdata.MulensData.good`.\n\n If None, you need to run :py:func:`~update()` to execute the\n linear fit and calculate the chi2.\n \"\"\"\n if self.chi2_per_point is None:\n return None\n else:\n return np.sum(self.chi2_per_point[self._dataset.good])\n\n @property\n def chi2_per_point(self):\n \"\"\"\n Returns :\n chi2_per_point: *np.ndarray*\n Chi^2 contribution from each data point,\n e.g. ``chi2_per_point[k]`` returns the chi2 contribution\n from the *k*-th point of :py:obj:`dataset`. Includes bad\n datapoints.\n\n If None, you need to run :py:func:`~update()` to execute the\n linear fit and calculate the chi2.\n \"\"\"\n return self._chi2_per_point\n\n @property\n def source_flux(self):\n \"\"\"\n Returns :\n source_flux: *float*\n the fitted source flux. Only defined for models with a single\n source. See also :py:obj:`~source_fluxes`\n\n If None, you need to run :py:func:`~fit_fluxes()` or\n :py:func:`~update()` to execute the linear fit.\n \"\"\"\n if self._model.n_sources == 1:\n return self.source_fluxes[0]\n else:\n msg = (\"source_flux is defined only for models\" +\n \" with ONE source, you have\" +\n \" {0}\".format(self._model.n_sources) +\n \" sources. Try FitData.source_fluxes instead\")\n\n raise NameError(msg)\n\n @property\n def source_fluxes(self):\n \"\"\"\n Returns :\n source_fluxes: *np.array*\n the fitted source flux(es).\n\n If None, you need to run :py:func:`~fit_fluxes()` or\n :py:func:`~update()` to execute the linear fit.\n \"\"\"\n return self._source_fluxes\n\n @property\n def blend_flux(self):\n \"\"\"\n Returns :\n blend_flux: *float*\n the fitted blend flux or the value set by\n fix_blend_flux (see :ref:`keywords`).\n\n If None, you need to run :py:func:`~fit_fluxes()` or\n :py:func:`~update()` to execute the linear fit.\n \"\"\"\n return self._blend_flux\n\n @property\n def source_flux_ratio(self):\n \"\"\"\n source_flux_ratio = source_flux_1 / source_flux_0\n\n Returns :\n source_flux_ratio: *float*\n the ratio of the fitted source fluxes or the value set by\n fix_source_flux_ratio (see :ref:`keywords`).\n\n If None, you need to run :py:func:`~fit_fluxes()` or\n :py:func:`~update()` to execute the linear fit.\n \"\"\"\n if self._model.n_sources != 2:\n msg = (\"source_flux is defined only for models\" +\n \" with TWO sources, you have\" +\n \" {0}\".format(self._model.n_sources) +\n \" sources.\")\n raise NameError(msg)\n\n if self.fix_source_flux_ratio:\n return self.fix_source_flux_ratio\n else:\n return self.source_fluxes[1] / self.source_fluxes[0]\n\n @property\n def dataset(self):\n \"\"\"\n :py:class:`~MulensModel.mulensdata.MulensData` object\n\n A single photometric dataset to be fitted.\n \"\"\"\n return self._dataset\n\n @dataset.setter\n def dataset(self, new_value):\n if not isinstance(new_value, MulensData):\n raise TypeError(\"Dataset has to of MulensData type, not: \" +\n str(type(new_value)))\n self._dataset = new_value\n\n @property\n def model(self):\n \"\"\"\n :py:class:`~MulensModel.model.Model` object\n\n The model to fit to the data.\n \"\"\"\n return self._model\n\n @model.setter\n def model(self, new_value):\n self._model = new_value\n\n @property\n def gamma(self):\n \"\"\"\n *float*\n\n Limb-darkening coefficient for this fit. Set by\n :py:attr:`~dataset.bandpass` and\n :py:func:`~model.get_limb_coeff_gamma()`.\n\n *** CHECK LINKS IN SPHINX. PROBABLY WON'T WORK. 
***\n \"\"\"\n return self._gamma\n","sub_path":"source/MulensModel/fitdata.py","file_name":"fitdata.py","file_ext":"py","file_size_in_byte":31392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"251138516","text":"class Solution:\n # @param path, a string\n # @return a string\n def simplifyPath(self, path):\n if not path: return path\n buf = [] # save word met before /\n path = path + '/' # sentinel\n result = []\n for i, x in enumerate(path):\n if x == '/':\n if buf: # not empty\n if buf == ['.','.']: \n if result:\n result.pop() # last layer # if /.. what happens?\n elif buf != ['.']:\n result.append(''.join(buf))\n buf = []\n else: # other chars\n buf.append(x)\n return '/'+ '/'.join(result) # finally need append '/' at front","sub_path":"71_simplify_path/prac3.py","file_name":"prac3.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"189782986","text":"import random\r\n\r\nsample_text = ['atharva','sparky','badumtss','idk','eat','pant','dank','memes','ohio','canada','globe']\r\n\r\nword = random.choice(sample_text)\r\n\r\n# Game function \r\n\r\ndef game():\r\n\r\n\tchances = len(word)\r\n\trevealed = []\r\n\tprint('\\n\\n'+'_ '*chances)\r\n\r\n\twhile(chances != 0):\r\n\r\n\t\tletter = input('\\n\\nType a letter\\n\\n')\r\n\r\n\t\tif letter in word:\r\n\r\n\t\t\trevealed.append(letter)\r\n\t\t\tcount = 0\r\n\t\t\tfor i in word:\r\n\t\t\t\tif i == letter:\r\n\t\t\t\t\tcount+=1\r\n\t\t\tchances-=count\r\n\t\t\tfor i in range(count-1):\r\n\t\t\t\trevealed.append(letter)\r\n\t\t\tprint('\\t\\t-----\\n\\t\\tO |\\n\\t /|\\\\ |\\n\\t\\t| |\\n\\t / \\\\ |\\n\\t\\t |\\n\\t\\t --^--')\r\n\t\t\tprint('\\n\\nNice work! You guessed it correctly!\\n\\n')\r\n\t\t\tprint('Word guessed so far = \\n\\n')\r\n\t\t\tguess = [i if i in revealed else '_' for i in word]\r\n\t\t\tword_guessed = ' '.join(guess)\r\n\t\t\tprint(word_guessed)\r\n\t\t\t\r\n\r\n\t\telse:\r\n\r\n\t\t\tchances-=1\r\n\t\t\tprint('\\n\\nWrong choice!\\n\\n')\r\n\t\t\tprint('Word guessed so far = \\n\\n')\r\n\t\t\tguess = [i if i in revealed else '_' for i in word]\r\n\t\t\tword_guessed = ' '.join(guess)\r\n\t\t\tprint(word_guessed)\r\n\t\t\tprint('\\t\\t-----\\n\\t\\tO |\\n\\t /|\\\\ |\\n\\t\\t| |\\n\\t / \\\\ |\\n\\t\\t |\\n\\t\\t --^--')\r\n\r\n\trevealed.sort()\r\n\tcheck_word = [i for i in word]\r\n\tcheck_word.sort()\r\n\r\n\tif revealed == check_word:\r\n\t\twin()\r\n\telse:\r\n\t\tlost()\r\n\r\n# Result functions\r\n\r\ndef win():\r\n\r\n\tprint('\\n\\nYou have won the game! The word is \"{}\"'.format(word))\r\n\tprint('\\n\\n\\t O\\n\\t\\\\|/\\n\\t |\\n\\t/ \\\\')\r\n\r\ndef lost():\r\n\r\n\tprint('\\n\\nYou have lost the game! The word is \"{}\"'.format(word))\r\n\tprint('\\n\\n\\t \\n\\t/|\\\\\\n\\t |\\n\\t/ \\\\')\r\n\r\n# Driver code\r\nprint('\\n\\nWelcome to the Hangman game! The rules are simple, guess the word and the man doesnt die, if you dont guess, he dies. 
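A short usage sketch for the stack-based simplifyPath solver in the preceding record (assuming that Solution class is in scope; the paths are illustrative):

s = Solution()
print(s.simplifyPath('/a/./b/../../c/'))  # '/c'
print(s.simplifyPath('/../'))             # '/' -- '..' at the root is a no-op
print(s.simplifyPath('/home//foo/'))      # '/home/foo' -- empty segments dropped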
Let\\'s begin!\\n\\n')\r\nprint('\\t\\t-----\\n\\t\\tO |\\n\\t /|\\\\ |\\n\\t\\t| |\\n\\t / \\\\ |\\n\\t\\t |\\n\\t\\t --^--')\r\ngame()\r\n","sub_path":"Games/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"600904499","text":"from profil3r.app.search import search_get\nimport time\n\nclass Replit:\n\n def __init__(self, config, permutations_list):\n # 1000 ms\n self.delay = config['plateform']['replit']['rate_limit'] / 1000\n # https://replit.com/@{username}\n self.format = config['plateform']['replit']['format']\n self.permutations_list = permutations_list\n # Programming \n self.type = config['plateform']['replit']['type']\n\n # Generate all potential replit usernames\n def possible_usernames(self):\n possible_usernames = []\n\n for permutation in self.permutations_list:\n possible_usernames.append(self.format.format(\n permutation = permutation,\n ))\n return possible_usernames\n\n def search(self):\n replit_usernames = {\n \"type\": self.type,\n \"accounts\": []\n }\n possible_usernames_list = self.possible_usernames()\n\n for username in possible_usernames_list:\n r = search_get(username)\n if not r:\n continue\n \n # If the account exists\n if r.status_code == 200:\n replit_usernames[\"accounts\"].append({\"value\": username})\n time.sleep(self.delay)\n \n return replit_usernames","sub_path":"profil3r/app/modules/programming/replit.py","file_name":"replit.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"518103412","text":"\"\"\"MapSelectionTool to draw a polygon and get the coordinates of it.\n\nThis script was based on https://github.com/lcoandrade/OSMDownloader/blob/master/rectangleAreaTool.py\n\nNotes:\n begin : 2019-02-09\n git sha : $Format:%H$\n\n development : 2019, Ivan Ivanov @ ITC, University of Twente\n email : ivan.ivanov@suricactus.com\n copyright : (C) 2019 by Ivan Ivanov\n\nLicense:\n /***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. 
*\n * *\n /***************************************************************************\n\n\"\"\"\n\n\nfrom qgis.gui import QgsMapTool, QgsRubberBand, QgsMapMouseEvent, QgsMapCanvas\nfrom qgis.core import QgsWkbTypes, QgsPointXY\nfrom qgis.PyQt.QtCore import pyqtSignal, Qt\nfrom qgis.PyQt.QtGui import QColor\nfrom qgis.PyQt.QtWidgets import QApplication\n\nclass MapSelectionTool(QgsMapTool):\n\n polygonCreated = pyqtSignal(QgsPointXY, QgsPointXY, Qt.KeyboardModifiers)\n\n def __init__(self, canvas: QgsMapCanvas) -> None:\n QgsMapTool.__init__(self, canvas)\n\n mFillColor = QColor(254, 178, 76, 63)\n\n self.canvas = canvas\n self.active = True\n\n self.rubberBand = QgsRubberBand(self.canvas, QgsWkbTypes.PolygonGeometry)\n self.rubberBand.setColor(mFillColor)\n self.rubberBand.setWidth(1)\n self.reset()\n\n def reset(self) -> None:\n self.startPoint = self.endPoint = None\n self.isEmittingPoint = False\n self.rubberBand.reset(QgsWkbTypes.PolygonGeometry)\n\n def canvasPressEvent(self, e: QgsMapMouseEvent) -> None:\n self.startPoint = self.toMapCoordinates(e.pos())\n self.endPoint = self.startPoint\n self.isEmittingPoint = True\n self.showRect(self.startPoint, self.endPoint)\n\n def canvasReleaseEvent(self, e: QgsMapMouseEvent) -> None:\n self.isEmittingPoint = False\n self.rubberBand.hide()\n self.polygonCreated.emit(self.startPoint, self.endPoint, QApplication.keyboardModifiers())\n\n def canvasMoveEvent(self, e: QgsMapMouseEvent) -> None:\n if not self.isEmittingPoint:\n return\n self.endPoint = self.toMapCoordinates(e.pos())\n self.showRect(self.startPoint, self.endPoint)\n\n def showRect(self, startPoint: QgsPointXY, endPoint: QgsPointXY) -> None:\n self.rubberBand.reset(QgsWkbTypes.PolygonGeometry)\n if startPoint.x() == endPoint.x() or startPoint.y() == endPoint.y():\n return\n point1 = QgsPointXY(startPoint.x(), startPoint.y())\n point2 = QgsPointXY(startPoint.x(), endPoint.y())\n point3 = QgsPointXY(endPoint.x(), endPoint.y())\n point4 = QgsPointXY(endPoint.x(), startPoint.y())\n\n self.rubberBand.addPoint(point1, False)\n self.rubberBand.addPoint(point2, False)\n self.rubberBand.addPoint(point3, False)\n self.rubberBand.addPoint(point4, True) # true to update canvas\n self.rubberBand.show()\n\n def deactivate(self) -> None:\n self.rubberBand.hide()\n QgsMapTool.deactivate(self)\n","sub_path":"MapSelectionTool.py","file_name":"MapSelectionTool.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"259140409","text":"import logging\nimport sys\n\nfrom unified_model.server.api_endpoint import app\n\nDEFAULT_PORT = 5000\nDEFAULT_HOST = '127.0.0.1'\n\n\ndef run_waitress(port: int = DEFAULT_PORT, host: str = DEFAULT_HOST):\n from waitress import serve\n serve(app, host=host, port=port)\n\n\ndef run_gevent(port: int = DEFAULT_PORT, host: str = DEFAULT_HOST):\n from gevent.pywsgi import WSGIServer\n http_server = WSGIServer((host, port), app)\n http_server.serve_forever()\n\n\ndef run_tornado(port: int = DEFAULT_PORT, host: str = DEFAULT_HOST):\n from tornado.wsgi import WSGIContainer\n from tornado.httpserver import HTTPServer\n from tornado.ioloop import IOLoop\n\n http_server = HTTPServer(WSGIContainer(app))\n http_server.listen(port)\n IOLoop.instance().start()\n\n\ndef run_flask(port: int = DEFAULT_PORT, host: str = DEFAULT_HOST):\n app.run(debug=False, threaded=True, port=port, host=host)\n\n\ndef run_gunicorn(port: int = DEFAULT_PORT, host: str = DEFAULT_HOST):\n import gunicorn.app.base\n 
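# Hedged note: `gunicorn.six` shipped with Gunicorn 19.x but was removed in
# Gunicorn 20, so the import on the next line fails on modern versions.
# A six-free load_config needs only the plain dict API (illustrative sketch,
# not part of this file):
#
# def load_config(self):
#     config = {key: value for key, value in self.options.items()
#               if key in self.cfg.settings and value is not None}
#     for key, value in config.items():
#         self.cfg.set(key.lower(), value)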
from gunicorn.six import iteritems\n\n # http://docs.gunicorn.org/en/latest/custom.html\n class StandaloneApplication(gunicorn.app.base.BaseApplication):\n\n def __init__(self, app, options=None):\n self.options = options or {}\n self.application = app\n super(StandaloneApplication, self).__init__()\n\n def load_config(self):\n config = dict([(key, value) for key, value in iteritems(self.options)\n if key in self.cfg.settings and value is not None])\n for key, value in iteritems(config):\n self.cfg.set(key.lower(), value)\n\n def load(self):\n return self.application\n\n # Or Run with command line: gunicorn -w 4 -b 0.0.0.0:5000 api_server:app -k gevent\n # http://flask.pocoo.org/docs/0.12/deploying/wsgi-standalone/#gunicorn\n # probably best for production - maybe couple with nginx\n options = {\n 'bind': '%s:%s' % (host, str(port)),\n 'worker_class': 'gevent',\n 'workers': 5, # (multiprocessing.cpu_count() * 2) + 1\n }\n\n StandaloneApplication(app, options).run()\n\n\ndef run(port: int = DEFAULT_PORT, host: str = DEFAULT_HOST):\n if not port:\n port = DEFAULT_PORT\n\n if not host:\n host = DEFAULT_HOST\n\n run_flask(port=port, host=host) # change to gunicorn\n\n\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n run()\n","sub_path":"libraries/unified-model/unified_model/server/api_server.py","file_name":"api_server.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"577703970","text":"# -*- coding:utf-8 -*-\nfrom django.shortcuts import render\nfrom .models import Article, Category\n\n\n# Create your views here.\ndef get_category():\n category_lists = Category.objects.all()\n category_lists = [category_list.data for category_list in category_lists]\n\n return category_lists\n\n\ndef get_recent_lists():\n recent_lists = Article.objects.all().order_by('-create_at')[0:5]\n recent_lists = [recent_list.data for recent_list in recent_lists]\n\n return recent_lists\n\n\nCATEGORY = get_category()\nRECENT_LISTS = get_recent_lists()\n\n\ndef home(request):\n article_lists = Article.objects.all().order_by('-create_at')\n article_lists = [article_list.data for article_list in article_lists]\n\n data = {\n 'article_lists': article_lists,\n 'recent_lists': RECENT_LISTS,\n 'category_lists': CATEGORY,\n }\n return render(request, 'home.html', data)\n\n\ndef blog(request):\n article_lists = Article.objects.all().order_by('-create_at')\n article_lists = [article_list.data for article_list in article_lists]\n\n data = {\n 'article_lists': article_lists,\n }\n return render(request, 'blog.html', data)\n\n\ndef article(request, pk):\n article = Article.objects.get(pk=pk).data\n\n data = {\n 'article': article,\n 'recent_lists': RECENT_LISTS,\n 'category_lists': CATEGORY,\n }\n return render(request, 'article.html', data)\n\n\ndef category_article(request, category_name):\n article_lists = Article.objects.all().filter(category=category_name)\n article_lists = [article_list.data for article_list in article_lists]\n\n data = {\n 'article': article_lists,\n 'recent_lists': RECENT_LISTS,\n 'category_lists': CATEGORY,\n }\n return render(request, 'category.html', data)\n\n\ndef about(request):\n return render(request, 'about.html', {})\n\n\ndef contact(request):\n return render(request, 'contact.html', {})\n\n\ndef test(request):\n message = ['this a message']\n data = {\n 'messages': message,\n }\n return render(request, 
'test.html', data)","sub_path":"dj_test/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"337212267","text":"# -*- coding: utf-8 -*-\n\"\"\"\nЗадача: необходимо реализовать игру \"загадки\".\nПроцесс:\n1. Программа задает пользователю вопрос\n2. Пользователь дает свой ответ\n3. Программа сравнивает полученный ответ с результатом, который она ожидает\n4. Программа говорит пользователю: правильный ли он дал ответ\nФункционал:\n1. Программа должна запоминать, сколько правильных ответов дал пользователь. И выводить данный показатель при каждом новом правильном ответе.\nВопросы из предметной области:\n• Сами вопросы для загадок могут быть любыми, но желательно взять вопросы из тематики первого занятия\n• Правильные ответы должными быть правильными\n• Нужно сделать от 5 до 10 вопросов\nПорядок сдачи и проверки:\n• Готовые домашние задания нужно заливать на github, ссылку на код нужно выложить в канал #homework2\n• Задание желательно сделать до пятницы, чтобы я успел его проверить до занятия. Но я проверю его в любом случае, когда бы вы его не прислали\n• Хорошие примеры кода я буду отмечать иконкой - палец вверх, смотрите на такие задания, пользуйтесь ими как вдохновением и привером\n• Смотрите задания, которые прислали ваши коллеги, если возникают трудности, или если вам хочется посмотреть альтернативные решения\n\nДополнительные материалы (для начинающих):\n• Повторяем синтаксис python: https://www.codecademy.com/pt/courses/introduction-to-python-6WeG3/0/1?curriculum_id=4f89dab3d788890003000096\n• Русский туториал по git: https://githowto.com/ru\n• Визуальный инструмент для работы с git: https://desktop.github.com/\n• Как запускать python-файлы: https://tceh-python.slack.com/files/sobolevn/F290F1CQ4/how-to-open-python-files.pdf\n\nДополнительные материалы по первой лекции (для углубленного изучения):\n• Как хранятся числа в памяти: https://en.wikipedia.org/wiki/Computer_number_format\n• Что такое IEEE-754 (как хранятся числа с плавающей точкой): https://ru.wikipedia.org/wiki/IEEE_754-2008\n• Что такое Unicode: https://ru.wikipedia.org/wiki/%D0%AE%D0%BD%D0%B8%D0%BA%D0%BE%D0%B4\n• Разница между UTF-8, UTF-16 и UTF-32: http://unicode.org/faq/utf_bom.html\n• Сколько памяти занимают разные python-объекты в памяти: https://github.com/tceh-python/tceh-python/blob/master/course1/sizes.py\n\"\"\"\n\n\nimport sys\n\n__author__ = 'skorenev'\n\nif sys.version_info[0] == 2:\n input_function = raw_input\nelse:\n input_function = input\n\nquestions = [\n {\n 'question': 'Какая точная версия Python у тебя, в формате (x.x.x)?:',\n 'answer': '{}'.format(\".\".join(map(str, sys.version_info[:3])))\n },\n {\n 'question': 'Задача: \\n----------\\n a=None \\n print (a)\\n----------\\nЧто выдаст интерпретатор Python?:',\n 'answer': 'None'\n },\n {\n 'question': 'Задача: \\n----------\\n print (str(None))\\n----------\\nЧто выдаст интерпретатор Python?:',\n 'answer': 'None'\n },\n {\n 'question': 'Есть ли у Python 3+ raw_input (Да/Нет)?:',\n 'answer': 'Нет'\n },\n {\n 'question': 'Как зовут преподавателя (Никита, Николай, Алексей)?:',\n 'answer': 'Никита'\n },\n {\n 'question': 'Задача: \\n----------\\n s = \\'\\'\\n print s*5\\n----------\\nЧто выдаст интерпретатор Python?',\n 'answer': ''\n }\n]\n\ncorrect_answers = 0\nif __name__ == '__main__':\n for question_number, one_question in enumerate(questions):\n print('Задание: {}'.format(question_number + 1))\n user_input = 
str(input_function(one_question['question']))\n if user_input == one_question['answer']:\n print('>>> Правильно')\n print('Всего правильных ответов: %d\\n' % correct_answers)\n correct_answers += 1\n\n else:\n print('>>> Неправильно\\n')\n","sub_path":"homework_01/homework_01_simple.py","file_name":"homework_01_simple.py","file_ext":"py","file_size_in_byte":5124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"202913419","text":"from keras.layers import Input, Conv3D, Activation, MaxPool3D, Dropout, Flatten, Dense, Reshape, UpSampling2D\nfrom keras.models import Model\nfrom keras import optimizers, losses\nimport numpy as np\nimport utils\n\nN = utils.N\nP = utils.P\nL = utils.L\n\ndef create_model():\n\ta = Input(shape=(N, N, N, P + L))\n\tb = a\n\n\tb = Conv3D(64, (3, 3, 3), activation='relu', padding='same', use_bias=True)(b)\n\tb = MaxPool3D(pool_size=(2, 2, 2))(b)\n\tb = Dropout(0.5)(b)\n\n\tb = Conv3D(32, (3, 3, 3), activation='relu', padding='same', use_bias=True)(b)\n\tb = MaxPool3D(pool_size=(2, 2, 2))(b)\n\tb = Dropout(0.5)(b)\n\n\tb = Conv3D(16, (3, 3, 3), activation='relu', padding='same', use_bias=True)(b)\n\tb = MaxPool3D(pool_size=(2, 2, 2))(b)\n\tb = Dropout(0.5)(b)\n\n\tb = Flatten()(b)\n\tb = Dense(64, activation='relu')(b)\n\tb = Dense(1, activation='relu')(b)\n\n\tmodel = Model(a, b)\n\tmodel.summary()\n\treturn model\n\ndef main():\n\tcreate_model()\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"1a CS5242/3 Project/Code/project_code can delete/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"388617188","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nimport gzip\nimport os\n\n# 1. Install selenium \n# 2. Chrome's setting (chromedrive location)\n# 3. 
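Shape bookkeeping for the Conv3D/MaxPool3D stack in the model.py record above, assuming hypothetical values utils.N = 24 and P + L = 4 (pure arithmetic, no Keras required):

N, channels = 24, 4
shape = (N, N, N, channels)
for filters in (64, 32, 16):
    # 'same' padding preserves the spatial size; each MaxPool3D halves it
    shape = (shape[0] // 2, shape[1] // 2, shape[2] // 2, filters)
print(shape)             # (3, 3, 3, 16)
print(3 * 3 * 3 * 16)    # 432 features entering Flatten -> Dense(64) -> Dense(1)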
Change the location ==> a.chromedrive b.download gm_key_64.gz path c.write gm_key_64.gz path\n\n# Create the Firefox WebDriver (/path/of/chromedrive)\ndriver = webdriver.Chrome('/Users/Kuan-Hao/Desktop/chromedriver')\ndriver.get(\"http://topaz.gatech.edu/GeneMark/license_download.cgi\")\n# If the browser is still running ~ wait ~otherwise ~ skip!\ndriver.implicitly_wait(5)\n\n# Select GeneMarkS v.4.30\nsoftware_select = driver.find_element_by_xpath(\"html/body/table/tbody/tr[2]/td/form[1]/table/tbody/tr[3]/td/input[@type='radio']\")\nsoftware_select.click()\n# Select Linux_64\noperating_system_select = driver.find_element_by_xpath(\"html/body/table/tbody/tr[2]/td/form[1]/table/tbody/tr[3]/td[2]/input[2]\")\noperating_system_select.click()\n\n# Name \nname_text = driver.find_element_by_css_selector(\"center tr:nth-child(1) input\")\nname_text.send_keys(\"Howard Chao\")\n# Institution\ninstitution_text = driver.find_element_by_css_selector(\"center tr:nth-child(2) input\")\ninstitution_text.send_keys(\"National Taiwan University\")\n# Country\ncountry_text = driver.find_element_by_css_selector(\"center tr:nth-child(6) input\")\ncountry_text.send_keys(\"Taiwan\")\n# Email\nemail_text = driver.find_element_by_css_selector(\"center tr:nth-child(7) input\")\nemail_text.send_keys(\"ntueeb05howard@gmail.com\")\n\n# Submit the form\nform = driver.find_element_by_xpath(\"html/body/table/tbody/tr[2]/td/form[1]/center[4]/input[@id='submit']\")\nform.click()\n\n# Wait for website to reload\ndriver.implicitly_wait(5)\n\n# Download\ndownload_button = driver.find_element_by_xpath(\"html/body/table/tbody/tr[2]/td/center[7]/a[2]\")\ndriver.get(download_button.get_attribute(\"href\"))\n# Close browser\ndriver.close()\n\n# Unzip and write to gm_key_64.txt\nwith gzip.open('/Users/Kuan-Hao/Downloads/gm_key_64.gz', 'rb') as f:\n file_content = f.read()\nf = open('/Users/Kuan-Hao/Downloads/gm_key_64.txt', 'wb')\nf.write(file_content)\nf.close()\nos.remove('/Users/Kuan-Hao/Downloads/gm_key_64.gz')\n\n","sub_path":"GeneMark_License_Download/genemark_download.py","file_name":"genemark_download.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"638879181","text":"from __future__ import with_statement, absolute_import\nfrom threading import Thread\nimport logging\nfrom concurrent.futures import ThreadPoolExecutor\nimport requests\ntry:\n\tfrom queue import Queue\nexcept ImportError:\n\tfrom Queue import Queue\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n\nclass MarkupExtractor(Thread):\n\tdef __init__(self, urls, workers=1):\n\t\tsuper(MarkupExtractor, self).__init__()\n\t\tself.write_buffer = Queue()\n\t\tself.urls = urls\n\t\tself.workers = workers\n\n\tdef run(self):\n\t\tself._process_urls()\n\n\tdef _process_urls(self):\n\t\twith ThreadPoolExecutor(self.workers) as pool:\n\t\t\twhile self.urls:\n\t\t\t\tpool.submit(self._process_url, self.urls.pop())\n\n\t\t\tpool.shutdown() # wait for all submited threads to exit and free resources\n\n\tdef _process_url(self, url):\n\t\traw_html = self._fetch_raw_html(url)\n\t\tif raw_html:\n\t\t\tself.write_buffer.put({'url': url, 'raw_html':raw_html})\n\n\tdef _fetch_raw_html(self, url):\n\t\ttry:\n\t\t\tresponse = requests.get(url)\n\t\t\tif response.ok:\n\t\t\t\treturn response.content\n\t\t\telse:\n\t\t\t\terror_message = '{} responded with status code {}'.format(\n\t\t\t\t\t\t\t\tresponse.url, response.status_code)\n\t\t\t\traise 
requests.ConnectionError(error_message)\n\n\t\texcept requests.ConnectionError as E:\n\t\t\tlogger.warning(E)\n\n","sub_path":"markup_extractor.py","file_name":"markup_extractor.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"337117162","text":"def check_equal(str1, str2):\n return str1 == str2\n\n\ndef reversed_word(str):\n reversed_string = str[::-1]\n # eller\n # new_string = \"\"\n # for i in range(len(str)):\n # new_string += str[(len(str) - 1) - i]\n return reversed_string\n\n\ndef check_palindrome(str):\n return check_equal(str, reversed_word(str))\n\n\ndef contains_string(str1, str2):\n return str1.find(str2)\n\n\n\ndef main():\n print(\"\\nOppgave A\")\n str1 = \"hei\"\n str2 = \"hello\"\n str3 = \"hello\"\n print(check_equal(str1, str2))\n print(check_equal(str3, str2))\n\n print(\"\\nOppgave B\")\n print(reversed_word(\"star desserts\"))\n\n print(\"\\nOppgave C\")\n str1 = \"agnes i senga\"\n str2 = \"hello\"\n print(check_palindrome(str1))\n print(check_palindrome(str2))\n\n print(\"\\n Oppgave D\")\n str1 = \"pepperkake\"\n str2 = \"per\"\n str3 = \"ola\"\n print(contains_string(str1, str2))\n print(contains_string(str1, str3))\n return None\n\n\nmain()\n","sub_path":"Øving 7/Strenghåndtering/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"227961523","text":"##############################################################################\n#\n# Copyright (c) 2004 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). 
A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\nfrom zope.testing import renormalizing, setupstack\nimport doctest\nimport logging\nimport mock\nimport re\nimport sys\nimport unittest\nimport ZODB.MappingStorage\n\n\nclass FauxCache:\n\n @property\n def fc(self):\n return self\n\n def getStats(self):\n return 42, 4200, 23, 2300, 1000\n\ndef is_connected(self):\n return self._is_connected\n\nZODB.MappingStorage.MappingStorage._cache = FauxCache()\nZODB.MappingStorage.MappingStorage._is_connected = True\nZODB.MappingStorage.MappingStorage.is_connected = is_connected\n\ndef setUpInitialize(test):\n for name in (\n 'zope.app.appsetup.product.getProductConfiguration',\n 'zope.component.getUtilitiesFor',\n 'ZODB.ActivityMonitor.ActivityMonitor',\n 'zc.monitor.start',\n ):\n setupstack.context_manager(test, mock.patch(name))\n\ndef test_suite():\n return unittest.TestSuite((\n doctest.DocFileSuite(\n 'README.txt',\n checker=renormalizing.RENormalizing([\n (re.compile(\"Vm(Size|RSS):\\s+\\d+\\s+kB\"), 'Vm\\\\1 NNN kB'),\n (re.compile(\"\\d+[.]\\d+ seconds\"), 'N.NNNNNN seconds'),\n ]),\n ),\n doctest.DocFileSuite(\n 'initialize.test',\n setUp = setUpInitialize, tearDown=setupstack.tearDown,\n )\n ))\n","sub_path":"zc.z3monitor/trunk/src/zc/z3monitor/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"589681085","text":"\nfrom graphics import *;\n\nwindow = GraphWin(\"Window\", 2000,1000);\n\nwindow.setBackground(\"white\")\n\n\nsky = Rectangle(Point(0,0),Point(50000,50000))\nsky.setFill(\"cyan\")\nsky.draw(window)\n\nground = Rectangle(Point(0,400),Point(5000,800))\nground.setFill(\"green\")\nground.draw(window);\n\nbob=10\nbobo=18\nbobob=26\n\nlineo = Rectangle(Point(10,368),Point(50000,368))\nlineo.draw(window)\n\nlinet = Rectangle(Point(10,388),Point(50000,388))\nlinet.draw(window)\n\nfor x in range(0,80):\n \n testfence = Polygon(Point(bob,400),Point(bob,370),Point(bobo,350),Point(bobob,370),Point(bobob,400))\n testfence.setFill(\"white\")\n testfence.draw(window);\n bob=bob+25\n bobo=bobo+25\n bobob=bobob+25\n \nsun = Circle(Point(700,100),40)\nsun.setFill(\"yellow\")\nsun.draw(window)\n\nfor x in range(0,15):\n window.getMouse();\n sun.move(0,-10)\n yval = Text(Point(900,150),sun.getP1().getY())\n yval.draw(window)\n yval.setTextColor(\"red\")\n yval.setSize(25)\n time.sleep(1.5)\n yval.undraw()\nsky.setFill(\"blue\")\n \nwindow.getMouse();\n\nwindow.close();\n\n","sub_path":"Python Labs/18. 
Python Graphics - Sunrise2/run18.py","file_name":"run18.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"417237869","text":"# creates an ascii tree from a string with the structure root (child ())(child ())\n# an example string might be \"a (b (c (d )))(e (f (g )(h )))\"\n# could be improved by removing extra \"| \"\n\nimport sys\n\ndef main(argv):\n\tstr = argv\n\ttoprint = \"\"\n\tdepth = 0\n\twhile len(str) > 0:\n\t\tif str[0] == \"(\":\n\t\t\tdepth += 1\n\t\t\tstr = str[1:]\n\n\t\tstr = str.split(' ', 1)\n\t\tnextItem = str[0]\n\t\tstr = str[1]\n\t\t\n\t\ti = 0\n\t\twhile i < depth - 1:\n\t\t\ttoprint += \"| \"\n\t\t\ti+=1\n\t\t\n\t\tif depth > 0:\n\t\t\ttoprint += \"|---\"\n\n\t\ttoprint += nextItem\n\n\t\twhile str[0] == \")\":\n\t\t\tdepth -= 1\n\t\t\tstr = str[1:]\n\t\t\tif len(str) == 0:\n\t\t\t\tbreak\n\n\t\tprint(toprint)\n\t\ttoprint = \"\"\n\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1])","sub_path":"33/asciitree.py","file_name":"asciitree.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"127607955","text":"S = input()\nL = len(S)\nQ = 10**9 + 7\nanum = [ 0 for _ in range(L+1)] #i番目までのAの個数の合計\ncnum = [ 0 for _ in range(L+1)] #i番目までのCの個数の合計\nhnum = [ 0 for _ in range(L+1)] #i番目までの?の個数の合計\nfor i in range(1,L+1):\n if S[i-1] == 'A':\n anum[i] = anum[i-1] + 1\n cnum[i] = cnum[i-1]\n hnum[i] = hnum[i-1]\n elif S[i-1] == 'B':\n anum[i] = anum[i-1]\n cnum[i] = cnum[i-1]\n hnum[i] = hnum[i-1]\n elif S[i-1] == 'C':\n anum[i] = anum[i-1]\n cnum[i] = cnum[i-1] + 1\n hnum[i] = hnum[i-1]\n else:\n anum[i] = anum[i-1]\n cnum[i] = cnum[i-1]\n hnum[i] = hnum[i-1] + 1\nans = 0\nczen = cnum[L] #全部のCの個数\nhzen = hnum[L] #全部の?の個数\nfor i in range(1,L+1):\n if S[i-1] == 'B' or S[i-1] == '?':\n A = ((anum[i-1]*pow(3,hnum[i-1],Q))%Q + (hnum[i-1]*pow(3,max(0,hnum[i-1]-1),Q))%Q)%Q\n C = (((czen - cnum[i])*pow(3,hzen - hnum[i],Q))%Q + ((hzen - hnum[i])*pow(3,max(0,hzen - hnum[i]-1),Q))%Q)%Q\n K = (A*C)%Q\n ans = (ans + K)%Q\nprint(int(ans))\n","sub_path":"beginner/104/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"650664116","text":"import os\nimport ssl\nimport warnings\n\nimport pytest\n\nfrom pyhessian.client import HessianProxy\n\nfrom .helpers import ServletProcessWrapper, SUPPORT_DIR\n\n\n@pytest.fixture(autouse=True)\ndef raise_warnings_as_exceptions():\n warnings.simplefilter(\"error\", Warning)\n\n\n@pytest.fixture\ndef support_dir():\n return SUPPORT_DIR\n\n\n@pytest.fixture(scope='session', autouse=True)\ndef servlet_process():\n return ServletProcessWrapper()\n\n\n@pytest.fixture\ndef hessian_client_v1(servlet_process):\n client = HessianProxy(servlet_process.http_url, version=1)\n yield client\n if getattr(client, '_client', None):\n client._client.close()\n\n\n@pytest.fixture\ndef hessian_client_v2(servlet_process):\n client = HessianProxy(servlet_process.http_url, version=2)\n yield client\n if getattr(client, '_client', None):\n client._client.close()\n\n\n@pytest.fixture\ndef hessian_client_ssl(servlet_process, support_dir):\n cert_dir = os.path.join(support_dir, 'certs')\n server_crt = os.path.join(cert_dir, 'caroot.crt')\n client_crt = os.path.join(cert_dir, 'client.crt')\n client_key = os.path.join(cert_dir, 'client.key')\n context = 
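The prefix-count solution above (for each 'B' or '?' pivot, multiply the weighted A-count on the left by the weighted C-count on the right, each remaining '?' contributing a factor of 3) can be sanity-checked by brute force on small inputs:

from itertools import product

def brute(s):
    # enumerate every replacement of '?' and count "ABC" subsequences
    total = 0
    for combo in product('ABC', repeat=s.count('?')):
        it = iter(combo)
        t = [next(it) if c == '?' else c for c in s]
        n = len(t)
        for i in range(n):
            for j in range(i + 1, n):
                for k in range(j + 1, n):
                    if t[i] + t[j] + t[k] == 'ABC':
                        total += 1
    return total % (10**9 + 7)

print(brute('A??C'))   # 8, matching the prefix-count method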
ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=server_crt)\n context.load_cert_chain(certfile=client_crt, keyfile=client_key)\n client = HessianProxy(servlet_process.https_url, version=1, context=context)\n yield client\n if getattr(client, '_client', None):\n client._client.close()\n\n\n@pytest.fixture\ndef str_1024():\n s = \"\"\n for i in range(0, 16):\n s += \"%d%d%s\" % (\n i // 10, i % 10, \" 456789012345678901234567890123456789012345678901234567890123\\n\")\n return s[:1024]\n\n\n@pytest.fixture\ndef str_65536():\n s = \"\"\n for i in range(0, 64 * 16):\n s += \"%d%d%d%s\" % (\n i // 100, (i // 10) % 10, i % 10, \" 56789012345678901234567890123456789012345678901234567890123\\n\")\n return s[:65536]\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"226783073","text":"'''\nGiven a 2D board containing 'X' and 'O' (the letter O), capture all regions surrounded by 'X'.\nA region is captured by flipping all 'O's into 'X's in that surrounded region.\n\nExample:\nX X X X\nX O O X\nX X O X\nX O X X\nAfter running your function, the board should be:\nX X X X\nX X X X\nX X X X\nX O X X\n\nExplanation:\nSurrounded regions shouldn’t be on the border, which means that any 'O' on \nthe border of the board are not flipped to 'X'. Any 'O' that is not on the border \nand it is not connected to an 'O' on the border will be flipped to 'X'. \nTwo cells are connected if they are adjacent cells connected horizontally or vertically.\n'''\n\nclass Solution:\n def solve(self, board: List[List[str]]) -> None:\n \"\"\"\n Do not return anything, modify board in-place instead.\n \"\"\"\n if board and board[0]:\n m = len(board)\n n = len(board[0])\n border = [(0, i) for i in range(n)] + \\\n [(m - 1, i) for i in range(n)] + \\\n [(i, 0) for i in range(1, m)] + \\\n [(i, n - 1) for i in range(1, m)]\n border = list(filter(lambda x: board[x[0]][x[1]] == 'O', border))\n while border:\n i, j = border.pop()\n if 0 <= i < m and 0 <= j < n and board[i][j] == 'O':\n board[i][j] = '.'\n new = [(i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1)]\n border.extend(new)\n for i in range(m):\n for j in range(n):\n if board[i][j] == '.':\n board[i][j] = 'O'\n else:\n board[i][j] = 'X'\n# Runtime: 160 ms, faster than 68.37% of Python3 online submissions for Surrounded Regions.\n# Memory Usage: 14.9 MB, less than 35.48% of Python3 online submissions for Surrounded Regions.\n","sub_path":"101-200/130. Surrounded Regions.py","file_name":"130. Surrounded Regions.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"652140387","text":"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport abc\nfrom typing import Dict, Any, Callable, Optional, Tuple\nimport numpy as np\n\n\nclass BaseFunction(abc.ABC):\n \"\"\"Functions must inherit from this class for benchmarking purpose\n In child functions, implement \"oracle_call\". 
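A quick check of the border-flood solver above. Note that, as written, the snippet needs `from typing import List` in scope for the solve() annotation to resolve; the board below reproduces the example from its docstring:

board = [list('XXXX'),
         list('XOOX'),
         list('XXOX'),
         list('XOXX')]
Solution().solve(board)
print([''.join(row) for row in board])
# ['XXXX', 'XXXX', 'XXXX', 'XOXX'] -- only the border-connected 'O' survives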
This method should provide the output of your function\n (BaseFunction.__call__ will use it, and call the _add_noise method if you implemented it)\n Also, update \"_descriptors\" dict attribute so that function parameterization is recorded during benchmark.\n See ArtificialFunction for an example.\n\n Parameters\n ----------\n dimension: int\n dimension of the input space.\n transform: optional str\n name of a registered transform to be applied to the input data.\n\n Notes\n -----\n - transforms must be registered through the \"register_transform\" class method before instantiation.\n \"\"\"\n\n _TRANSFORMS: Dict[str, Callable[[Any, np.ndarray], np.ndarray]] = {} # Any should be the current class (but typing would get messy)\n\n def __init__(self, dimension: int, transform: Optional[str] = None) -> None:\n assert dimension > 0\n assert isinstance(dimension, int)\n self._dimension = dimension\n self._transform = transform\n self._descriptors: Dict[str, Any] = {}\n self._descriptors.update(dimension=dimension, function_class=self.__class__.__name__, transform=transform)\n if transform is not None and transform not in self._TRANSFORMS:\n raise ValueError(f'Unknown transform \"{self._transform}\", available are:\\n{list(self._TRANSFORMS.keys())}\\n'\n f'(you must register new ones with \"{self.__class__.__name__}.register_transform\" before instantiation)')\n\n @classmethod\n def register_transform(cls, name: str, func: Callable[[\"BaseFunction\", np.ndarray], np.ndarray]) -> None:\n \"\"\"Register a transform for use in call.\n\n Parameters\n ----------\n name: str\n name of the transform (this will be used as descriptor)\n func: callable\n A callable with the function as first input and point as second input, returning the transformed point.\n \"\"\"\n cls._TRANSFORMS[name] = func\n\n @property\n def descriptors(self) -> Dict[str, Any]:\n \"\"\"Description of the function parameterization, as a dict. This base class implementation provides function_class,\n noise_level, transform and dimension\n \"\"\"\n return dict(self._descriptors) # Avoid external modification\n\n def transform(self, x: np.ndarray) -> np.ndarray:\n \"\"\"Transform the input to another function specific domain.\n \"\"\"\n if self._transform is not None:\n x = self._TRANSFORMS[self._transform](self, x)\n return x\n\n def __call__(self, x: np.ndarray) -> float:\n \"\"\"Returns the output of the function,\n after transforming the data and adding noise through _add_noise\n (by default, _add_noise does not add any noise).\n It is preferable to avoid overloading this function in order to avoid issues\n with transformations and noise. 
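A minimal sketch of the subclassing pattern described above: implement oracle_call, optionally register a transform before instantiation. The Sphere class and 'shift' transform are illustrative, not part of nevergrad:

import numpy as np

class Sphere(BaseFunction):
    def oracle_call(self, x: np.ndarray) -> float:
        return float(np.sum(x ** 2))

Sphere.register_transform('shift', lambda func, x: x - 1.0)
f = Sphere(dimension=3, transform='shift')
print(f(np.ones(3)))   # transform maps ones -> zeros, so the value is 0.0
print(f.descriptors)   # records dimension, function_class and transform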
Override _add_noise and oracle_call instead.\n \"\"\"\n x_transf = self.transform(x)\n fx = self.oracle_call(x_transf)\n noisy_fx = self._add_noise(x, x_transf, fx)\n return noisy_fx\n\n def _add_noise(self, x_input: np.ndarray, x_transf: np.ndarray, fx: float) -> float: # pylint: disable=unused-argument\n \"\"\"Adds noise to the output of the function\n This is useful for artificial functions only.\n\n Parameters\n ----------\n x_input: np.ndarray\n Input point, before transformation\n x_transf: np.nparray\n Input point, after transformation\n fx: float\n Output before noise, returned by oracle_call\n \"\"\"\n return fx\n\n def __repr__(self) -> str:\n \"\"\"Shows the function name and its summary\n \"\"\"\n params = [f\"{x}={repr(y)}\" for x, y in sorted(self._descriptors.items())]\n return \"Instance of {}({})\".format(self.__class__.__name__, \", \".join(params))\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"Check that two instances where initialized with same settings.\n This is not meant to be used to check if functions are exactly equal (initialization may hold some randomness)\n This is only useful for unit testing.\n (may need to be overloaded to make faster if tests are getting slow)\n \"\"\"\n if other.__class__ != self.__class__:\n return False\n return bool(self._descriptors == other._descriptors)\n\n @property\n def dimension(self) -> int:\n \"\"\"Dimension of the input space\n \"\"\"\n return self._dimension\n\n @abc.abstractmethod\n def oracle_call(self, x: np.ndarray) -> float:\n \"\"\"Implements the call of the function.\n Under the hood, __call__ delegates to oracle_call + applies the transform and add some noise if need be.\n\n Parameter\n ---------\n x: np.ndarray\n The input data *before* transformation.\n\n Notes\n -----\n - \"oracle_call\" is not necessarily deterministic\n - the transform is applied *before* this function, do not apply it here.\n\n \"\"\"\n raise NotImplementedError\n\n\nclass ArtificiallyNoisyBaseFunction(BaseFunction): # pylint: disable=abstract-method\n \"\"\"Functions must inherit from this class for benchmarking purpose\n In child functions, implement \"oracle_call\". 
This method should provide the output of your function\n (BaseFunction.__call__ will use it and add noise if noise_level > 0)\n Also, update \"_descriptors\" dict attribute so that function parameterization is recorded during benchmark.\n See ArtificialFunction for an example.\n\n Parameters\n ----------\n dimension: int\n dimension of the input space.\n noise_level: float\n level of the noise to add\n noise_dissymmetry: bool\n True if we dissymetrize the noise model\n transform: optional str\n name of a registered transform to be applied to the input data.\n\n Notes\n -----\n - the noise formula is: noise_level * N(0, 1) * (f(x + N(0, 1)) - f(x))\n - transforms must be registered through the \"register_transform\" class method before instantiation.\n \"\"\"\n\n def __init__(self, dimension: int, noise_level: float = 0., noise_dissymmetry: bool = False, transform: Optional[str] = None) -> None:\n super().__init__(dimension, transform=transform)\n assert noise_level >= 0, \"Noise level must be greater or equal to 0\"\n self._noise_level = noise_level\n self._noise_dissymmetry = noise_dissymmetry\n self._descriptors.update(noise_level=noise_level, noise_dissymmetry=noise_dissymmetry)\n\n def _add_noise(self, x_input: np.ndarray, x_transf: np.ndarray, fx: float) -> float: # pylint: disable=unused-argument\n noise = 0\n noise_level = self._noise_level\n if noise_level:\n if not self._noise_dissymmetry or x_transf.ravel()[0] <= 0:\n side_point = self.transform(x_input + np.random.normal(0, 1, size=self.dimension))\n if self._noise_dissymmetry:\n noise_level *= (1. + x_transf.ravel()[0]*100.)\n noise = noise_level * np.random.normal(0, 1) * (self.oracle_call(side_point) - fx)\n return fx + noise\n\n\nclass PostponedObject(abc.ABC):\n \"\"\"Abstract class to inherit in order to notify the steady state benchmark executor that\n the function implements a delay. This delay will be used while benchmarking to provide the\n evaluation in a varying order.\n The main aim of this class is to make sure there is no typo in the name of the special function.\n\n See benchmark/execution.py for more details. 
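An illustrative check of the noise model stated above: with noise_level > 0, repeated calls at the same point differ, reflecting noise_level * N(0, 1) * (f(x + N(0, 1)) - f(x)). NoisySphere is a hypothetical subclass for demonstration:

import numpy as np

class NoisySphere(ArtificiallyNoisyBaseFunction):
    def oracle_call(self, x: np.ndarray) -> float:
        return float(np.sum(x ** 2))

f = NoisySphere(dimension=2, noise_level=0.1)
x = np.zeros(2)
print([round(f(x), 4) for _ in range(3)])  # noiseless value is 0.0; outputs vary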
This object is implemented here to avoid circular\n imports.\n \"\"\"\n\n @abc.abstractmethod\n def get_postponing_delay(self, arguments: Tuple[Tuple[Any, ...], Dict[str, Any]], value: float) -> float:\n raise NotImplementedError\n","sub_path":"nevergrad/functions/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":8256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"96409697","text":"from urllib import urlopen\nfrom HTMLParser import HTMLParser\nfrom StringIO import StringIO\n\ndef read_url(url):\n socket = urlopen(url)\n html = socket.read()\n socket.close()\n parser = MyHTMLParser()\n parser.feed(html)\n return parser.output.getvalue()\n\nclass MyHTMLParser(HTMLParser):\n output = StringIO()\n def handle_data(self,data):\n self.output.write(data)\n\ndef tidyup(s):\n lines = s.split(\"\\n\")\n lines = map(str.rstrip,lines)\n lines = filter(lambda l:l,lines)\n lines = \"\\n\".join(lines)\n return lines\n\naddress = \"http://docs.python.org/2/library/\"\naddress = \"http://xrpp.iucr.org/Bb/contents/fullindex.html\"\npage = read_url(address)\npage = tidyup(page)\nprint(page)\n","sub_path":"HTMLParser_example.py","file_name":"HTMLParser_example.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"370032150","text":"#!/usr/bin/env python\n'''\n\n'''\n\n__author__ = \"Gonzalo Olave, Giovanni Pais\"\n\nimport rospy\n\nimport smach\nimport smach_ros\n\n# skills\nfrom bender_skills import robot_factory\nfrom uchile_states.manipulation import basic, advanced, octomap\n\n# temporal\nfrom bender_arm_control.arm_commander import Limb\n\n\nclass Iteration(smach.State): \n def __init__(self):\n smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'],\n io_keys=[\"it\"])\n self.it = -1\n self.it_max = 2\n\n def execute(self,userdata):\n\n self.it +=1\n if (self.it < self.it_max):\n userdata.it = self.it\n return 'succeeded'\n return 'aborted'\n\n\ndef getInstance(robot):\n\n sm = smach.StateMachine(outcomes = ['succeeded','failed','aborted','preempted'],\n input_keys=['object','side','possible_grasp','selected_pregrasp','selected_grasp'])\n\n sm.userdata.effort = 0.5\n\n sm.userdata.trayectory_name_pre = ['home','premanip_1','premanip_2']\n sm.userdata.trayectory_name_pos = ['premanip_2','premanip_1','home']\n sm.userdata.trayectory_name_posfail = ['premanip_2']\n\n arms = {'l':Limb('l'),'r':Limb('r')}\n\n sm.userdata.home_safe=[0.0 , 0.15 , 0.0 , 0.0 , 0.0 , 0.0]\n sm.userdata.it =0\n \n with sm:\n smach.StateMachine.add('PRE_MANIPULACION', advanced.GoPremanipulation(robot,arms),\n transitions = {'succeeded':'ITERATION','aborted':'FAILED'},\n remapping = {'trayectory_name':'trayectory_name_pre','side':'side'})\n\n smach.StateMachine.add('ITERATION', Iteration(),\n transitions = {'succeeded':'GO_TO_OBJECT','aborted':'FAILED'})\n\n smach.StateMachine.add('GO_TO_OBJECT', basic.PositionObject(robot,arms),\n transitions = {'succeeded':'OPEN_GRIPPER','aborted':'RE_MANIPULACION_SECURE'},\n remapping = {'object':'object','side':'side'})\n\n smach.StateMachine.add('OPEN_GRIPPER', basic.OpenGripper(robot),\n transitions = {'succeeded':'GRASP_OBJECT'},\n remapping = {'side':'side','effort':'effort'})\n\n smach.StateMachine.add('GRASP_OBJECT', basic.Grasp_capmap(robot,arms),\n transitions = {'succeeded':'GRAB_GRIPPER','aborted':'RE_MANIPULACION_SECURE'},\n remapping = {'object':'object','side':'side'})\n\n 
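Exercising the text extractor in the HTMLParser record above (Python 2, as in the original; note that `output` is a class attribute, so every MyHTMLParser instance shares one buffer -- fine for a single parse, surprising otherwise):

parser = MyHTMLParser()
parser.feed('<html><body><p>Hello</p> <p>world</p></body></html>')
print(tidyup(parser.output.getvalue()))   # 'Hello world'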
smach.StateMachine.add('RE_MANIPULACION_SECURE', basic.JointGoal(robot),\n transitions = {'succeeded':'RE_MANIPULACION','aborted':'FAILED'},\n remapping = {'joint_goal':'selected_pregrasp','side':'side'})\n\n smach.StateMachine.add('RE_MANIPULACION', advanced.GoMoveit(robot,arms),\n transitions = {'succeeded':'ITERATION','aborted':'FAILED'},\n remapping = {'trayectory_name':'trayectory_name_posfail','side':'side'})\n\n smach.StateMachine.add('GRAB_GRIPPER', basic.GrabGripper(robot),\n transitions = {'succeeded':'GO_POSTGRASP','aborted':'RE_MANIPULACION_SECURE'},\n remapping = {'side':'side','effort':'effort'})\n\n smach.StateMachine.add('GO_POSTGRASP', basic.JointGoal(robot),\n transitions = {'succeeded':'POS_MANIPULACION','aborted':'FAILED'},\n remapping = {'joint_goal':'selected_pregrasp','side':'side'})\n\n smach.StateMachine.add('POS_MANIPULACION', basic.SetPositionNamed(robot,arms,blind=False,init='',goal='pre_1'),\n transitions = {'succeeded':'succeeded','aborted':'GO_HOME'},\n remapping = {'trayectory_name':'trayectory_name_pos','side':'side'})\n\n smach.StateMachine.add('GO_HOME', advanced.GoHomeSafe(robot,arms),\n transitions = {'succeeded':'succeeded'},\n remapping = {'joint_goal':'home_safe','side':'side'})\n\n\n smach.StateMachine.add('FAILED', advanced.GoMoveit(robot,arms),\n transitions = {'succeeded':'failed'},\n remapping = {'trayectory_name':'trayectory_name_pos','error_code':'error_code'})\n \n\n\n\n return sm\n\n\nif __name__ == \"__main__\":\n rospy.init_node(\"sm_arm\")\n\n robot = robot_factory.build(['l_gripper','r_gripper','l_arm','r_arm', \"neck\", \"object_recognition\", \"octomap\"], core=False)\n\n sm = getInstance(robot)\n sm.execute()\n","sub_path":"high/manipulation/ManipulateG.py","file_name":"ManipulateG.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"246042872","text":"# encoding: utf-8\nfrom .lliststack import Stack\n\nPRECEDENCE = {\n \"*\": 0,\n \"/\": 0,\n \"+\": 1,\n \"-\": 1,\n}\n\n\ndef op(left, right, operator):\n if operator == \"*\":\n return left * right\n elif operator == \"/\":\n return left / right\n elif operator == \"+\":\n return left + right\n elif operator == \"-\":\n return left - right\n\n\ndef convert(expression):\n stack = Stack()\n result = []\n for c in expression:\n if c == \"(\":\n stack.push(c)\n elif c == \")\":\n while stack.peek() != \"(\":\n result.append(stack.pop())\n stack.pop()\n elif c in PRECEDENCE:\n while not stack.isEmpty() and stack.peek() in PRECEDENCE and PRECEDENCE[stack.peek()] <= PRECEDENCE[c]:\n result.append(stack.pop())\n stack.push(c)\n elif c in [\" \", \"\\t\"]: # 忽略空白\n pass\n else:\n result.append(c)\n while not stack.isEmpty():\n result.append(stack.pop())\n return \"\".join(result)\n\n\ndef evaluate(postfix):\n stack = Stack()\n for c in postfix:\n if c in PRECEDENCE:\n right = stack.pop()\n left = stack.pop()\n stack.push(op(left, right, c))\n else:\n stack.push(int(c))\n assert len(stack) == 1\n return stack.pop()\n\n\nif __name__ == \"__main__\":\n exp = \"((1+2*3)*((1+1)*(2+2))*(3+4))\"\n print(evaluate(convert(exp)) * 2)\n","sub_path":"datastructures/stacks/infix_to_postfix.py","file_name":"infix_to_postfix.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"596784380","text":"# coding: utf-8\n\nimport time\nimport socket\nimport threading\nimport logging\nimport pickle\nimport queue\nimport 
copy\nfrom utils import NODE_JOIN, REQUEST_INFO, ENTITIES_NAMES, NODE_DISCOVERY, ORDER, PICKUP, \\\n TOKEN, PICK, GIVE_FOOD, KEEP_ALIVE, IM_ALIVE, CLEAR_TABLE, print_out\nfrom adaptor import Adaptor\nfrom encapsulation_utils import nodes_message_create, token_message_create, \\\n pre_ring_message_create, discovery_message_create, entities_message_create\n\n\nclass RingNode(threading.Thread):\n def __init__(self, address, self_id, name, max_nodes=4, ring_address=None, timeout=3, refresh_time=3):\n threading.Thread.__init__(self)\n self.id = self_id\n self.addr = address\n self.ring_address = ring_address\n self.max_nodes = max_nodes\n\n self.inside_ring = False\n self.successor_id = self.max_nodes * 2\n self.successor_addr = self.addr\n self.nodes_com = []\n self.name = name\n\n self.refresh_time = refresh_time\n\n self.entities = {}\n for i in range(len(ENTITIES_NAMES)):\n self.entities[ENTITIES_NAMES[i]] = None\n\n self.coordinator = False\n\n self.inside_ring_order = 0\n\n # queues\n self.in_queue = queue.Queue() # messages received from the token\n self.out_queue = queue.Queue() # messages to send to the token\n\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n self.socket.settimeout(timeout)\n self.logger = logging.getLogger(\"Node {}\".format(self.id))\n\n # adaptor for professor's client\n self.adaptor = Adaptor()\n\n def send(self, address, o):\n p = pickle.dumps(o)\n self.socket.sendto(p, address)\n\n def recv(self):\n try:\n p, addr = self.socket.recvfrom(1024)\n except socket.timeout:\n return None, None\n else:\n if len(p) == 0:\n return None, addr\n else:\n return p, addr\n\n def broadcast(self, message_to_send):\n for i in range(254):\n address_send = ('127.0.0.' 
+ str(i + 1), 5000)\n self.send(address_send, message_to_send)\n\n def requestInfo(self):\n # request info about other nodes (because they can already be in a ring and this is to accelerate the process\n # of enter the ring)\n message_pre_ring = pre_ring_message_create(self.addr, self.id)\n\n message_to_send = nodes_message_create(REQUEST_INFO, message_pre_ring)\n\n self.broadcast(message_to_send)\n\n def discoveryReply(self, args):\n message_to_send = nodes_message_create(NODE_DISCOVERY, args.copy())\n\n if self.name == args['name'] and args['id'] is None:\n message_to_send['args']['id'] = self.id\n elif args['id'] is not None:\n self.entities[args['name']] = args['id']\n # self.logger.debug('My table of entities: ' + str(self.entities))\n\n if args['id'] != self.id:\n self.send(self.successor_addr, message_to_send)\n\n def allNodesDiscovered(self):\n number_nodes = 0\n for i in self.entities:\n if self.entities[i] is not None:\n number_nodes += 1\n return number_nodes == self.max_nodes\n\n def sendMessageToToken(self, id_to_send, order):\n token_to_send = token_message_create(id_to_send, order)\n\n message_to_send = nodes_message_create(TOKEN, token_to_send)\n\n self.out_queue.put(message_to_send)\n\n def sendToClient(self, addr, method, args):\n message_to_send = nodes_message_create(method, args)\n\n self.send(addr, message_to_send)\n\n def run(self):\n self.socket.bind(self.addr)\n\n delta_time = time.time()\n im_alive_time = time.time()\n time_since_last_alive = time.time()\n token_sent = False\n\n while True:\n if not self.inside_ring:\n self.requestInfo()\n\n p, addr = self.recv()\n if p is not None:\n message_received = self.adaptor.adapt(pickle.loads(p), addr)\n\n if message_received['method'] == REQUEST_INFO:\n message_pre_ring = pre_ring_message_create(self.addr, self.id)\n\n message_to_send = nodes_message_create(NODE_JOIN, message_pre_ring)\n\n self.send(message_received['args']['addr'], message_to_send)\n if message_received['method'] == NODE_JOIN or message_received['method'] == REQUEST_INFO:\n args = message_received['args']\n\n if args['id'] not in self.nodes_com:\n self.nodes_com.append(args['id'])\n self.logger.debug(\"Nodes that i know about: \" + str(self.nodes_com))\n\n if self.coordinator and args['id'] < self.id:\n self.coordinator = False\n self.logger.debug(\"I'm not the coordinator!\")\n if not self.coordinator and self.id <= min(self.nodes_com):\n self.coordinator = True\n self.logger.debug(\"I'm the coordinator!\")\n\n if args['id'] > self.successor_id and self.successor_id < self.id and len(self.nodes_com) > self.id + 1:\n self.inside_ring = False\n self.successor_id = self.max_nodes * 2\n self.successor_addr = self.addr\n\n if (len(self.nodes_com) > 1 and self.id == max(self.nodes_com) and args['id'] == min(self.nodes_com)\n or self.successor_id > args['id'] > self.id):\n self.inside_ring = True\n self.successor_id = args['id']\n self.successor_addr = args['addr']\n time_since_last_alive = time.time()\n\n self.logger.debug(\"Me: \" + str(self.addr) + \"\\nSuccessor:\" + str(self.successor_addr) + \"\\n\")\n\n elif message_received['method'] == KEEP_ALIVE:\n message_to_send = nodes_message_create(IM_ALIVE, None)\n self.send(addr, message_to_send)\n elif message_received['method'] == IM_ALIVE:\n time_since_last_alive = time.time()\n elif message_received['method'] == CLEAR_TABLE:\n self.entities = {k: None for k in self.entities}\n self.logger.debug(\"Entities table cleared\")\n elif message_received['method'] == NODE_DISCOVERY:\n 
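                    # NODE_DISCOVERY circulates around the ring: the node whose name\n                    # matches fills in its id, every other node records the (name, id)\n                    # pair in self.entities, and the message is forwarded until it\n                    # returns to the node whose id it carries.\n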
self.discoveryReply(message_received['args'])\n                elif message_received['method'] == ORDER:\n                    message_received_copy = copy.deepcopy(message_received)\n                    message_received_copy['args']['food'] = print_out(message_received_copy['args']['food'])\n                    self.logger.debug(\"Message received from client: \" + str(message_received_copy))\n                    self.sendMessageToToken(self.entities['Waiter'], message_received['args'])\n\n                elif message_received['method'] == PICKUP:\n                    self.logger.debug(\"Message received from client: \" + str(message_received))\n\n                    message_to_send = entities_message_create(PICK, message_received['args'])\n                    self.sendMessageToToken(self.entities['Clerk'], message_to_send)\n                elif message_received['method'] == TOKEN:\n                    id_destination = message_received['args']['id']\n                    message_to_send = message_received\n\n                    if id_destination == self.id:\n                        self.in_queue.put(message_received['args']['order'])\n\n                        message_to_send = nodes_message_create(TOKEN, token_message_create(None, None))\n\n                    if self.out_queue.qsize() > 0 and (id_destination == self.id\n                                                       or id_destination is None):\n                        message_to_send = self.out_queue.get()\n\n                    if len(self.nodes_com) == self.max_nodes:\n                        self.send(self.successor_addr, message_to_send)\n                    else:\n                        self.logger.debug(\"TOKEN REMOVED!\")\n                else:\n                    self.send(self.successor_addr, message_received)\n\n            if self.inside_ring and time.time() - im_alive_time > self.refresh_time:\n                im_alive_time = time.time()\n\n                message_to_send = nodes_message_create(KEEP_ALIVE, None)\n                self.send(self.successor_addr, message_to_send)\n\n            if self.inside_ring and time.time() - time_since_last_alive > self.refresh_time*2:\n                if self.successor_id in self.nodes_com:\n                    self.nodes_com.pop(self.nodes_com.index(self.successor_id))\n\n                self.inside_ring = False\n                self.successor_id = self.max_nodes * 2\n                self.successor_addr = self.addr\n                token_sent = False\n\n                # inform the other nodes that the ring is not complete\n                message_to_send = nodes_message_create(CLEAR_TABLE, None)\n                self.broadcast(message_to_send)\n\n            if self.coordinator and self.inside_ring and len(self.nodes_com) == self.max_nodes:\n                if not self.allNodesDiscovered():\n                    for entity in self.entities:\n                        if self.entities[entity] is None:\n                            message_to_discover = discovery_message_create(entity, None)\n                            message_to_send = nodes_message_create(NODE_DISCOVERY, message_to_discover)\n\n                            self.send(self.successor_addr, message_to_send)\n                elif not token_sent:\n                    token_to_send = token_message_create(None, None)\n                    message_to_send = nodes_message_create(TOKEN, token_to_send)\n\n                    self.send(self.successor_addr, message_to_send)\n                    token_sent = True\n                    self.logger.debug(\"TOKEN SENT BEFORE %s SECONDS!\", str(time.time() - delta_time))\n","sub_path":"RingNode_v2.py","file_name":"RingNode_v2.py","file_ext":"py","file_size_in_byte":9703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"27485614","text":"from __future__ import absolute_import\n\nfrom xml.parsers.expat import ExpatError\nfrom xml.dom.minidom import Document\nfrom xml.dom.minidom import parseString\n\nimport pytest\n\nfrom sunpy.util import xml\n\n\ndef test_xml_to_dict1():\n    \"\"\"\n    should return dict of xml string\n    \"\"\"\n    source_xml = \"<outer>\\\n                  <inner1>one</inner1>\\\n                  <inner2>two</inner2>\\\n                  </outer>\"\n    \n    xml_dict = xml.xml_to_dict(source_xml)\n    expected_dict = {u'outer': {u'inner2': u'two', u'inner1': u'one'}}\n    \n    assert xml_dict == expected_dict\n    \ndef 
test_xml_to_dict2():\n \"\"\"\n should return dict of xml string \n and if a tag is duplicated it takes the last one. \n \"\"\"\n source_xml = \"<outer>\\\n <inner1>one-one</inner1>\\\n <inner1>one-two</inner1>\\\n <inner2>two-one</inner2>\\\n <inner2>two-two</inner2>\\\n </outer>\"\n \n xml_dict = xml.xml_to_dict(source_xml)\n expected_dict = {u'outer': {u'inner2': u'two-two', u'inner1': u'one-two'}}\n \n assert xml_dict == expected_dict\n \ndef test_xml_to_dict3():\n \"\"\"\n should return dict of xml string \n with empty value if there are no inner elements \n \"\"\"\n source_xml = \"<outer/>\"\n \n xml_dict = xml.xml_to_dict(source_xml)\n expected_dict = {u'outer': ''}\n \n assert xml_dict == expected_dict\n \ndef test_xml_to_dict4():\n \"\"\"\n should return dict of xml string \n with empty value if there are no inner elements \n \"\"\"\n source_xml = \"<outer></outer>\"\n \n xml_dict = xml.xml_to_dict(source_xml)\n expected_dict = {u'outer': ''}\n \n assert xml_dict == expected_dict\n \ndef test_xml_to_dict5():\n \"\"\"\n should return dict of xml string \n with 2 layer nesting \n \"\"\"\n source_xml = \"<outer>\\\n <mid1>\\\n <inner1>one-one</inner1>\\\n </mid1>\\\n <mid2>\\\n <inner2>two-one</inner2>\\\n </mid2>\\\n </outer>\"\n \n xml_dict = xml.xml_to_dict(source_xml)\n expected_dict = {u'outer': {u'mid2': {u'inner2': u'two-one'}, u'mid1': {u'inner1': u'one-one'}}}\n \n assert xml_dict == expected_dict\n \ndef test_xml_to_dict6():\n \"\"\"\n should return dict of xml string \n with 2 layer nesting and if a tag is duplicated it takes the last one.\n \"\"\"\n source_xml = \"<outer>\\\n <mid>\\\n <inner1>one-one</inner1>\\\n </mid>\\\n <mid>\\\n <inner2>two-one</inner2>\\\n </mid>\\\n </outer>\"\n \n xml_dict = xml.xml_to_dict(source_xml)\n expected_dict = {u'outer': {u'mid': {u'inner2': u'two-one'}}}\n \n assert xml_dict == expected_dict\n\ndef test_xml_to_dict7():\n \"\"\"\n should raise TypeError when passed None \n \"\"\"\n assert pytest.raises(TypeError, xml.xml_to_dict, None)\n \ndef test_xml_to_dict8():\n \"\"\"\n should raise TypeError when passed non string\n \"\"\"\n assert pytest.raises(TypeError, xml.xml_to_dict, 9)\n \ndef test_xml_to_dict9():\n \"\"\"\n should raise ExpatError when passed empty string\n \"\"\"\n assert pytest.raises(ExpatError, xml.xml_to_dict, \"\")\n \ndef test_xml_to_dict10():\n \"\"\"\n should raise ExpatError when passed space\n \"\"\"\n assert pytest.raises(ExpatError, xml.xml_to_dict, \" \")\n \n \ndef test_get_node_text1():\n \"\"\"\n should raise NotTextNodeError if there is a non text node.\n \"\"\"\n doc = Document()\n outer = doc.createElement(\"outer\")\n doc.appendChild(outer)\n pytest.raises(xml.NotTextNodeError, xml.get_node_text, doc)\n \ndef test_get_node_text2():\n \"\"\"\n should return empty string for a node with no child nodes. 
\n \"\"\"\n assert xml.get_node_text(Document()) == \"\"\n \ndef test_get_node_text3():\n \"\"\"\n should return node text\n \"\"\"\n node = parseString(\"<outer>one</outer>\")\n text_node = node.childNodes[0]\n\n assert xml.get_node_text(text_node) == \"one\"\n \ndef test_get_node_text4():\n \"\"\"\n should raise AttributeError when sent None\n \"\"\"\n assert pytest.raises(AttributeError, xml.get_node_text, None)\n \ndef test_get_node_text5():\n \"\"\"\n should raise AttributeError when sent wrong type\n \"\"\"\n assert pytest.raises(AttributeError, xml.get_node_text, \"wrong type\")\n\ndef test_node_to_dict1():\n \"\"\"\n should return dict of node\n \"\"\"\n \n doc = Document()\n\n outer = doc.createElement(\"outer\")\n doc.appendChild(outer)\n \n inner1 = doc.createElement(\"inner1\")\n inner2 = doc.createElement(\"inner2\")\n outer.appendChild(inner1)\n outer.appendChild(inner2)\n \n inner1_text = doc.createTextNode(\"one\")\n inner2_text = doc.createTextNode(\"two\")\n inner1.appendChild(inner1_text)\n inner2.appendChild(inner2_text)\n \n expected_dict = {'outer': {'inner2': 'two', 'inner1': 'one'}}\n xml_dict = xml.node_to_dict(doc)\n \n assert xml_dict == expected_dict\n \ndef test_node_to_dict2():\n \"\"\"\n should return dict of node double nested\n \"\"\"\n \n doc = Document()\n\n outer = doc.createElement(\"outer\")\n doc.appendChild(outer)\n \n mid1 = doc.createElement(\"mid1\")\n outer.appendChild(mid1)\n mid2 = doc.createElement(\"mid2\")\n outer.appendChild(mid2)\n \n inner1 = doc.createElement(\"inner1\")\n inner2 = doc.createElement(\"inner2\")\n mid1.appendChild(inner1)\n mid2.appendChild(inner2)\n \n inner1_text = doc.createTextNode(\"one\")\n inner2_text = doc.createTextNode(\"two\")\n inner1.appendChild(inner1_text)\n inner2.appendChild(inner2_text)\n \n expected_dict = {'outer': {'mid2': {'inner2': 'two'}, 'mid1': {'inner1': 'one'}}}\n xml_dict = xml.node_to_dict(doc)\n \n assert xml_dict == expected_dict\n \ndef test_node_to_dict3():\n \"\"\"\n should return empty dict when sent empty doc\n \"\"\"\n expected_dict = {}\n xml_dict = xml.node_to_dict(Document())\n \n assert xml_dict == expected_dict\n \ndef test_node_to_dict4():\n \"\"\"\n should raise AttributeError when sent wrong type\n \"\"\"\n assert pytest.raises(AttributeError, xml.node_to_dict, 9)\n \ndef test_node_to_dict5():\n \"\"\"\n should raise AttributeError when sent None\n \"\"\"\n assert pytest.raises(AttributeError, xml.node_to_dict, None)\n \n ","sub_path":"sunpy/tests/util/test_xml.py","file_name":"test_xml.py","file_ext":"py","file_size_in_byte":6455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"92172601","text":"import json\nimport os\nimport shutil\nimport time\nfrom urllib.parse import quote_plus\nfrom urllib.parse import unquote_plus\nfrom urllib.parse import urlparse\n\nfrom fedoidc import MetadataStatement\nfrom fedoidc import test_utils\nfrom fedoidc.bundle import FSJWKSBundle\nfrom fedoidc.operator import FederationOperator\nfrom fedoidc.operator import Operator\nfrom fedoidc.test_utils import MetaDataStore\nfrom jwkest import as_unicode\nfrom jwkest.jws import factory\n\nfrom oic.utils.keyio import build_keyjar\n\nKEYDEFS = [\n {\"type\": \"RSA\", \"key\": '', \"use\": [\"sig\"]},\n {\"type\": \"EC\", \"crv\": \"P-256\", \"use\": [\"sig\"]}\n]\n\nTOOL_ISS = 'https://localhost'\n\nFO = {'swamid': 'https://swamid.sunet.se', 'feide': 'https://www.feide.no',\n 'edugain': 'https://edugain.com'}\n\nOA = {'sunet': 
'https://sunet.se'}\n\nIA = {}\n\nSMS_DEF = {\n OA['sunet']: {\n \"discovery\": {\n FO['swamid']: [\n {'request': {}, 'requester': OA['sunet'],\n 'signer_add': {'federation_usage': 'discovery'},\n 'signer': FO['swamid'], 'uri': False},\n ],\n FO['feide']: [\n {'request': {}, 'requester': OA['sunet'],\n 'signer_add': {'federation_usage': 'discovery'},\n 'signer': FO['feide'], 'uri': True},\n ],\n FO['edugain']: [\n {'request': {}, 'requester': FO['swamid'],\n 'signer_add': {'federation_usage': 'discovery'},\n 'signer': FO['edugain'], 'uri': True},\n {'request': {}, 'requester': OA['sunet'],\n 'signer_add': {}, 'signer': FO['swamid'], 'uri': True}\n ]\n },\n \"registration\": {\n FO['swamid']: [\n {'request': {}, 'requester': OA['sunet'],\n 'signer_add': {'federation_usage': 'registration'},\n 'signer': FO['swamid'], 'uri': False},\n ]\n }\n }\n}\n\n# Clear out old stuff\nfor d in ['mds', 'ms']:\n if os.path.isdir(d):\n shutil.rmtree(d)\n\nliss = list(FO.values())\nliss.extend(list(OA.values()))\n\nsigner, keybundle = test_utils.setup(\n KEYDEFS, TOOL_ISS, liss, ms_path='ms', csms_def=SMS_DEF,\n mds_dir='msd', base_url='https://localhost')\n\n\nclass Response(object):\n pass\n\n\nclass MockHTTPClient():\n def __init__(self, mds):\n self.mds = mds\n\n def http_request(self, url):\n p = urlparse(url)\n rsp = Response()\n rsp.status_code = 200\n rsp.text = self.mds[p.path.split('/')[-1]]\n return rsp\n\n\ndef test_key_rotation():\n _keyjar = build_keyjar(KEYDEFS)[1]\n fo = FederationOperator(iss='https://example.com/op', keyjar=_keyjar,\n keyconf=KEYDEFS, remove_after=1)\n fo.rotate_keys()\n assert len(fo.keyjar.get_issuer_keys('')) == 4\n time.sleep(1)\n fo.rotate_keys()\n assert len(fo.keyjar.get_issuer_keys('')) == 4\n\n\ndef test_pack_metadata_statement():\n jb = FSJWKSBundle('', None, 'fo_jwks',\n key_conv={'to': quote_plus, 'from': unquote_plus})\n _keyjar = build_keyjar(KEYDEFS)[1]\n op = Operator(keyjar=_keyjar, jwks_bundle=jb, iss='https://example.com/')\n req = MetadataStatement(issuer='https://example.org/op')\n sms = op.pack_metadata_statement(req)\n assert sms # Should be a signed JWT\n _jwt = factory(sms)\n assert _jwt\n assert _jwt.jwt.headers['alg'] == 'RS256'\n _body = json.loads(as_unicode(_jwt.jwt.part[1]))\n assert _body['iss'] == op.iss\n assert _body['issuer'] == 'https://example.org/op'\n # verify signature\n r = _jwt.verify_compact(sms, _keyjar.get_signing_key())\n assert r\n\n\ndef test_pack_metadata_statement_other_iss():\n _keyjar = build_keyjar(KEYDEFS)[1]\n op = Operator(keyjar=_keyjar, iss='https://example.com/')\n req = MetadataStatement(issuer='https://example.org/op')\n sms = op.pack_metadata_statement(req, iss='https://example.com/')\n assert sms # Should be a signed JWT\n _jwt = factory(sms)\n _body = json.loads(as_unicode(_jwt.jwt.part[1]))\n assert _body['iss'] == 'https://example.com/'\n # verify signature\n r = _jwt.verify_compact(sms, _keyjar.get_signing_key())\n assert r\n\n\ndef test_pack_metadata_statement_other_alg():\n _keyjar = build_keyjar(KEYDEFS)[1]\n op = Operator(keyjar=_keyjar, iss='https://example.com/')\n req = MetadataStatement(issuer='https://example.org/op')\n sms = op.pack_metadata_statement(req, alg='ES256')\n assert sms # Should be a signed JWT\n _jwt = factory(sms)\n _body = json.loads(as_unicode(_jwt.jwt.part[1]))\n assert _body['iss'] == 'https://example.com/'\n # verify signature\n r = _jwt.verify_compact(sms, _keyjar.get_signing_key())\n assert r\n\n\ndef test_unpack_metadata_statement_uri():\n s = signer[OA['sunet']]\n req = 
MetadataStatement(issuer='https://example.org/op')\n # Not intermediate\n ms = s.create_signed_metadata_statement(req, 'discovery', single=True)\n\n jb = FSJWKSBundle('', None, 'fo_jwks',\n key_conv={'to': quote_plus, 'from': unquote_plus})\n\n mds = MetaDataStore('msd')\n op = Operator(jwks_bundle=jb)\n op.httpcli = MockHTTPClient(mds)\n res = op.unpack_metadata_statement(jwt_ms=ms)\n assert len(res.parsed_statement) == 3\n loel = op.evaluate_metadata_statement(res.result)\n assert len(loel) == 3\n assert set([l.fo for l in loel]) == {'https://swamid.sunet.se',\n 'https://edugain.com',\n 'https://www.feide.no'}\n","sub_path":"tests/test_06_operator.py","file_name":"test_06_operator.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"19539839","text":"import fx, sys, json, glob, os\r\nfrom fx import *\r\n\r\nfx.prefs.add(\"KMFX.Paint Presets Path\",\"\")\r\n\r\nclass GenericJSONEncoder(json.JSONEncoder):\r\n\tdef default(self, obj):\r\n\t\ttry:\r\n\t\t\treturn super().default(obj)\r\n\t\texcept TypeError:\r\n\t\t\tpass\r\n\t\tcls = type(obj)\r\n\r\n\t\tresult = {\r\n\t\t\t'__custom__': True,\r\n\t\t\t'__name__': cls.__name__}\r\n\r\n\t\tif cls == Point3D:\r\n\t\t\tresult[\"data\"] = {\"x\":obj.x,\"y\":obj.y,\"z\":obj.z}\r\n\t\telif cls == Rect:\r\n\t\t\tresult[\"data\"] = {\"size\":obj.size}\r\n\r\n\t\telse:\r\n\t\t\tresult[\"data\"] = obj.__dict__ if not hasattr(cls, '__json_encode__') else obj.__json_encode__\r\n\t\r\n\t\treturn result\r\n\r\n\r\nclass GenericJSONDecoder(json.JSONDecoder):\r\n\tdef simpledecode(self,t):\r\n\t\t\tif t['__name__'] == \"Point3D\":\r\n\t\t\t\treturn Point3D(t[\"data\"][\"x\"],t[\"data\"][\"y\"],t[\"data\"][\"z\"])\r\n\t\t\telif t['__name__'] == \"Rect\":\r\n\t\t\t\ttemp = Rect(0,0,0,0)\r\n\t\t\t\ttemp.setSize(t[\"data\"][\"size\"][0],t[\"data\"][\"size\"][1])\t\r\n\t\t\t\treturn temp\t\r\n\t\t\telse:\r\n\t\t\t\treturn t\r\n\r\n\r\n\tdef decode(self, str):\r\n\t\tresult = super().decode(str)\r\n\r\n\t\tmresult = {}\r\n\t\t\r\n\t\tfor key in result.keys():\r\n\r\n\t\t\tif isinstance(result[key],list):\r\n\t\t\t\t#print(result[key],\"\\n\")\r\n\t\t\t\tmresult[key] = []\r\n\t\t\t\tfor n in range(0,len(result[key])):\r\n\t\t\t\t\tmresult[key].append(self.simpledecode(result[key][n]))\r\n\r\n\t\t\telse:\r\n\t\t\t\tif not isinstance(result[key], dict) or not result[key].get('__custom__', False):\r\n\t\t\t\t#print(\"regular\",result)\r\n\t\t\t\t\tmresult[key] = result[key]\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(result[key],type(result[key]),\"\\n\")\r\n\t\t\t\t\tif isinstance(result[key],list):\r\n\t\t\t\t\t\t#print(result[key],\"\\n\")\r\n\t\t\t\t\t\tfor n in range(0,len(result[key])):\r\n\t\t\t\t\t\t\tmresult[key][n] = simpledecode(result[key][n])\r\n\t\t\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\r\n\t\t\t\t\t\tif result[key]['__name__'] == \"Point3D\":\r\n\t\r\n\t\t\t\t\t#print(\"x\",result[key][\"data\"][0])\r\n\t\t\t\t\t\t\tmresult[key] = Point3D(result[key][\"data\"][\"x\"],result[key][\"data\"][\"y\"],result[key][\"data\"][\"z\"])\r\n\t\t\t\t\t\telif result[key]['__name__'] == \"Rect\":\r\n\t\t\t\t\t\t\tmresult[key] = Rect(0,0,0,0)\r\n\t\t\t\t\t\t\t# print(result[key][\"data\"][\"size\"])\r\n\t\t\t\t\t\t\tmresult[key].setSize(result[key][\"data\"][\"size\"][0],result[key][\"data\"][\"size\"][1])\r\n\t\t\t\t\t\t#instance.__dict__.update(result['data'])\r\n\t\r\n\r\n\t\treturn mresult\r\n\r\n\t\t\r\n\r\n\r\n\r\nclass KMFXpaintPresets(Action):\r\n\t\"\"\"this will 
save/load the actual state of the paint node to/from disk\"\"\"\r\n\tdef __init__(self,):\r\n\t\tAction.__init__(self, \"KMFX|Paint Presets\")\r\n\r\n\tdef available(self):\r\n\t\t\tpass # verification on execution\r\n\r\n\r\n\tdef execute(self,**kwargs):\r\n\t\t# fx.beginUndo(\"KMFX Paint Presets\") # undo is not working on this\r\n\r\n\r\n\t\tpaint_presets_path = fx.prefs[\"KMFX.Paint Presets Path\"] if fx.prefs[\"KMFX.Paint Presets Path\"]!= \"\" else os.environ[\"SFX_SCRIPT_PATH\"] + \"/KMscripts/paint_presets/\"\r\n\t\t\r\n\t\t#### check if the custom pref path exists and warn user if its wrong\r\n\t\tif paint_presets_path == fx.prefs[\"KMFX.Paint Presets Path\"]:\r\n\t\t\tif not os.path.exists(paint_presets_path):\r\n\t\t\t\tdisplayError(\"The custom path '%s' could be wrong or\\nwas not found or can't be read,\\nplease check your KMFX preferences!\\nFalling back to default path\\n %s \" % (paint_presets_path,os.environ[\"SFX_SCRIPT_PATH\"] + \"/KMscripts/paint_presets/\"))\r\n\t\t\t\t# print(\"The custom path '%s' could be wrong / was not found / can't be read, please check your preferences\\n falling back to default path\\n %s \" % (paint_presets_path,os.environ[\"SFX_SCRIPT_PATH\"] + \"/KMscripts/paint_presets/\"))\r\n\t\t\t\tpaint_presets_path = os.environ[\"SFX_SCRIPT_PATH\"] + \"/KMscripts/paint_presets/\"\r\n\r\n\r\n\r\n\r\n\t\tmode = kwargs[\"mode\"] if \"mode\" in kwargs.keys() else \"save\"\r\n\r\n\t\tnode = activeNode()\r\n\t\t\r\n\t\tif node.type == \"PaintNode\":\r\n\t\t\t'''\r\n\t\t\tthe actual brush used it saved on the <item type=\"string\" id=\"brush\"> on the preset.\r\n\t\t\tlooks like the settings for the rest of the preset are not necessary\r\n\t\t\t'''\r\n\t\t\tfx.activeProject().save() ##small hack to force the state to update\r\n\r\n\t\t\tif mode == \"save\":\r\n\r\n\t\t\t\tfname = { \"id\" : \"fname\", \"label\" : \"Filename\", \"value\" : \"Default\"}\r\n\t\t\t\tresult = getInput(fields=[fname])\r\n\t\t\t\tcurrent = fx.paint.preset\r\n\t\t\t\toverride = False\r\n\r\n\t\t\t\tif result != None:\r\n\t\t\t\t\tdpath = paint_presets_path+\"/\"+result[\"fname\"]+\"/\"\r\n\t\t\t\t\tdirectory = os.path.dirname(dpath)\r\n\r\n\t\t\t\t\tif os.path.exists(directory):\r\n\t\t\t\t\t\tov=askQuestion(\"Preset already exists, override?\")\r\n\t\t\t\t\t\tif ov == False:\r\n\t\t\t\t\t\t\treturn # do not use this with UNDO\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tif not os.path.exists(directory):\r\n\t\t\t\t\t\t\tos.makedirs(directory)\r\n\t\t\t\t\texcept:\r\n\t\t\t\t\t\tprint(\"Error creating preset directory, check folder write permissions?\\n %s\" % directory)\t\t\t\r\n\t\t\t\t\tfor i in range(0,10):\r\n\t\t\t\t\t\tfpath = paint_presets_path+\"/\"+result[\"fname\"]+\"/\"+result[\"fname\"]+\"_\"+str(i)+'.json'\r\n\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\tfx.paint.preset= i\r\n\t\t\t\t\t\t\tif len(node.state[\"preset\"+str(i)]) > 0:\r\n\t\t\t\t\t\t\t\tdic = node.state[\"preset\"+str(i)]\r\n\t\t\t\t\t\t\t\tppreset = json.dumps(dic,cls=GenericJSONEncoder)\r\n\t\t\t\t\t\t\t\twith open(fpath, 'w') as file:\r\n\t\t\t\t\t\t\t\t\tfile.write(ppreset)\r\n\r\n\t\t\t\t\t\t\tprint(\"Saved preset %s @ %s\" % (i,fpath))\r\n\r\n\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\tprint(\"Preset %s skipped\"% i)\r\n\t\t\t\t\t\t\tif os.path.exists(fpath):\r\n\t\t\t\t\t\t\t\tos.remove(fpath)\r\n\t\t\t\t\t\t\t\tprint(\"Old Preset %s removed\"% i)\r\n\t\t\t\t\t\t\t# e = sys.exc_info()\r\n\t\t\t\t\t\t\t# print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, 
e)\r\n\t\t\t\ttry:\r\n\t\t\t\t\tfx.paint.preset = current ## go back to original active preset\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass\r\n\r\n\r\n\t\t\telif mode == \"load\":\r\n\t\t\t\tjsonFiles = glob.glob(paint_presets_path+\"/**/*.json\",recursive=True)\r\n\t\t\t\tfilelist = {}\r\n\t\t\t\tnamecollection = []\r\n\t\t\t\tpresetsfound = False\r\n\t\t\t\tif len(jsonFiles) > 0:\r\n\t\t\t\t\tfor f in jsonFiles:\r\n\t\t\t\t\t\tname = os.path.basename(f)\r\n\t\t\t\t\t\tname = str(name).rsplit(\"_\",1)[0]\r\n\t\t\t\t\t\tnamecollection.append(name)\r\n\t\t\t\t\tnamecollection= list(set(namecollection))\r\n\t\t\t\t\tpresetsfound = True\r\n\t\t\t\telse:\r\n\t\t\t\t\tresulterror = getInput(title=\"Error\", msg=\"No presets found\")\r\n\t\t\t\t\t\r\n\t\t\t\tif presetsfound:\r\n\t\t\t\t\tlista = { \"id\" : \"list\", \"label\" : \"List\", \"value\" : namecollection[0], \"items\" : namecollection }\r\n\t\t\t\t\tresult = getInput(fields=[lista])\r\n\t\t\t\t\tloadedpresets = []\r\n\t\t\t\t\tif result != None:\r\n\t\t\t\t\t\tfor i in range(0,10):\r\n\t\t\t\t\t\t\tfx.paint.preset = i\r\n\t\t\t\t\t\t\ttry:\r\n\t\t\t\t\t\t\t\tnode.setState(\"preset\"+str(i),None)\r\n\r\n\t\t\t\t\t\t\t\twith open(paint_presets_path+\"/\"+result[\"list\"]+\"/\"+result[\"list\"]+\"_\"+str(i)+'.json') as complex_data:\r\n\t\t\t\t\t\t\t\t\tdata = complex_data.read()\r\n\t\t\t\t\t\t\t\t\tb = json.loads(data, cls=GenericJSONDecoder)\r\n\t\t\t\t\t\t\t\t\t\r\n\t\t\t\t\t\t\t\t\tfor ii in b.keys():\r\n\t\t\t\t\t\t\t\t\t\tfx.paint.setState(ii,b[ii])\r\n\t\t\t\t\t\t\t\t\tfx.paint.savePreset(i)\r\n\t\t\t\t\t\t\t\t\tloadedpresets.append(i)\r\n\r\n\r\n\t\t\t\t\t\t\texcept:\r\n\t\t\t\t\t\t\t\tpass\r\n\r\n\t\t\t\t\t\tfx.paint.preset = min(loadedpresets) ## loads the first available preset\r\n\t\t\t\t\t\t\t\t# e = sys.exc_info()\r\n\t\t\t\t\t\t\t\t# print('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), type(e).__name__, e)\r\n\r\n\t\t# fx.endUndo() # undo is not working on this\r\n\r\naddAction(KMFXpaintPresets())\r\n\r\n","sub_path":"kmfx_paintpresets.py","file_name":"kmfx_paintpresets.py","file_ext":"py","file_size_in_byte":6825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"637665215","text":"# link: https://www.hackerrank.com/challenges/the-power-sum/problem\n# resources: https://www.geeksforgeeks.org/coin-change-dp-7/ and https://www.hackerrank.com/challenges/the-power-sum/editorial\ndef count_expressions(number, power, sumSoFar, currentValue):\n \n if sumSoFar == number:\n return 1\n else:\n print(sumSoFar, currentValue)\n print(\"add first 1\")\n currentValue += 1\n answer = 0\n while sumSoFar + currentValue**power <= number:\n print(\"not there yet, recurse\")\n answer += count_expressions(number, power, sumSoFar + currentValue**power, currentValue)\n print(\"add another 1 and go back to the start of the while loop\")\n currentValue += 1\n else:\n print(\"too big\", sumSoFar, currentValue)\n return answer\n\nprint(count_expressions(29, 2, 0, 0))","sub_path":"ThePowerSum.py","file_name":"ThePowerSum.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"189253518","text":" \nimport scrapy\nfrom test01.items import Test01Item\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\n\nclass DmozSpiderSpider(scrapy.Spider):\n name = \"dmoz_spider\"\n allowed_domains = [\"news.zz91.com\"]\n start_urls = (\n 'http://news.zz91.com/tech/',\n )\n url = 
'http://news.zz91.com/tech/'\n\n\n # parse html\n def parse(self, response):\n '''\n filename = response.url.split('/')[-2] + '.html'\n with open (filename, 'wb') as fp:\n \tfp.write(response.body)\n self.log('Saved files %s' % filename)\n '''\n # get tech news ,title and link and date\n lis = response.xpath('//div[@class=\"l-item\"]/ul/li')\n for li in lis:\n item = Test01Item()\n item['title'] = li.xpath('div[@class=\"l-item-text\"]/a/text()').extract()\n item['link'] = li.xpath('div[@class=\"l-item-text\"]/a/@href').extract()\n item['date'] = li.xpath('div[@class=\"l-item-date\"]/text()').extract()\n yield item\n\n # get link of the next page\n next_link = response.xpath('//div[@class=\"page-next\"]/a/@href').extract()\n self.logger.info('next_link %s' % next_link)\n if next_link:\n next_link = next_link[0]\n print(next_link)\n yield Request(self.url + next_link, callback=self.parse)\n\n \n # callback this function when spider has closed, just for test, not really\n def closed(self, reason):\n self.logger.info(\"spider has %s\" % reason)\n \n\n","sub_path":"test01/test01/spiders/dmoz_spider.py","file_name":"dmoz_spider.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"402022658","text":"# ====== Legal notices\n#\n# Copyright (C) 2013 - 2020 GEATEC engineering\n#\n# This program is free software.\n# You can use, redistribute and/or modify it, but only under the terms stated in the QQuickLicence.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY, without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the QQuickLicence for details.\n#\n# The QQuickLicense can be accessed at: http://www.qquick.org/license.html\n#\n# __________________________________________________________________________\n#\n#\n# THIS PROGRAM IS FUNDAMENTALLY UNSUITABLE FOR CONTROLLING REAL SYSTEMS !!\n#\n# __________________________________________________________________________\n#\n# It is meant for training purposes only.\n#\n# Removing this header ends your licence.\n#\n\nimport random as rd\n\nimport simpylc as sp\n\nimport common as cm\n\nrd.seed ()\n\n'''\n\n z\n |\n o -- y\n /\n x\n\n'''\n\n\nclass Visualisation (sp.Scene):\n def __init__ (self):\n sp.Scene.__init__ (self)\n \n self.camera = sp.Camera ()\n \n self.earth = sp.Ellipsoid (size = 3 * (cm.earthDiam,), center = (0, 0, 0), color = (0, 0, 0.9))\n self.moon = sp.Ellipsoid (size = 3 * (cm.moonDiam,), center = (0, 0, cm.earthMoonDist), color = (0.6, 0.6, 0.6))\n\n self.body = sp.Cylinder (size = (0.3, 0.3, 1), center = (0, 0, 0.85 + 0.4), pivot = (0, 0, 1), color = (1, 1, 0.2))\n self.nose = sp.Cone (size = (0.3, 0.3, 0.5), center = (0, 0, 0.75), color = (1, 1, 0.2))\n self.bracket = sp.Cylinder (size = (0.1, 0.1, 0.1), center = (0, 0, -0.55), color = (1, 1, 0.2))\n self.gimbal = sp.Ellipsoid (size = 3 * (0.12,), center = (0, 0, -0.05), pivot = (1, 0, 0), color = (1, 1, 0.2))\n self.thruster = sp.Cone (size = (0.2, 0.2, 0.3), pivot = (0, -1, 0), center = (0, 0, -0.09), joint = (0, 0, 0.09), color = (1, 1, 0.2)) # See thruster_rotation.jpg for pivot\n # Center at -(0.3/2 - 0.12/2)\n self.flame = sp.Cone (size = (0.1, 0.1, 1), center = (0, 0, -0.65), joint = (0, 0, 0.5), axis = (0, 1, 0), angle = 180, color = (1, 0.7, 0))\n self.tankRed = sp.Ellipsoid (size = 3 * (0.1,), center = (0.16, 0, 0), color = (1, 0, 0))\n self.tankGreen = sp.Ellipsoid (size = 3 * (0.1,), center 
= (-0.16, 0, 0), color = (0, 1, 0))\n self.tankYellow = sp.Ellipsoid (size = 3 * (0.1,), center = (0, 0.16, 0), color = (1, 1, 0))\n self.tankBlue = sp.Ellipsoid (size = 3 * (0.1,), center = (0, -0.16, 0), color = (0, 0, 1))\n \n def display (self):\n self.camera (\n position = sp.tEva ((sp.world.rocket.positionX + 4, sp.world.rocket.positionY, sp.world.rocket.positionZ)),\n focus = sp.tEva ((sp.world.rocket.positionX, sp.world.rocket.positionY, sp.world.rocket.positionZ + 1.5))\n )\n \n self.earth ()\n self.moon ()\n \n self.body (\n position = sp.tEva ((sp.world.rocket.positionX, sp.world.rocket.positionY, sp.world.rocket.positionZ)),\n attitude = sp.world.rocket._shipRotMat,\n parts = lambda:\n self.nose () +\n self.bracket (\n parts = lambda:\n self.tankGreen () +\n self.tankRed () +\n self.tankBlue () + \n self.tankYellow () +\n self.gimbal (\n rotation = sp.world.rocket.blueYellowAngle,\n parts = lambda:\n self.thruster (\n rotation = sp.world.rocket.greenRedAngle,\n parts = lambda:\n self.flame (\n scale = sp.tsMul ((1, 1, 1),\n sp.world.rocket.thrust / sp.world.rocket.thrusterMaxForce * (0.9 + 0.1 * rd.random ())),\n color = (1, 0.3 + 0.7 * rd.random (), 0))\n ) ) ) )\n \n","sub_path":"simpylc/simulations/rocket/visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"255765339","text":"#!/usr/bin/env python3\n\nimport json\nimport urllib.request\n\nwith urllib.request.urlopen(\n \"https://static01.nyt.com/elections-assets/2020/data/api/2020-11-03/race-page/pennsylvania/president.json\"\n) as url:\n raw_data = json.loads(url.read().decode())\n time_series = raw_data[\"data\"][\"races\"][0][\"timeseries\"]\n time_series.sort(key=lambda x: x[\"timestamp\"])\n biden_prev_votes = 0\n trump_prev_votes = 0\n biden_vote_decrease_total = 0\n trump_vote_decrease_total = 0\n biden_vote_decrease_times = 0\n trump_vote_decrease_times = 0\n for time_point in time_series:\n total_votes = time_point[\"votes\"]\n biden_cur_votes = total_votes * time_point[\"vote_shares\"][\"bidenj\"]\n trump_cur_votes = total_votes * time_point[\"vote_shares\"][\"trumpd\"]\n if biden_prev_votes > biden_cur_votes:\n biden_vote_decrease_total += biden_prev_votes - biden_cur_votes\n biden_vote_decrease_times += 1\n if trump_prev_votes > trump_cur_votes:\n trump_vote_decrease_total += trump_prev_votes - trump_cur_votes\n trump_vote_decrease_times += 1\n biden_prev_votes = biden_cur_votes\n trump_prev_votes = trump_cur_votes\n print(\n \"biden_vote_decrease_total=%d\\nbiden_vote_decrease_times=%d\\ntrump_vote_decrease_total=%d\\ntrump_vote_decrease_times=%d\"\n % (\n biden_vote_decrease_total,\n biden_vote_decrease_times,\n trump_vote_decrease_total,\n trump_vote_decrease_times,\n )\n )\n","sub_path":"voteDecreaseDetector.py","file_name":"voteDecreaseDetector.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"243052170","text":"import csv\nimport time\nimport os.path\nimport numpy as np\nimport pandas as pd\nfrom Simon import Simon\nimport json\nimport nltk\n\ndef main(datapath, email_index, execution_config, DEBUG):\n\n # set important parameters\n maxlen = 20\n max_cells = 500\n checkpoint_dir = \"pretrained_models/\"\n with open(checkpoint_dir + 'Categories_base.txt','r') as f:\n Categories = f.read().splitlines()\n category_count = len(Categories)\n\n # load specified execution 
configuration\n if execution_config is None:\n raise TypeError\n Classifier = Simon(encoder={}) # dummy text classifier\n config = Classifier.load_config(execution_config, checkpoint_dir)\n encoder = config['encoder']\n intermediate_model = Classifier.generate_feature_model(maxlen, max_cells, category_count, checkpoint_dir, config, DEBUG = DEBUG)\n\n # load sample email\n with open(datapath) as data_file:\n emails = data_file.readlines()\n sample_email = json.loads(emails[int(email_index)])['body']\n if DEBUG:\n print('DEBUG::sample email:')\n print(sample_email)\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n sample_email_sentence = tokenizer.tokenize(sample_email)\n sample_email_sentence = [elem[-maxlen:] for elem in sample_email_sentence] # truncate\n all_email_df = pd.DataFrame(sample_email_sentence,columns=['Email 0'])\n if DEBUG:\n print('DEBUG::the final shape is:')\n print(all_email_df.shape)\n all_email_df = all_email_df.astype(str)\n raw_data = np.asarray(all_email_df.ix[:max_cells-1,:]) #truncate to max_cells\n raw_data = np.char.lower(np.transpose(raw_data).astype('U'))\n\n # encode data \n X = encoder.x_encode(raw_data, maxlen)\n\n # generate features for email\n y = intermediate_model.predict(X)\n # discard empty column edge case\n y[np.all(all_email_df.isnull(),axis=0)] = 0\n\n # print and return result\n print('\\n128-d Simon Feature Vector:\\n')\n print(y[0])\n return y[0]\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(\n description='attempts to discern data types looking at columns holistically.')\n\n parser.add_argument('--datapath', dest='datapath',\n help='datapath containing data (i.e. emails) for which to generate features')\n\n parser.add_argument('--index', dest='data_index',\n help='index of data (i.e. single email) from datapath for which to generate features')\n\n parser.add_argument('--config', dest='execution_config',\n help='execution configuration to load. 
contains max_cells, and encoder config.')\n\n parser.add_argument('--debug', dest='debug_config',default=\"True\",\n help='whether or not to print debug information.')\n\n args = parser.parse_args()\n\n main(args.datapath, args.data_index, args.execution_config,args.debug_config)\n","sub_path":"Simon/scripts/main_generate_feature_model.py","file_name":"main_generate_feature_model.py","file_ext":"py","file_size_in_byte":2912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"379207170","text":"# Send files with Python to Slack from the command line\n# Requirements:\n# Python is installed and slacker is installed via \"pip install slacker\"\n# Slacker is Python slack integration\n# Creater - Willi Carlsen\n\n# Command line usage example:\n# python py2slack.py -c py2slacktest -f fileuwant2upload.filetype -m \"Message\"\n\nimport argparse\nfrom slacker import Slacker\n\n# Slack bot API toke\ntoken = \"insert-your-token-here\"\nslack = Slacker(token)\n\n# Argument input parser\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-c\", \"--channel\", help=\"Slack channel\")\nparser.add_argument(\"-u\", \"--user\", help=\"Slack user\")\nparser.add_argument(\"-f\", \"--file\", help=\"File to upload\")\nparser.add_argument(\"-m\", \"--message\", type=str,\n help=\"Message added under file in Slack\")\nargs = parser.parse_args()\nfile_name = args.file\n\nif args.channel and args.file:\n slack.files.upload(args.file, channels=\"#\"+args.channel,\n initial_comment=args.message)\n\nif args.user and args.file:\n slack.files.upload(args.file, channels=\"@\"+args.user,\n initial_comment=args.message)\n\n# # EXAMPLE OF HOW SLACKER WORKS IN GENERAL\n# # specify bot API toke\n# token = \"insert-your-token-her\"\n# slack = Slacker(token)\n#\n# # channel or username\n# channel = \"#py2slacktest\"\n# user = \"@willi\"\n#\n# # send channel chat message\n# message = \"Boten Anna can send chat messages!\"\n# slack.chat.post_message(channel, message, as_user=True)\n#\n# # upload a file to channel\n# file = \"image.jpg\" # change it to \"hello.txt\" for text file\n# file_title = \"Some fitting title\"\n# comment = \"Write some comment text.\"\n# slack.files.upload(file, channels=user, title=file_title,\n# initial_comment=comment)\n","sub_path":"py2slack.py","file_name":"py2slack.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"325683693","text":"import os\n\nimport pytest\n\nfrom plenum.client.wallet import Wallet\nfrom plenum.common.constants import ENVS\nfrom plenum.common.port_dispenser import genHa\nfrom plenum.common.script_helper import changeHA\nfrom plenum.common.signer_simple import SimpleSigner\n\nfrom plenum.common.util import getMaxFailures\nfrom plenum.test.eventually import eventually\nfrom plenum.test.helper import checkSufficientRepliesRecvd, \\\n checkNodesConnected, ensureElectionsDone, \\\n sendReqsToNodesAndVerifySuffReplies\nfrom plenum.test.test_client import genTestClient\nfrom plenum.test.test_node import TestNode\nfrom plenum.common.log import getlogger\n\n\nlogger = getlogger()\n\n\n@pytest.yield_fixture(scope=\"module\")\ndef looper(txnPoolNodesLooper):\n yield txnPoolNodesLooper\n\n\nwhitelist = ['found legacy entry', \"doesn't match\", 'reconciling nodeReg',\n 'missing', 'conflicts', 'matches', 'nodeReg',\n 'conflicting address', 'unable to send message']\n\n\ndef checkIfMasterPoolTxnFileUpdated(nodeStackNewHA, 
clientStackNewHA,\n txnPoolNodeSet, *clients):\n baseDirs = set()\n for n in txnPoolNodeSet:\n baseDirs.add(n.config.baseDir)\n for c in clients:\n baseDirs.add(c.config.baseDir)\n\n for baseDir in baseDirs:\n for name, env in ENVS.items():\n poolLedgerPath = os.path.join(baseDir, env.poolLedger)\n if os.path.exists(poolLedgerPath):\n with open(poolLedgerPath) as f:\n poolLedgerContent = f.read()\n assert nodeStackNewHA.host in poolLedgerContent\n assert str(nodeStackNewHA.port) in poolLedgerContent\n assert clientStackNewHA.host in poolLedgerContent\n assert str(clientStackNewHA.port) in poolLedgerContent\n\ndef changeNodeHa(looper, txnPoolNodeSet, tdirWithPoolTxns,\n poolTxnData, poolTxnStewardNames, tconf, shouldBePrimary):\n\n # prepare new ha for node and client stack\n subjectedNode = None\n stewardName = None\n stewardsSeed = None\n\n for nodeIndex, n in enumerate(txnPoolNodeSet):\n if (shouldBePrimary and n.primaryReplicaNo == 0) or \\\n (not shouldBePrimary and n.primaryReplicaNo != 0):\n subjectedNode = n\n stewardName = poolTxnStewardNames[nodeIndex]\n stewardsSeed = poolTxnData[\"seeds\"][stewardName].encode()\n break\n\n nodeStackNewHA, clientStackNewHA = genHa(2)\n logger.debug(\"change HA for node: {} to {}\".\n format(subjectedNode.name, (nodeStackNewHA, clientStackNewHA)))\n\n nodeSeed = poolTxnData[\"seeds\"][subjectedNode.name].encode()\n\n # change HA\n stewardClient, req = changeHA(looper, tconf, subjectedNode.name, nodeSeed,\n nodeStackNewHA, stewardName, stewardsSeed)\n f = getMaxFailures(len(stewardClient.nodeReg))\n looper.run(eventually(checkSufficientRepliesRecvd, stewardClient.inBox,\n req.reqId, f, retryWait=1, timeout=15))\n\n # stop node for which HA will be changed\n subjectedNode.stop()\n looper.removeProdable(subjectedNode)\n\n # start node with new HA\n restartedNode = TestNode(subjectedNode.name, basedirpath=tdirWithPoolTxns,\n config=tconf, ha=nodeStackNewHA,\n cliha=clientStackNewHA)\n looper.add(restartedNode)\n\n txnPoolNodeSet[nodeIndex] = restartedNode\n looper.run(checkNodesConnected(txnPoolNodeSet, overrideTimeout=70))\n ensureElectionsDone(looper, txnPoolNodeSet, retryWait=1, timeout=10)\n\n # start client and check the node HA\n anotherClient, _ = genTestClient(tmpdir=tdirWithPoolTxns,\n usePoolLedger=True)\n looper.add(anotherClient)\n looper.run(eventually(anotherClient.ensureConnectedToNodes))\n stewardWallet = Wallet(stewardName)\n stewardWallet.addIdentifier(signer=SimpleSigner(seed=stewardsSeed))\n sendReqsToNodesAndVerifySuffReplies(looper, stewardWallet, stewardClient, 5)\n looper.removeProdable(stewardClient)\n checkIfMasterPoolTxnFileUpdated(nodeStackNewHA, clientStackNewHA,\n txnPoolNodeSet, stewardClient, anotherClient)\n\n\n# TODO: This is failing as of now, fix it\n# def testStopScriptIfNodeIsRunning(looper, txnPoolNodeSet, poolTxnData,\n# poolTxnStewardData, tconf):\n# nodeName = txnPoolNodeSet[0].name\n# nodeSeed = poolTxnData[\"seeds\"][nodeName].encode()\n# stewardName, stewardsSeed = poolTxnStewardData\n# ip, port = genHa()\n# nodeStackNewHA = HA(ip, port)\n#\n# # the node `nodeName` is not stopped here\n#\n# # change HA\n# with pytest.raises(Exception, message=\"Node '{}' must be stopped \"\n# \"before\".format(nodeName)):\n# changeHA(looper, tconf, nodeName, nodeSeed, nodeStackNewHA,\n# stewardName, stewardsSeed)\n\n\ndef testChangeNodeHaForPrimary(looper, txnPoolNodeSet, tdirWithPoolTxns,\n poolTxnData, poolTxnStewardNames, tconf):\n changeNodeHa(looper, txnPoolNodeSet, tdirWithPoolTxns,\n poolTxnData, poolTxnStewardNames, 
tconf, shouldBePrimary=True)\n\n\ndef testChangeNodeHaForNonPrimary(looper, txnPoolNodeSet, tdirWithPoolTxns,\n poolTxnData, poolTxnStewardNames, tconf):\n changeNodeHa(looper, txnPoolNodeSet, tdirWithPoolTxns,\n poolTxnData, poolTxnStewardNames, tconf, shouldBePrimary=False)\n\n\n","sub_path":"plenum/test/script/test_change_node_ha.py","file_name":"test_change_node_ha.py","file_ext":"py","file_size_in_byte":5534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"176029207","text":"class TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\ndef notEmpty(queue):\n if len(queue) != 0:\n return True\n return False\nclass TreePrinter:\n def printTree(self, root):\n if not root:\n return root\n vecList = [] # 返回结果\n\n last = root # 保存当前行的最后一个\n nlast = None # 保存下一行的最后一个\n queue = [] # 一个队列\n queue.append(root)\n while(notEmpty(queue)):\n vec = [] # 保存一行结果\n while queue[0]!=last: # 直到当前层的queue[0]使我们的last指针\n # 都要进行打印(存vec)并将它们的孩子节点放入队列(如果不为None)\n cur = queue.pop(0)\n vec.append(cur.val)\n if cur.left is not None:\n queue.append(cur.left)\n if cur.right is not None:\n queue.append(cur.right)\n # 之后把最后一个也进行 打印(存vec)和放孩子节点到队列\n cur = queue.pop(0)\n vec.append(cur.val)\n if cur.left is not None:\n queue.append(cur.left)\n if cur.right is not None:\n queue.append(cur.right)\n if notEmpty(queue):\n # 下一行的nlast就是队列的最后一个元素\n nlast = queue[-1] # 这里-1可能会有一个越界问题,就是万一队列为空了,就没有最后一个元素了\n # 将last指向nlast,以便继续遍历\n last = nlast\n vecList.append(vec)\n return vecList\n\n\n\n\n","sub_path":"alg/niukesuanfa/1_2treePrinter.py","file_name":"1_2treePrinter.py","file_ext":"py","file_size_in_byte":1702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"161993456","text":"import io\nimport os\nimport sys\nimport collections\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", message=\"numpy.dtype size changed\")\nimport socket\nimport argparse\nimport pickle\nimport numpy as np\nimport time\nimport math\n\nimport matplotlib.pyplot as plt\n\n#plt.use('agg')\nfrom sklearn.decomposition import PCA\nfrom sklearn import cluster\n\nimport torch\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\nimport torch.distributed as dist\n\nfrom torch.utils.data import Dataset, DataLoader, sampler, distributed\n\nimport modules as custom_nn\n\nfrom torchaudio import datasets, transforms, save\n\nimport torchvision\nfrom torchvision import datasets, models\nfrom torchvision import transforms as img_transforms\nfrom torchvision.utils import save_image\n\nimport vctk_custom_dataset\nimport librispeech_custom_dataset\nfrom torchvision.utils import save_image\n\nimport pickle\nimport ujson\nfrom scipy.stats import pearsonr\n\n#import matplotlib.pyplot as plt\nimport pylab\nimport torch.distributions as distribution\n\nimport audio_utils as prepro\nfrom audio_utils import griffinlim\nfrom audio_utils import to_audio\nimport librosa\nimport librosa.display\n\nimport modules as custom_nn\nimport vae_g_l\nimport vae_l\n# import vae_g_l_exp\n\nfrom trainer import VAETrainer\n\nfrom tensorboardX import SummaryWriter\n\nimport OMG_dataset\n\nimport PIL.Image\nfrom torchvision.transforms import ToTensor\n\nparser = argparse.ArgumentParser(description='Speaker Identification')\n\nparser.add_argument('--cuda', type=int, default=1, metavar='N',\n help='use cuda if possible (default: 1)')\nparser.add_argument('--batch-size', type=int, default=32, 
metavar='N',\n help='input batch size for training (default: 128)')\nparser.add_argument('--learning-rate', type=float, default=0.001, metavar='N',\n help='learning rate (default: 0.01)')\nparser.add_argument('--num-epochs', type=int, default=100, metavar='N',\n help='number of epochs to train (default: 100)')\nparser.add_argument('--model-type', type=str, default='vae_g_l', metavar='S',\n help='model type; options: vae_g_l, vae_l (default: vae_g_l)')\nparser.add_argument('--model-name', type=str, default='emotion_regression', metavar='S',\n help='model name (for saving) (default: speaker_id_model)')\nparser.add_argument('--pretrained-model', type=str, default='vae_g_l', metavar='S',\n help='pretrained vae (default: recurrent_vae)')\nparser.add_argument('--checkpoint-interval', type=int, default=1, metavar='N',\n help='Interval between epochs to print loss and save model (default: 1)')\nparser.add_argument('--mode', type=str, default='train', metavar='S',\n help='(operation mode default: train; to test: testing)')\nparser.add_argument('--resume', type=int, default=0, metavar='N',\n help='continue training default: 0; to continue: 1)')\nparser.add_argument('--debug-mode', type=str, default=0, metavar='N',\n help='(debug mode (print dimensions) default: 0; to debug: 1)')\nparser.add_argument('--beta', type=float, default=1., metavar='N',\n help='(beta weight on KLD, default: 1. (no regularisation))')\nparser.add_argument('--frame-dropout', type=float, default=0., metavar='N',\n help='(audio frame dropout for decoder, default: 0. (no dropout))')\nparser.add_argument('--decoder-dropout', type=float, default=0.0, metavar='N',\n help='(general dropout for decoder, default: 0.5')\nparser.add_argument('--anneal-function', type=str, default='logistic', metavar='S',\n help='(anneal function (logistic or linear) default: logistic')\nparser.add_argument('--k', type=float, default=0.0025, metavar='N',\n help='(anneal function hyperparameter default: 0.0025')\nparser.add_argument('--x0', type=int, default=2500, metavar='N',\n help='(anneal function hyperparameter default: 2500')\nparser.add_argument('--z-size', type=int, default=512, metavar='N',\n help='(latent feature depth, default: 256')\nparser.add_argument('--type', type=str, default='emotion', metavar='N',\n help='Determine if you want to train the model on the person_id or on the emotion. 
values={emotion, person_id}')\nparser.add_argument('--dataset', type=str, default='OMGEmotion')\nparser.add_argument('--a-or-v', type=int, default=0, metavar='N',\n                    help='(label column to regress: 0 for arousal, 1 for valence, default: 0)')\n\nargs = parser.parse_args()\n\nif args.mode == \"train\":\n    writer = SummaryWriter(comment=args.model_name)\n\nargs.hidden_size = args.z_size\n# args.z_size = 256\nargs.local_z_size = args.z_size\nif torch.cuda.is_available():\n    args.use_cuda = True\nelse:\n    args.use_cuda = False\n\nif args.cuda and torch.cuda.is_available():\n    use_cuda = True\nelse:\n    use_cuda = False\n\nprint(\"Using CUDA: {}\".format(use_cuda))\n\n#label_dict = {}\n\ntrain_dataset = OMG_dataset.OMGEmotion('../datasets/OMG_preprocessed/', preprocessed=True, split='train')\ntest_dataset = OMG_dataset.OMGEmotion('../datasets/OMG_preprocessed/', preprocessed=True, split='test')\n\n#label_dict = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6}\n\ntest_sampler = sampler.RandomSampler(test_dataset)\ntrain_sampler = sampler.RandomSampler(train_dataset)\n\nkwargs = {'num_workers': 8, 'pin_memory': True} if args.use_cuda else {}\ntrain_loader = torch.utils.data.DataLoader(\n    train_dataset, batch_size=args.batch_size,\n    sampler=train_sampler, drop_last=False, **kwargs)\ntest_loader = torch.utils.data.DataLoader(\n    test_dataset, batch_size=args.batch_size,\n    sampler=test_sampler, drop_last=False, **kwargs)\n\nclass ConcordanceCorCoeff(nn.Module):\n    def __init__(self):\n        super(ConcordanceCorCoeff, self).__init__()\n        self.mean = torch.mean\n        self.var = torch.var\n        self.sum = torch.sum\n        self.sqrt = torch.sqrt\n        self.std = torch.std\n    def forward(self, prediction, ground_truth):\n        mean_gt = self.mean (ground_truth, 0)\n        mean_pred = self.mean (prediction, 0)\n        var_gt = self.var (ground_truth, 0)\n        var_pred = self.var (prediction, 0)\n        v_pred = prediction - mean_pred\n        v_gt = ground_truth - mean_gt\n        cor = self.sum (v_pred * v_gt) / (self.sqrt(self.sum(v_pred ** 2)) * self.sqrt(self.sum(v_gt ** 2)))\n        sd_gt = self.std(ground_truth)\n        sd_pred = self.std(prediction)\n        numerator=2*cor*sd_gt*sd_pred\n        denominator=var_gt+var_pred+(mean_gt-mean_pred)**2\n        ccc = numerator/denominator\n        return 1-ccc\n\n\nclass Emotion_Regression(nn.Module):\n    def __init__(self):\n        super(Emotion_Regression, self).__init__()\n\n        self.hidden_1 = nn.Linear(args.z_size, 1024)\n        self.hidden_2 = nn.Linear(1024, 1)\n\n\n    def forward(self, input):\n\n        pred = F.relu(self.hidden_1(input))\n        pred = F.dropout(pred, p=0.2, training=True)\n        pred = self.hidden_2(pred)\n        return pred\n\n\ndef loss_function(pred, label):\n    # The label has two columns while the prediction has only ONE\n    pred = pred[:, 0].double()\n    label = label[:, args.a_or_v].double()\n    criterion = ConcordanceCorCoeff()\n    l = criterion(pred, label)\n    return l\n\ndef ccc(y_pred, y_true):\n    true_mean = np.mean(y_true)\n    true_variance = np.var(y_true)\n    pred_mean = np.mean(y_pred)\n    pred_variance = np.var(y_pred)\n\n    rho,_ = pearsonr(y_pred,y_true)\n\n    std_predictions = np.std(y_pred)\n\n    std_gt = np.std(y_true)\n\n\n    ccc = 2 * rho * std_gt * std_predictions / (\n        std_predictions ** 2 + std_gt ** 2 +\n        (pred_mean - true_mean) ** 2)\n\n    return ccc, rho\n\ndef calculate_ccc(pred, label):\n    pred = pred[:, 0].double().cpu().detach().numpy()\n    label = label[:, args.a_or_v].double().cpu().detach().numpy()\n\n    # use a distinct local name: assigning to \"ccc\" here would shadow the ccc()\n    # function above and raise UnboundLocalError at the call site\n    ccc_value, rho_v = ccc(pred, label)\n\n    return ccc_value\n\n\ndef train(model, vae_model, optimizer):\n    model.train()\n    epoch_loss = 0\n    epoch_arousal = 0\n    epoch_valence = 0\n    mean_arousal_ccc = 0\n    
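    # running per-epoch CCC accumulators, averaged over the processed batches at epoch end\n    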
mean_valence_ccc = 0\n\n training_label = 0\n if args.type == 'person':\n training_label = 1\n for batch_idx, (data, pers) in enumerate(train_loader):\n optimizer.zero_grad()\n\n label = np.delete(pers, np.s_[0:2], 0)\n label = label.astype(np.float64)\n label = label.transpose()\n label = torch.tensor(label).double()\n\n\n data = torch.clamp(torch.div(data, (torch.min(data, dim=2, keepdim=True)[0]).repeat(1, 1, data.size(2))), min=0, max=1)\n\n data = Variable(data)\n\n data = data.transpose(1, 2)\n\n if use_cuda:\n data = data.cuda()\n label = label.cuda()\n\n outs = vae_model(data)\n\n if args.model_type == 'vae_g_l':\n global_sample = torch.mean(outs.encoder_out.medium_sample, dim=1)\n\n if args.model_type == 'vae_l':\n global_sample = torch.mean(outs.encoder_out.local_sample, dim=1)\n\n pred = model(global_sample)\n\n loss = loss_function(pred, label)\n loss.mean().backward()\n epoch_loss += loss.mean().item()\n \n arousal_ccc = calculate_ccc(pred, label)\n mean_arousal_ccc += arousal_ccc\n\n optimizer.step()\n mean_arousal_ccc /= batch_idx\n return epoch_loss / batch_idx, mean_arousal_ccc\n\n\ndef test(model, vae_model):\n model.eval()\n test_loss = 0\n test_arousal = 0\n test_valence = 0\n mean_arousal_ccc = 0\n mean_valence_ccc = 0\n\n sample_amount = 0\n\n training_label = 0\n if args.type == 'person':\n training_label = 1\n\n #pca = torch.load(\"experiments/pca\")\n for batch_idx, (data, pers) in enumerate(test_loader):\n\n label = np.delete(pers, np.s_[0:2], 0)\n label = label.astype(np.float64)\n label = label.transpose()\n label = torch.tensor(label).double()\n\n data = torch.clamp(torch.div(data, (torch.min(data, dim=2, keepdim=True)[0]).repeat(1, 1, data.size(2))), min=0, max=1)\n\n data = Variable(data)\n\n data = data.transpose(1, 2)\n\n if use_cuda:\n data = data.cuda()\n label = label.cuda()\n\n outs = vae_model(data)\n if args.model_type == 'vae_g_l':\n global_sample = torch.mean(outs.encoder_out.medium_sample, dim=1)\n if args.model_type == 'vae_l':\n global_sample = torch.mean(outs.encoder_out.local_sample, dim=1)\n\n pred = model(global_sample)\n\n max, indices = torch.max(pred, dim=1)\n\n sample_amount += len(label)\n\n loss = loss_function(pred, label)\n test_loss += loss.mean().item()\n arousal_ccc = calculate_ccc(pred, label)\n mean_arousal_ccc += arousal_ccc\n\n mean_arousal_ccc /= batch_idx\n return test_loss / batch_idx, mean_arousal_ccc\n\ndef train_epochs(model, vae_model, optimizer):\n last_loss = np.Inf\n\n train_losses = []\n test_losses = []\n train_arousals = []\n test_arousals = []\n train_valences = []\n test_valences =[]\n\n for epoch in range(args.num_epochs):\n avg_train_loss, mean_arousal_ccc = train(model, vae_model, optimizer)\n train_losses.append(avg_train_loss)\n if epoch % args.checkpoint_interval == 0:\n print('====> Epoch: {} Average train loss: {:.5f}'.format(epoch, avg_train_loss))\n print('====> Epoch: {} Average train arousal ccc: {:.5f}'.format(epoch, mean_arousal_ccc))\n print('-------------------------------------------------')\n avg_test_loss, mean_arousal_ccc = test(model, vae_model)\n test_losses.append(avg_test_loss)\n print('====> Epoch: {} Average test loss: {:.5f}'.format(epoch, avg_test_loss))\n print('====> Epoch: {} Average test arousal ccc: {:.5f}'.format(epoch, mean_arousal_ccc))\n writer.add_scalar('experiments/regression_train_loss', avg_train_loss, epoch)\n writer.add_scalar('experiments/regression_test_loss', avg_test_loss, epoch)\n\n print('=================================================')\n\n plt.title(\"arousal, 
valence and overall losses\")\n plt.plot(train_losses, label=\"overall training\")\n plt.plot(test_losses, label=\"overall testing\")\n plt.legend(loc='upper right')\n plt.savefig(\"experiments/regression_losses.png\")\n plt.clf()\n\n torch.save(model.state_dict(), 'experiments/' + args.model_name)\n\n\nif __name__ == '__main__' and args.mode == 'train':\n\n model = Emotion_Regression()\n if args.model_type == 'vae_g_l':\n vae_model = vae_g_l.VAE(args)\n if args.use_cuda:\n vae_model.load_state_dict(torch.load('experiments/' + args.pretrained_model))\n else:\n vae_model.load_state_dict(\n torch.load('experiments/' + args.pretrained_model, map_location=lambda storage, loc: storage))\n elif args.model_type == 'vae_l':\n vae_model = vae_l.VAE(args)\n if args.use_cuda:\n vae_model.load_state_dict(torch.load('experiments/' + args.pretrained_model))\n else:\n vae_model.load_state_dict(\n torch.load('experiments/' + args.pretrained_model, map_location=lambda storage, loc: storage))\n\n vae_model.eval()\n\n for param in vae_model.parameters():\n param.requires_grad = False\n\n if args.resume == 1:\n model.load_state_dict(torch.load('experiments/' + args.model_name, map_location=lambda storage, loc: storage))\n print(\"loaded model\")\n\n if use_cuda:\n model.cuda()\n vae_model.cuda()\n\n optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)\n train_epochs(model, vae_model, optimizer)\n","sub_path":"emotion_regression.py","file_name":"emotion_regression.py","file_ext":"py","file_size_in_byte":13717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"235059941","text":"from sikuli import *\nimport logging\nimport myTools\nimport reports_Compare\n\n#---------------------------------------------------#\ndef fPrint_FirmAssList(pReportMonth,pRepExt):\n#---------------------------------------------------#\n\n myTools.sectionStartTimeStamp(\"print FirmAssList\")\n\n # name report file: ex: PreBill-03\n reportName = myTools.buildRepName(\"FirmAssList\",pRepExt)\n logging.debug('Print_FirmAssList: ' + reportName)\n\n # make sure timeslips has focus\n myTools.getFocus()\n\n logging.debug('- open FirmAssList')\n type(\"r\",KeyModifier.ALT)\n type(\"b\")\n time.sleep(1)\n type(\"f\")\n time.sleep(1)\n \n logging.debug('- set up report')\n type(\"o\",KeyModifier.CTRL)\n \n # Options\n myTools.pressSHIFTTAB(4)\n type(Key.SPACE)\n time.sleep(1)\n\n # Default\n myTools.pressSHIFTTAB(4)\n type(Key.SPACE)\n time.sleep(1)\n\n # OK\n myTools.pressTAB(1)\n type(Key.SPACE)\n time.sleep(1)\n\n # choose csv\n myTools.pressTAB(2)\n type(\"c\")\n time.sleep(1)\n\n myTools.enterSlipFilter(pReportMonth,\"n\")\n\n # print the report\n type(Key.ENTER) \n time.sleep(1)\n\n myTools.finishReport(reportName)","sub_path":"report_FirmAssList.sikuli/report_FirmAssList.py","file_name":"report_FirmAssList.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"274063142","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'eshop'\n\nurlpatterns = [\n    path('',views.product_list, name='product_list'),\n    path('all_product/',views.all_product, name='all_product'),\n    #path('<slug:category_slug>/', views.product_list, name='product_list_by_category'),\n    path('<slug:category_slug>/', views.all_product, name='product_list_by_category'),\n    path('<int:id>/<slug:slug>/', views.product_detail,name='product_detail'),\n    ]","sub_path":"eshop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"317932820","text":"# -*- coding: utf-8 -*-\nfrom core.libs import *\nimport zipfile\n\n\ndef extract(fname, dest, overwrite=False, silent=False):\n    logger.trace()\n    import shutil\n    created_dest = canceled = False\n    zf = zipfile.ZipFile(fname)\n    \n    if not os.path.isdir(dest): \n        os.makedirs(dest)\n        created_dest = True\n    elif not overwrite:\n        return\n\n    if silent:\n        # Extract in silent mode (faster)\n        zf.extractall(dest)\n    else:\n        # Extract while showing the progress dialog and allowing the user to cancel\n        dialog = platformtools.dialog_progress('Extrayendo', '')\n        uncompress_size = sum((file.file_size for file in zf.infolist()))\n        extracted_size = num_files= count= 0\n\n        # Create a temporary destination\n        dest_tmp = dest + \"_TMP\"\n        if not os.path.isdir(dest_tmp):\n            os.makedirs(dest_tmp)\n\n        for file in zf.infolist():\n            extracted_size += file.file_size\n            porcent = extracted_size * 100 / uncompress_size\n            num_files += 1\n            dialog.update(porcent,\n                          \"Descomprimiendo:\",\n                          \".../%s\" % file.filename[-60:].split('/',1)[1] if len(file.filename) > 60 else file.filename,\n                          \"Fichero %s de %s (%s%%)\" % (num_files, len(zf.infolist()),porcent))\n\n            if dialog.iscanceled():\n                canceled = True\n                break\n            zf.extract(file, dest_tmp)\n\n        if not canceled:\n            # Move from the temporary folder to the destination folder\n            for root, dirs, files in os.walk(dest_tmp):\n                for filename in files:\n                    filepath_src = os.path.join(root, filename)\n                    filepath_dest = filepath_src.replace(dest_tmp, dest, 1)\n                    dirname_dest = os.path.dirname(filepath_dest)\n                    if not os.path.isdir(dirname_dest):\n                        os.makedirs(dirname_dest)\n                    count += 1\n                    porcent = count * 100 / num_files\n                    dialog.update(porcent,\n                                  \"Moviendo:\",\n                                  \".../%s\" % filepath_dest[-60:].split(os.sep, 1)[1] if len(filepath_dest) > 60 else filepath_dest,\n                                  \"Fichero %s de %s (%s%%)\" % (count, num_files, porcent))\n                    shutil.move(filepath_src, filepath_dest)\n        elif created_dest:\n            os.rmdir(dest)\n\n        shutil.rmtree(dest_tmp, ignore_errors=True)\n        dialog.close()\n\n    zf.close()\n\n    return not canceled","sub_path":"core/ziptools.py","file_name":"ziptools.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"203754197","text":"# DEPRECATED\nfrom src.utils.temporal_roll import Rolling\n\n\nclass Train:\n    \"\"\"Train module for the experiment.\n    Initiate each train module for each model with given hyper-parameters(hps)\n    Model itself can have a strategy to tune its hps (e.g., sklearn's random search cv)\n    \"\"\"\n\n    def __str__(self):\n        rolling = str(self.roller).replace('\\t', '\\t\\t')\n        train_setting = 'Stack of roll slices' if self.stack_roll else 'The last roll slice'\n        return ('Training for {mname}:\\n'\n                '\\t- y: {y}\\n'\n                '\\t- x: {x}\\n'\n                '\\t- trained on: {train_setting}\\n'\n                '\\t- Model: {model}\\n'\n                '\\t- {rolling}\\n'\n                ''.format(mname=self.model_name, model=self.model,\n                          
x=self.x_setting, y=self.y_setting,\n train_setting=train_setting, rolling=rolling\n )\n )\n\n def __init__(self, model, model_name, rps, y_setting, x_setting, stack_roll=False):\n \"\"\"\n :param model: model with given hps\n :param rps: dict.\n rolling parameters. 'roll_back'=True\n :param x_setting: str\n what type of x to generate from the data in tw_past\n :param y_setting: str\n what type of y to generate from the data in tw_pred\n :param stack_roll: bool, default False\n whether or not to stack roll slices to enlarge training set.\n If True, roll thru the data and stack slices as one training set\n If False, train on the last roll slice\n \"\"\"\n self.model = model\n self.model_name = model_name\n self.rps = rps\n self.roller = Rolling(**self.rps)\n self.x_setting = x_setting\n self.y_setting = y_setting\n self.stack_roll = stack_roll\n self.cdata = None\n\n @property\n def data_is_set(self):\n return self.cdata is not None\n\n def train(self):\n if not self.data_is_set:\n raise ValueError('Set data first')\n\n if not self.stack_roll:\n dates = self.roller.most_recent_period()\n x, y = self.cdata.gen_x_y_for_model(self.x_setting, self.y_setting, dates)\n print(x.shape, y.shape)\n self.model.fit(x.values, y.values.ravel())\n else:\n raise NotImplementedError('stack roll not implemented')\n return x, y\n\n\nif __name__ == \"__main__\":\n from sklearn.ensemble import RandomForestRegressor\n from sklearn.model_selection import RandomizedSearchCV\n from scipy.stats import randint as sp_randint\n from src.e1_compile_data import CompileData\n\n import os\n\n if os.getcwd().endswith('src'):\n os.chdir('..')\n PD = {\"max_depth\": [3, None],\n \"max_features\": sp_randint(1, 8),\n \"min_samples_split\": sp_randint(2, 11),\n \"min_samples_leaf\": sp_randint(1, 11),\n \"bootstrap\": [True, False],\n # \"criterion\": [\"gini\", \"entropy\"]\n }\n D = compile_data = CompileData(verbose=1, spu_name='grid_1000')\n D.set_x(['crime'], by_category=True)\n D.set_y('crime/burglary')\n M = RandomizedSearchCV(RandomForestRegressor(), param_distributions=PD, n_iter=20, cv=5, verbose=1)\n RPS = {'rsd': '2015-01-01', 'red': '2016-07-01', 'rstep': 7, 'tw_past': None}\n TR = Train(model=M, model_name='randomSearchedRF', rps=RPS, y_setting='event_cnt', x_setting='event_cnt')\n print(TR)\n TR.cdata = D\n XP, YF = TR.train()\n","sub_path":"src/e2_training.py","file_name":"e2_training.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"57406532","text":"from __future__ import print_function\nimport numpy as np\nnp.random.seed(1337) # for reproducibility\n\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D\nfrom keras.utils import np_utils\n\n\nimport os\nimport json\nimport argparse\nimport timeit\n\nstart_time = timeit.default_timer()\n\nparser = argparse.ArgumentParser(description='Calculate the model for CNN keras')\nparser.add_argument('--nb_filters', dest='nb_filters', type=int, default=32)\nparser.add_argument('--nb_pool', dest='nb_pool', type=int, default=2)\nparser.add_argument('--nb_conv', dest='nb_conv', type=int, default=3)\nparser.add_argument('--nb_epoch', dest='nb_epoch', type=int, default=12)\n\nparser.add_argument('--_id', dest='_id', default=None)\nparams = vars(parser.parse_args())\n\nbatch_size = 128\nnb_classes = 10\nnb_epoch = params['nb_epoch']\n\n# input image 
dimensions\nimg_rows, img_cols = 28, 28\n# number of convolutional filters to use\nnb_filters = params['nb_filters']\n# size of pooling area for max pooling\nnb_pool = params['nb_pool']\n# convolution kernel size\nnb_conv = params['nb_conv']\n\n# the data, shuffled and split between train and test sets\n(X_train, y_train), (X_test, y_test) = mnist.load_data()\n\nX_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)\nX_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\n# convert class vectors to binary class matrices\nY_train = np_utils.to_categorical(y_train, nb_classes)\nY_test = np_utils.to_categorical(y_test, nb_classes)\n\nmodel = Sequential()\n\nmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv,\n                        border_mode='valid',\n                        input_shape=(1, img_rows, img_cols)))\nmodel.add(Activation('relu'))\nmodel.add(Convolution2D(nb_filters, nb_conv, nb_conv))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\nmodel.add(Dense(128))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(nb_classes))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy',\n              optimizer='adadelta',\n              metrics=['accuracy'])\n\nmodel.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,\n          verbose=1, validation_data=(X_test, Y_test))\nscore = model.evaluate(X_test, Y_test, verbose=0)\n#print('Test score:', score[0])\n#print('Test accuracy:', score[1])\nend_time = timeit.default_timer()\ncost_time = end_time - start_time\n\n# Save result\n_id = params['_id']\nif not os.path.exists(_id):\n    os.makedirs(_id)\nwith open(os.path.join(_id, 'value.json'), 'w') as outfile:\n    json.dump({'_scores': {'score': score[0],'accuracy':score[1], 'time':cost_time}}, outfile)","sub_path":"examples/cnn/cnn_keras.py","file_name":"cnn_keras.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"592356593","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 14 16:36:27 2017\n\n@author: Ivan Achlaqullah\n\"\"\"\n\n## Berdagang.py, Cryptocurrency Trading Bot with Automatic Tweet Support##\n## Made by Ivan Achlaqullah\n\nimport time\nimport krakenex\n\n## Import supporting scripts\nimport config ## Settings script\nimport ngetweet ## Twitter integration script\n\n## Import trading strategy\nimport strategy.smamacd as strategy\n\nk = krakenex.API()\nk.load_key('kraken.key')\n\n## Load settings from config.py\ntesting = config.testing\n\n## IMPORTANT INT\nstatusPosition = 0 ## 0 = no position, 1 = short, 2 = long\nleverage = config.leverage\nbalance = 0.0\n\n## Check the account balance\ndef cekdompet():\n    if testing == 0:\n        dompet = k.query_private('TradeBalance')\n        global balance\n        balance = float(dompet['result']['eb'])\n        #print('Balance : ' + str(balance))\n        time.sleep(2)\n\ncekdompet()\n\n## Get OHLC data from kraken\nohlc = k.query_public('OHLC', req = {'pair': config.pair, 'interval': config.chart_interval})\ntime.sleep(2)\n\n## Get official trading pair name from kraken\ntpair = ''\nfor key, value in ohlc['result'].items():\n    if key != 'last':\n        tpair = key\n\n## Get the latest closing price from OHLC\nlastclose = len(ohlc['result'][tpair])\nlastclose = lastclose - 1\n\n## Check if there are open positions in the account.\nif testing == 0 :\n    buka = k.query_private('OpenPositions')\n    time.sleep(2)\n\n    if len(buka['result']) == 0 :\n        statusPosition = 0\n    else :\n        bukastatusid = ''\n        for key, value in buka['result'].items() :\n            bukastatusid = key\n\n        if buka['result'][bukastatusid]['type'] == 'buy':\n            statusPosition = 2\n            #print('long')\n        if buka['result'][bukastatusid]['type'] == 'sell':\n            statusPosition = 1\n            #print('short')\n\n## Functions to CLOSE an open position\n\ndef closelong(posisinya) :\n    print(str(ohlc['result'][tpair][posisinya][0]) + \" \" + str(posisinya) +\n          \" STOP Long : \" + ohlc['result'][tpair][posisinya][4])\n\n    global statusPosition\n    statusPosition = 0\n\n    global testing\n    if testing == 0:\n        ngetweet.tweet(str(ohlc['result'][tpair][posisinya][0]) + \" \" + tpair +\n                       \" STOP Long : \" + ohlc['result'][tpair][posisinya][4])\n\n        while True:\n            beli = k.query_private('AddOrder',\n                                   {'pair': tpair,\n                                    'type': 'sell',\n                                    'ordertype': 'market',\n                                    'volume': '0',\n                                    'leverage': str(leverage)})\n            time.sleep(2)\n            if len(beli['error']) == 0:\n                break\n\n    cekdompet()\n\ndef closeshort(posisinya) :\n    print(str(ohlc['result'][tpair][posisinya][0]) + \" \" + str(posisinya) +\n          \" STOP Short : \" + ohlc['result'][tpair][posisinya][4])\n\n    global statusPosition\n    statusPosition = 0\n\n    global testing\n    if testing == 0:\n        ngetweet.tweet(str(ohlc['result'][tpair][posisinya][0]) + \" \" + tpair +\n                       \" STOP Short : \" + ohlc['result'][tpair][posisinya][4])\n\n        while True:\n            beli = k.query_private('AddOrder',\n                                   {'pair': tpair,\n                                    'type': 'buy',\n                                    'ordertype': 'market',\n                                    'volume': '0',\n                                    'leverage': str(leverage)})\n            time.sleep(2)\n            if len(beli['error']) == 0:\n                break\n\n    cekdompet()\n\n## Functions to OPEN a new position\n\ndef bukalong(posisinya) :\n\n    global statusPosition\n    if statusPosition == 1:\n        closeshort(posisinya)\n\n    print (str(ohlc['result'][tpair][posisinya][0]) + \" \" + str(posisinya) +\n           \" OPEN Long, Price : \" + ohlc['result'][tpair][posisinya][4])\n\n    global testing\n    global balance\n    global leverage\n\n    if testing == 0:\n        ngetweet.tweet(str(ohlc['result'][tpair][posisinya][0]) + \" \" + tpair +\n                       \" OPEN Long, Price : \" + ohlc['result'][tpair][posisinya][4])\n\n        ticker = k.query_public('Ticker', req = {'pair': tpair})\n\n        while True:\n            harga = (balance * 0.95 * leverage) / float(ticker['result'][tpair]['c'][0])\n            beli = k.query_private('AddOrder',\n                                   {'pair': tpair,\n                                    'type': 'buy',\n                                    'ordertype': 'market',\n                                    'volume': str(harga),\n                                    'leverage': str(leverage)})\n            time.sleep(1)\n            if len(beli['error']) == 0:\n                break\n\n    statusPosition = 2\n\ndef bukashort(posisinya) :\n\n    global statusPosition\n    if statusPosition == 2:\n        closelong(posisinya)\n\n    print (str(ohlc['result'][tpair][posisinya][0]) + \" \" + str(posisinya) +\n           \" OPEN Short, Price : \" + ohlc['result'][tpair][posisinya][4])\n\n    global testing\n    global balance\n    global leverage\n\n    if testing == 0:\n        ngetweet.tweet(str(ohlc['result'][tpair][posisinya][0]) + \" \" + tpair +\n                       \" OPEN Short, Price : \" + ohlc['result'][tpair][posisinya][4])\n\n        ticker = k.query_public('Ticker', req = {'pair': tpair})\n\n        while True:\n            harga = (balance * 0.95 * leverage) / float(ticker['result'][tpair]['c'][0])\n            beli = k.query_private('AddOrder',\n                                   {'pair': tpair,\n                                    'type': 'sell',\n                                    'ordertype': 'market',\n                                    'volume': str(harga),\n                                    'leverage': str(leverage)})\n            time.sleep(1)\n            if len(beli['error']) == 0:\n                break\n\n    statusPosition = 1\n\n## Act based on strategy result\ndef decide(order, pos):\n\n    global statusPosition\n\n    if order == 'long':\n        if statusPosition != 2:\n            bukalong(pos)\n    elif order == 'short':\n        if statusPosition != 1:\n            bukashort(pos)\n\n## Decide whether to do backtesting, or to start trading normally.\n\nif testing == 0:\n    order1 = strategy.calculate(lastclose-1, tpair, ohlc)\n    decide(order1, lastclose-1)\nelse :\n    statusPosition = 0\n    print('Backtest Start !!!')\n    for x in range(lastclose - 200):\n        cobahitung = x + 200\n        order1 = strategy.calculate(cobahitung, tpair, ohlc)\n        decide(order1, cobahitung)\n\n## Give an indication that all calculations are done\nprint(\"------------------------DONE------------------------\")\n","sub_path":"berdagang.py","file_name":"berdagang.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"612153993","text":"from django.contrib.auth.models import User\nfrom rest_framework import serializers\nfrom tasks.models import Task, Committee\n\n\nclass TaskSerializer(serializers.HyperlinkedModelSerializer):\n    committee = serializers.SlugRelatedField(\n        queryset=Committee.objects.all(),\n        slug_field=\"name\"\n    )\n\n    assigned = serializers.SlugRelatedField(\n        queryset=User.objects.all(),\n        slug_field=\"username\",\n        allow_null=True\n    )\n\n    class Meta:\n        model = Task\n        fields = ('url', 'id', 'committee', 'description', 'status', 'due_date', 'assigned')\n\n\nclass CommitteeSerializer(serializers.HyperlinkedModelSerializer):\n    class Meta:\n        model = Committee\n        fields = ('url', 'id', 'name', 'heads', 'members')\n\n\nclass UserSerializer(serializers.HyperlinkedModelSerializer):\n    class Meta:\n        model = User\n        fields = ('url', 'id', 'username', 'email', 'first_name', 'last_name')","sub_path":"tasks/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"638300834","text":"# -*- coding: utf-8 -*-\n\nponies = [\"Applejack\",\n          \"Pinkie Pie\",\n          \"Fluttershy\",\n          \"Rainbow Dash\",\n          \"Rarity\",\n          \"Twilight Sparkle\",\n          \"Apple Bloom\",\n          \"Scootaloo\",\n          \"Sweetie Belle\",\n          \"ConductorAll Aboard\",\n          \"Aloe\",\n          \"Big McIntosh\",\n          \"Braeburn\",\n          \"Cheerilee\",\n          \"Cheese Sandwich\",\n          \"Cherry Jubilee\",\n          \"Coco Pommel\",\n          \"Doc Top\",\n          \"Dr. Caballeron\",\n          \"Filthy Rich\",\n          \"Gizmo\",\n          \"Goldie Delicious\",\n          \"Granny Smith\",\n          \"The Headless Horse\",\n          \"Hoity Toity\",\n          \"Junebug\",\n          \"Lotus Blossom\",\n          \"Mane Goodall\",\n          \"Mane-iac\",\n          \"Mayor Mare\",\n          \"Mr. Carrot Cake\",\n          \"Mrs. Cup Cake\",\n          \"Ms. Harshwhinny\",\n          \"Ms. 
Peachbottom\",\n \"Nurse Redheart\",\n \"The Olden Pony\",\n \"Photo Finish\",\n \"Prim Hemline\",\n \"Randolph\",\n \"Roma\",\n \"Earth pony royal guards\",\n \"Sapphire Shores\",\n \"Sheriff Silverstar\",\n \"Silver Shill\",\n \"Train ConductorSteamer\",\n \"Suri Polomare\",\n \"Daring Do CollectorTeddie Safari\",\n \"Toe-Tapper\",\n \"Torch Song\",\n \"Blossomforth\",\n \"Bulk Biceps\",\n \"Cloud Chaser\",\n \"Cloudchaser\",\n \"Crafty Crate\",\n \"Daring Do\",\n \"Derpy\",\n \"Dumb-Bell\",\n \"Flash Sentry\",\n \"Flitter\",\n \"Hoops\",\n \"Lightning Dust\",\n \"Lucy Packard\",\n \"Pegasus royal guards\",\n \"Stellar Eclipse\",\n \"Thunderlane\",\n \"Fleetfoot\",\n \"Soarin\",\n \"Spitfire\",\n \"Claude\",\n \"Doctor Horse\",\n \"Fancy Pants\",\n \"Flam\",\n \"Fleur Dis Lee\",\n \"Flim\",\n \"Jet Set\",\n \"Joe\",\n \"King Sombra\",\n \"Prince Blueblood\",\n \"Princess Cadance\",\n \"Princess Celestia\",\n \"Princess Luna\",\n \"Unicorn royal guards\",\n \"Shining Armor\",\n \"Sunset Shimmer\",\n \"Trenderhoof\",\n \"The Great and Powerful Trixie\",\n \"Upper Crust\",\n \"Babs Seed\",\n \"Diamond Tiara\",\n \"Featherweight\",\n \"Pipsqueak\",\n \"Pound Cake\",\n \"Pumpkin Cake\",\n \"Rumble\",\n \"Silver Spoon\",\n \"Snails\",\n \"Snips\",\n \"Twist\",\n \"Zipporwhill\",\n \"Pinkie's momCloudy Quartz\",\n \"Pinkie's dadIgneous Rock\",\n \"Pinkie's sisterMaud Pie\",\n \"Granny Smith's fatherPokey Oaks\",\n \"Granny Smith's motherSew 'n Sow\",\n \"Stinkin' Rich\",\n \"Apple Bottoms\",\n \"Apple Brown Betty\",\n \"Apple Bumpkin\",\n \"Apple Cider\",\n \"Apple Cinnamon\",\n \"Apple Cobbler\",\n \"Apple Dumpling\",\n \"Apple Fritter\",\n \"Apple Honey\",\n \"Apple Leaves\",\n \"Apple Munchies\",\n \"Apple Pie\",\n \"Apple Rose\",\n \"Apple Strudel\",\n \"Apple Top\",\n \"Aunt Orange\",\n \"Auntie Applesauce\",\n \"Bushel\",\n \"Candy Apples\",\n \"Caramel Apple\",\n \"Florina\",\n \"Gala Appleby\",\n \"Golden Delicious\",\n \"Half Baked Apple\",\n \"Happy Trails\",\n \"Hayseed Turnip Truck\",\n \"Jonagold\",\n \"Lavender Fritter\",\n \"Magdalena\",\n \"Peachy Sweet\",\n \"Prairie Tune\",\n \"Red Delicious\",\n \"Red Gala\",\n \"Sundowner\",\n \"Uncle Orange\",\n \"Wensley\",\n \"Twilight's dadNight Light\",\n \"Twilight's momTwilight Velvet\",\n \"Rarity's momCookie Crumbles\",\n \"Rarity's dadHondo Flanks\",\n \"Pinkie's sisterLimestone Pie\",\n \"Pinkie's sisterMarble Pie\",\n \"Apple Bud\",\n \"Apple Crumble\",\n \"Apple Flora\",\n \"Apple Mint\",\n \"Apple Squash\",\n \"Liberty Belle\",\n \"Red June\",\n \"Sweet Tooth\",\n \"Ace\",\n \"Action Shot\",\n \"Affero\",\n \"Amaranthine\",\n \"Ambrosia\",\n \"Crystal Chalice Stand PonyAmethyst Gleam\",\n \"Amira\",\n \"Apple Bottom\",\n \"Apple Bread\",\n \"Apple Slice\",\n \"Apricot Bow\",\n \"Baritone\",\n \"Beauty Brass\",\n \"Bell Perin\",\n \"Belle Star\",\n \"Berry Dreams\",\n \"Berry Frost\",\n \"Berry Icicle\",\n \"Berryshine\",\n \"Big Top\",\n \"Big Wig\",\n \"ProfessorBill Neigh\",\n \"Bitta Blues\",\n \"Black Stone\",\n \"Blue Bonnet\",\n \"Blue Bows\",\n \"Blue Nile\",\n \"Bonnie\",\n \"Bottlecap\",\n \"Mr. 
Breezy\",\n \"Brindle Young\",\n \"Bubblegum Blossom\",\n \"Burnt Oak\",\n \"Caboose\",\n \"Caesar\",\n \"Candy Mane\",\n \"Candy Twirl\",\n \"Caramel\",\n \"Charcoal Bakes\",\n \"Charged Up\",\n \"Charlie Coal\",\n \"Chelsea Porcelain\",\n \"Cherry Berry\",\n \"Cherry Fizzy\",\n \"Cherry Punch\",\n \"Cherry Strudel\",\n \"Chilly Puddle\",\n \"Chocolate Haze\",\n \"Classy Clover\",\n \"Cloudy Haze\",\n \"Cobalt\",\n \"Cobalt Shade\",\n \"Coco Crusoe\",\n \"Commander Redfeather\",\n \"Concerto\",\n \"Cormano\",\n \"Cornflower\",\n \"Ancient Beast DealerCratetoss\",\n \"Creme Brulee\",\n \"Crescendo\",\n \"Crest Crown\",\n \"Dainty Dove\",\n \"Daisy\",\n \"Flashy PonyDance Fever\",\n \"Davenport\",\n \"Dirtbound\",\n \"Doseydotes\",\n \"Dosie Dough\",\n \"Dr. Hooves\",\n \"Dry Wheat\",\n \"Eclair Crème\",\n \"Eiffel\",\n \"Elphaba Trot\",\n \"Emerald Beacon\",\n \"Emerald Green\",\n \"Evening Star\",\n \"Felix\",\n \"Fiddly Faddle\",\n \"Flounder\",\n \"Flurry\",\n \"Forest Spirit\",\n \"Frederick Horseshoepin\",\n \"Full Steam\",\n \"Fuzzy Slippers\",\n \"Geri\",\n \"Gingerbread\",\n \"Ginger Gold\",\n \"Giselle\",\n \"Globe Trotter\",\n \"Golden Harvest\",\n \"Goldengrape\",\n \"Grace\",\n \"Grape Crush\",\n \"Grape Delight\",\n \"Green Jewel\",\n \"Mr. Greenhooves\",\n \"Haakim\",\n \"Harry Trotter\",\n \"Hay Fever\",\n \"Haymish\",\n \"Hazel Harvest\",\n \"Hercules\",\n \"Hinny of the Hills\",\n \"Hugh Jelly\",\n \"Icy Drop\",\n \"Immemoria\",\n \"Jeff Letrotski\",\n \"Jesús Pezuña\",\n \"Jim Beam\",\n \"John Bull\",\n \"Jubileena\",\n \"Karat\",\n \"Kazooie\",\n \"Klein\",\n \"Duke of MaretoniaKyrippos II\",\n \"Lady Justice\",\n \"Lavender Blush\",\n \"Lavenderhoof\",\n \"Lemon Chiffon\",\n \"Lilac Links\",\n \"Lily Valley\",\n \"Lincoln\",\n \"Linked Hearts\",\n \"Little Po\",\n \"Security GuardLockdown\",\n \"Luckette\",\n \"Lucky Clover\",\n \"Lucky Star\",\n \"Lyrica Lilac\",\n \"Majesty\",\n \"MandoPony\",\n \"Mango Juice\",\n \"Marigold\",\n \"Maroon Carrot\",\n \"Masquerade\",\n \"Antique Chicken Stand PonyMatch Game\",\n \"Maybelline\",\n \"Meadow Song\",\n \"Melilot\",\n \"Midnight Fun\",\n \"Millie\",\n \"Mint Swirl\",\n \"Mjölna\",\n \"Morton Saltworthy\",\n \"Night Watch\",\n \"Noteworthy\",\n \"Nurse Snowheart\",\n \"Nurse Sweetheart\",\n \"Nurse Tenderheart\",\n \"Oakey Doke\",\n \"Obscurity\",\n \"Octavia Melody\",\n \"Ol' Salt\",\n \"Oregon Trail\",\n \"Outlaw\",\n \"Paisley Pastel\",\n \"Pampered Pearl\",\n \"Parcel Post\",\n \"Parish Nandermane\",\n \"Peachy Cream\",\n \"Pearly Stitch\",\n \"Perfect Pace\",\n \"Persnickety\",\n \"Petunia\",\n \"Picture Perfect\",\n \"Pigpen\",\n \"Pine Breeze\",\n \"Pinot Noir\",\n \"Pipe Down\",\n \"Pitch Perfect\",\n \"Play Write\",\n \"Powder Rouge\",\n \"Pretty Vision\",\n \"Purple Haze\",\n \"Purple Wave\",\n \"Pursey Pink\",\n \"Masseuse PonyQuake\",\n \"Raggedy Doctor\",\n \"Ragtime\",\n \"Reflective Rock\",\n \"Regal Candent\",\n \"Rick Shaw\",\n \"Rivet\",\n \"Rogue\",\n \"Rose\",\n \"Rough Tumble\",\n \"Roxie\",\n \"Royal Riff\",\n \"Ruby Splash\",\n \"Sam\",\n \"Savoir Fare\",\n \"Screwball\",\n \"Screwy\",\n \"Sealed Scroll\",\n \"Seasong\",\n \"Serena\",\n \"Shamrock\",\n \"Shoeshine\",\n \"Shooting Star\",\n \"Shortround\",\n \"Silver Frames\",\n \"Sir Pony Moore\",\n \"Sky View\",\n \"Slendermane\",\n \"Smokestack\",\n \"Snappy Scoop\",\n \"Soigne Folio\",\n \"Soot Stain\",\n \"Spring Forward\",\n \"Spring Water\",\n \"Squeaky Clean\",\n \"Star Gazer\",\n \"Steel Wright\",\n \"Stella\",\n \"Sterling Silver\",\n 
\"Strawberry Cream\",\n \"Strawberry Ice\",\n \"Sun Streak\",\n \"Sunny Smiles\",\n \"Sunset Bliss\",\n \"Surf\",\n \"Swanky Hank\",\n \"Sweetberry\",\n \"Sweetie Drops\",\n \"Swirly Cotton\",\n \"Symphony\",\n \"Tall Order\",\n \"Tall Tale\",\n \"Temple Chant\",\n \"Theodore Donald \\\"Donny\\\" Kerabatsos\",\n \"Toffee\",\n \"Tree Sap\",\n \"Tropical Spring\",\n \"Turf\",\n \"Twilight Sky\",\n \"Uncle Wing\",\n \"Vanilla Sweets\",\n \"Vera\",\n \"Vidala Swoon\",\n \"Wacky Hair Day and Spray\",\n \"Mr. Waddle\",\n \"Bowling PonyWalter\",\n \"Welch\",\n \"Welly\",\n \"Wetzel\",\n \"Wildwood Flower\",\n \"Wilma\",\n \"Winter Withers\",\n \"Wisp\",\n \"Withers\",\n \"Mr. Zippy\",\n \"Earth Crystal Pony royal guards\",\n \"Amber Waves\",\n \"Amberlocks\",\n \"Amethyst Maresbury\",\n \"Arctic Lily\",\n \"Berry Splash\",\n \"Bright Smile\",\n \"Check Mate\",\n \"Crystal Arrow\",\n \"Crystal Varado\",\n \"Dandy Brush\",\n \"Elbow Grease\",\n \"Esmeralda\",\n \"Fleur de Verre\",\n \"Golden Glitter\",\n \"Goldilocks\",\n \"Honeycomb\",\n \"Ivory\",\n \"Jade\",\n \"Jewel Joy\",\n \"Lilac Luster\",\n \"Mercury\",\n \"Neighls Bohr\",\n \"Night Knight\",\n \"Periwinkle Pace\",\n \"Purple Polish\",\n \"Rapid Rush\",\n \"Rook Ramparts\",\n \"Rose Quartz\",\n \"Rubinstein\",\n \"Sand Arrow\",\n \"Sapphire Rose\",\n \"Sugar Glass\",\n \"Sunshine Splash\",\n \"Toastie\",\n \"Winnow Wind\",\n \"Zirconic\",\n \"Pegasus Crystal Pony royal guards\",\n \"Glass Slipper\",\n \"Hope\",\n \"Opal Bloom\",\n \"Blaze\",\n \"Fire Streak\",\n \"High Winds\",\n \"Lightning Streak\",\n \"Misty Fly\",\n \"Silver Lining\",\n \"Surprise\",\n \"Wave Chill\",\n \"Crescent PonyCrescent Moon\",\n \"Fast Clip\",\n \"Ring Out\",\n \"Whiplash\",\n \"April Showers\",\n \"Big Shot\",\n \"Blue October / Blueberry Muffin\",\n \"Bluebell\",\n \"Blueberry Banana\",\n \"Blueberry Cloud\",\n \"Blueberry Punch\",\n \"Bluebird Happiness\",\n \"Bon Voyage\",\n \"Buddy\",\n \"Candy Floss\",\n \"Cerulean Skies\",\n \"Chocolate Blueberry\",\n \"Cinnamon Swirl\",\n \"Cloud Break\",\n \"Cloud Kicker\",\n \"Cloud Showers\",\n \"Compass Star\",\n \"Cosmic\",\n \"Cream Tangerine\",\n \"Deep Blue\",\n \"Descent\",\n \"Dewdrop\",\n \"Downdraft\",\n \"Drizzle\",\n \"Dust Devil\",\n \"Eff Stop\",\n \"Electric Blue\",\n \"Endless Clouds\",\n \"Foggy Fleece\",\n \"Golden Glory\",\n \"Graceful Falls\",\n \"Grape Soda\",\n \"Great Scott\",\n \"Helia\",\n \"High Note\",\n \"High Spirits\",\n \"Honey Rays\",\n \"Jack Hammer\",\n \"Jetstream\",\n \"Juicy Fruit\",\n \"Laurette\",\n \"Lavender Skies\",\n \"Cloudsdale Cheer PonyLilac Sky\",\n \"Lime Jelly\",\n \"Madden\",\n \"Merry May\",\n \"Midnight Strike\",\n \"Nightingale\",\n \"Pegasus DadNightjar\",\n \"Northern Lights\",\n \"Orange Box\",\n \"Orange Swirl\",\n \"Parasol\",\n \"Parula\",\n \"Pink Cloud\",\n \"Pizzelle\",\n \"Prim Posy\",\n \"Prism Glider\",\n \"Pumpkin Tart\",\n \"Q. T. 
Prism\",\n \"Rain Dance\",\n \"Rainbow Blaze\",\n \"Rainbow Drop\",\n \"Rainbow Swoop\",\n \"Rainbowshine\",\n \"Ralph\",\n \"Riverdance\",\n \"Rosewing\",\n \"Sandstorm\",\n \"Sassaflash\",\n \"Score\",\n \"Serenity\",\n \"Sightseer\",\n \"Silver Script\",\n \"Silverspeed\",\n \"Silverwing\",\n \"Sky Flower\",\n \"Skyra\",\n \"Slipstream\",\n \"Snow Flight\",\n \"Snowslide\",\n \"Special Delivery\",\n \"Spring Skies\",\n \"Cloudsdale Cheer PonySpring Step\",\n \"Sprinkle Medley\",\n \"Star Hunter\",\n \"Starburst\",\n \"Stardancer\",\n \"Starry Eyes\",\n \"Starsong / Sugar Apple\",\n \"Steam Roller\",\n \"Stormbreaker\",\n \"Stormfeather\",\n \"Strawberry Sunrise\",\n \"Sugar Twist\",\n \"Sunburst\",\n \"Sunlight\",\n \"Sunny Rays\",\n \"Sunshower Raindrops\",\n \"Sunstone\",\n \"Tenth DoctorThe Tenth Doctor / Doctor Whooves #3\",\n \"Thorn\",\n \"Tiger Lily\",\n \"Tracy Flash / Shutterfly\",\n \"Tropical Storm\",\n \"Velvet Light\",\n \"White Lightning\",\n \"Whitewash\",\n \"Wild Fire\",\n \"Cadet #2Wild Flower\",\n \"Wind Chill\",\n \"Wing Wishes\",\n \"Amethyst Star\",\n \"Apple Polish\",\n \"Apple Stars\",\n \"Arpeggio\",\n \"Ballad\",\n \"Banana Fluff\",\n \"Beyond\",\n \"Black Marble\",\n \"Blue Belle\",\n \"Blue Moon\",\n \"Brass Blare\",\n \"Bright Bulb\",\n \"Charm\",\n \"Cherry Spices\",\n \"Chocolate Sun\",\n \"Chocolate Tail\",\n \"Cinnabelle\",\n \"Cipher Splash\",\n \"Cold Front\",\n \"Comet Tail\",\n \"Crystal Clear\",\n \"Dark Moon\",\n \"Diamond Mint\",\n \"DJ Pon-3\",\n \"Earl Grey\",\n \"Eliza\",\n \"Film Reel\",\n \"Fine Line\",\n \"Flank Sinatra\",\n \"Fly Wishes\",\n \"Four Step\",\n \"Fuchsia Fizz\",\n \"Gold Slipper\",\n \"Golden Gavel\",\n \"Hair Air\",\n \"Holly Dash\",\n \"Hors D'oeuvre\",\n \"Duchess of MaretoniaIce Mirror\",\n \"Infinity\",\n \"InquisitorThe Inquisitor\",\n \"Lemon Hearts\",\n \"Lemony Gem\",\n \"Lyra Heartstrings\",\n \"Minuette\",\n \"Monochrome Sunset\",\n \"Neon Lights\",\n \"Nixie\",\n \"Nook\",\n \"Ocean Breeze\",\n \"Orchid Dew\",\n \"Banner VendorPeachy Pitt\",\n \"Perfect Timing\",\n \"Pinny Lane\",\n \"Pixie\",\n \"Elite PonyPonet\",\n \"Poppycock\",\n \"Precious\",\n \"Primrose\",\n \"Pristine\",\n \"Rare Find\",\n \"Raven\",\n \"Red Rose\",\n \"Rhythm / Night Shade\",\n \"Rosewood Brook\",\n \"Royal Pin\",\n \"Royal Ribbon\",\n \"Sea Spray\",\n \"Sea Swirl\",\n \"Silver Spanner\",\n \"South Pole\",\n \"Spring Fresh\",\n \"Star Bright\",\n \"Star Dream / Sky Dream\",\n \"Strawberry Lime\",\n \"Sugarberry\",\n \"Swan Song\",\n \"Sweet Dreams\",\n \"Top Marks\",\n \"Top Notch\",\n \"Twinkleshine\",\n \"Violet Velvet\",\n \"Written Script\",\n \"Amethyst Beat\",\n \"Apple Bytes\",\n \"Archer\",\n \"Aura\",\n \"Bags Valet\",\n \"Bee Bop\",\n \"Berry Pinch\",\n \"Blade Runner\",\n \"Bloo\",\n \"Blueberry Swirl\",\n \"Bolt\",\n \"Brown Sugar\",\n \"Button Mash\",\n \"Caramel Coffee\",\n \"Cheery\",\n \"Chip Mint\",\n \"Coronet\",\n \"Cotton Cloudy\",\n \"Cotton Top\",\n \"Cream Puff\",\n \"Cyan Skies\",\n \"Dinky Doo\",\n \"Dipsy\",\n \"Finish Line\",\n \"Firelock\",\n \"Fruitbasket\",\n \"Green Daze\",\n \"Hairpin Turn\",\n \"Honey Drop\",\n \"Key Lime\",\n \"Lance\",\n \"Lemon Daze\",\n \"Lemon Scratch\",\n \"Lickety Split\",\n \"Lily Dache\",\n \"Liza Doolots\",\n \"Mango Dash\",\n \"Melody\",\n \"Mint Flower\",\n \"Noi\",\n \"Nursery Rhyme\",\n \"Peachy Petal\",\n \"Peachy Pie\",\n \"Pearly Whites\",\n \"Pinkie Feather\",\n \"Piña Colada\",\n \"Pomegranate\",\n \"Princess Erroria\",\n \"Purpletastic / Purpleskies\",\n \"Rainy 
Feather\",\n \"Royal Blue\",\n \"Shady Daze\",\n \"Shining Star\",\n \"Strike\",\n \"Sugar Plum\",\n \"Sun Glimmer\",\n \"Sunny Daze\",\n \"Sweet Pop\",\n \"Sweet Tart\",\n \"Tornado Bolt\",\n \"Treasure\",\n \"Teacher's PetTruffle Shuffle\",\n \"Cosmic\",\n \"Crafty Crate\",\n \"Crescent Moon\",\n \"Derpy\",\n \"Fluttershy\",\n \"Helia\",\n \"Jack Hammer\",\n \"Lyra Heartstrings\",\n \"Minuette\",\n \"Rainbow Dash\",\n \"Rainbow Swoop\",\n \"Rarity\",\n \"Raven\",\n \"Sassaflash\",\n \"Sprinkle Medley\",\n \"Star Hunter\",\n \"Starburst\",\n \"Steam Roller\",\n \"Stellar Eclipse\",\n \"Sunburst\",\n \"Swan Song\",\n \"The Tenth Doctor / Doctor Whooves #3\",\n \"Thunderlane\",\n \"Twilight Sparkle\",\n \"White Lightning\",\n \"Berryshine\",\n \"Caramel\",\n \"Dr. Hooves\",\n \"Emerald Green\",\n \"Globe Trotter\",\n \"Golden Harvest\",\n \"Luckette\",\n \"Lucky Clover\",\n \"Meadow Song\",\n \"Noteworthy\",\n \"Peachy Sweet\",\n \"Perfect Pace\",\n \"Rivet\",\n \"Daring Do CollectorTeddie Safari\",\n \"Twilight Sky\",\n \"Twilight Sparkle\",\n \"Wisp\",\n \"Discord\",\n \"Goldengrape\",\n \"Lucy Packard\",\n \"Noteworthy\",\n \"Perfect Pace\",\n \"Red Gala\",\n \"Savoir Fare\",\n \"Shooting Star\",\n \"Sir Pony Moore\",\n \"Snails\",\n \"Applejack\",\n \"2Aura\",\n \"Berry Pinch\",\n \"Big McIntosh\",\n \"Cheerilee\",\n \"Cheese Sandwich\",\n \"Cloudchaser\",\n \"Cotton Cloudy\",\n \"Derpy\",\n \"Dumb-Bell\",\n \"Flitter\",\n \"Fluttershy\",\n \"Granny Smith\",\n \"Hoops\",\n \"Liberty Belle\",\n \"Liza Doolots\",\n \"Luckette\",\n \"Lyra Heartstrings\",\n \"Pinkie's SisterMaud Pie\",\n \"Minuette\",\n \"Pinkie Pie\",\n \"Piña Colada\",\n \"3Princess Erroria\",\n \"Rainbow Dash\",\n \"Rainy Feather\",\n \"Rarity\",\n \"Red June\",\n \"Scootaloo\",\n \"Strawberry Ice\",\n \"Sweetie Belle\",\n \"Sweetie Drops\",\n \"Tornado Bolt\",\n \"Twilight Sparkle\",\n \"S04E06 Unnamed Earth Mare #2\",\n \"S04E06 Unnamed Earth Mare #3\",\n \"S01E11 Unnamed Earth Mare #3\",\n \"S01E11 Unnamed Earth Mare #4\",\n \"S01E11 Unnamed Earth Mare #5\",\n \"S01E14 Unnamed Earth Mare #1\",\n \"S01E14 Unnamed Earth Mare #2\",\n \"S01E14 Unnamed Earth Mare #3\",\n \"S01E14 Unnamed Earth Mare #4\",\n \"S01E14 Unnamed Earth Mare #11\",\n \"S01E26 Unnamed Earth Stallion #1\",\n \"S01E26 Unnamed Earth Stallion #2\",\n \"S01E26 Unnamed Earth Stallion #3\",\n \"S02E17 Unnamed Earth Mare #1\",\n \"S02E26 Unnamed Earth Mare #2\",\n \"S03E02 Unnamed Earth Stallion #1\",\n \"S04E04 Unnamed Earth Stallion #1\",\n \"S04E04 Unnamed Earth Stallion #2\",\n \"S04E04 Unnamed Earth Stallion #3\",\n \"S04E04 Unnamed Earth Stallion #4\",\n \"S04E04 Unnamed Earth Stallion #5\",\n \"S04E04 Unnamed Earth Stallion #6\",\n \"S04E04 Unnamed Earth Stallion #7\",\n \"S04E04 Unnamed Earth Stallion #8\",\n \"S04E04 Unnamed Earth Stallion #12\",\n \"S04E04 Unnamed Earth Stallion #13\",\n \"S04E04 Unnamed Earth Stallion #15\",\n \"S04E06 Unnamed Earth Stallion #1\",\n \"S04E06 Unnamed Earth Stallion #2\",\n \"CabbieS04E06 Unnamed Earth Stallion #3\",\n \"S04E06 Unnamed Earth Stallion #4\",\n \"SoldierS04E06 Unnamed Earth Stallion #5\",\n \"OfficerS04E06 Unnamed Earth Stallion #6\",\n \"S04E06 Unnamed Earth Stallion #7\",\n \"S04E06 Unnamed Earth Stallion #8\",\n \"CaptainS04E06 Unnamed Earth Stallion #9\",\n \"S04E07 Unnamed Earth Mare #1\",\n \"S04E07 Unnamed Earth Mare #2\",\n \"S04E07 Unnamed Earth Mare #3\",\n \"S04E07 Unnamed Earth Stallion #1\",\n \"S04E07 Unnamed Earth Stallion #2\",\n \"S04E08 Unnamed Earth Mare #1\",\n \"S04E08 Unnamed 
Earth Mare #2\",\n \"S04E08 Unnamed Earth Mare #4\",\n \"S04E08 Unnamed Earth Mare #5\",\n \"S04E08 Unnamed Earth Mare #6\",\n \"S04E08 Unnamed Earth Mare #8\",\n \"S04E08 Unnamed Earth Mare #9\",\n \"S04E08 Unnamed Earth Mare #10\",\n \"S04E08 Unnamed Earth Mare #11\",\n \"S04E08 Unnamed Earth Mare #12\",\n \"S04E08 Unnamed Earth Mare #13\",\n \"S04E08 Unnamed Earth Mare #14\",\n \"S04E08 Unnamed Earth Mare #15\",\n \"S04E08 Unnamed Earth Mare #16\",\n \"S04E08 Unnamed Earth Mare #17\",\n \"S04E08 Unnamed Earth Mare #18\",\n \"S04E08 Unnamed Earth Stallion #1\",\n \"S04E08 Unnamed Earth Stallion #2\",\n \"S04E08 Unnamed Earth Stallion #3\",\n \"S04E08 Unnamed Earth Stallion #6\",\n \"BellhopS04E08 Unnamed Earth Stallion #7\",\n \"S04E08 Unnamed Earth Stallion #8\",\n \"S04E08 Unnamed Earth Stallion #9\",\n \"S04E08 Unnamed Earth Stallion #10\",\n \"S04E08 Unnamed Earth Stallion #12\",\n \"S04E08 Unnamed Earth Stallion #13\",\n \"S04E10 Unnamed Earth Stallion #1\",\n \"S04E11 Unnamed Earth Mare #1\",\n \"S04E11 Unnamed Earth Stallion #1\",\n \"S04E12 Unnamed Earth Mare #1\",\n \"S04E12 Unnamed Earth Mare #3\",\n \"S04E12 Unnamed Earth Mare #4\",\n \"S04E12 Unnamed Earth Stallion #1\",\n \"S04E12 Unnamed Earth Stallion #2\",\n \"S04E12 Unnamed Earth Stallion #3\",\n \"S04E12 Unnamed Earth Stallion #4\",\n \"S04E12 Unnamed Earth Stallion #5\",\n \"S04E12 Unnamed Earth Stallion #6\",\n \"S04E13 Unnamed Earth Stallion #1\",\n \"Astro PonyS04E13 Unnamed Earth Stallion #2\",\n \"S04E13 Unnamed Earth Stallion #3\",\n \"S04E14 Unnamed Earth Mare #1\",\n \"S04E14 Unnamed Earth Mare #2\",\n \"S04E14 Unnamed Earth Mare #3\",\n \"S04E14 Unnamed Earth Mare #4\",\n \"S04E14 Unnamed Earth Mare #5\",\n \"S04E14 Unnamed Earth Mare #6\",\n \"S04E16 Unnamed Earth Stallion #1\",\n \"S04E16 Unnamed Earth Stallion #2\",\n \"S04E16 Unnamed Earth Stallion #3\",\n \"S04E17 Unnamed Earth Mare #1\",\n \"S04E17 Unnamed Earth Mare #2\",\n \"S04E17 Unnamed Earth Mare #3\",\n \"S04E17 Unnamed Earth Stallion #1\",\n \"S04E17 Unnamed Earth Stallion #2\",\n \"S04E17 Unnamed Earth Stallion #3\",\n \"S04E17 Unnamed Earth Stallion #4\",\n \"S04E17 Unnamed Earth Stallion #5\",\n \"S04E19 Unnamed Earth Mare #1\",\n \"S04E19 Unnamed Earth Mare #2\",\n \"S04E19 Unnamed Earth Mare #3\",\n \"S04E19 Unnamed Earth Mare #4\",\n \"S04E19 Unnamed Earth Stallion #1\",\n \"S04E20 Unnamed Earth Mare #1\",\n \"S04E20 Unnamed Earth Mare #2\",\n \"S04E20 Unnamed Earth Mare #3\",\n \"S04E20 Unnamed Earth Mare #4\",\n \"S04E20 Unnamed Earth Mare #5\",\n \"S04E20 Unnamed Earth Stallion #1\",\n \"S04E20 Unnamed Earth Stallion #3\",\n \"S04E20 Unnamed Earth Stallion #4\",\n \"S04E20 Unnamed Earth Stallion #5\",\n \"S04E20 Unnamed Earth Stallion #6\",\n \"S04E20 Unnamed Earth Stallion #7\",\n \"S04E20 Unnamed Earth Stallion #8\",\n \"S04E20 Unnamed Earth Stallion #9\",\n \"S04E20 Unnamed Earth Stallion #10\",\n \"S04E20 Unnamed Earth Stallion #11\",\n \"S04E20 Unnamed Earth Stallion #12\",\n \"S04E20 Unnamed Earth Stallion #13\",\n \"S04E22 Unnamed Earth Mare #1\",\n \"S04E22 Unnamed Earth Mare #2\",\n \"S04E22 Unnamed Earth Mare #3\",\n \"S04E22 Unnamed Earth Mare #4\",\n \"S04E22 Unnamed Earth Mare #5\",\n \"S04E22 Unnamed Earth Mare #6\",\n \"S04E22 Unnamed Earth Mare #7\",\n \"S04E22 Unnamed Earth Mare #8\",\n \"S04E22 Unnamed Earth Mare #9\",\n \"S04E22 Unnamed Earth Mare #10\",\n \"S04E22 Unnamed Earth Mare #11\",\n \"S04E22 Unnamed Earth Mare #12\",\n \"S04E22 Unnamed Earth Mare #14\",\n \"S04E22 Unnamed Earth Mare #15\",\n \"S04E22 Unnamed 
Earth Mare #16\",\n \"S04E22 Unnamed Earth Mare #17\",\n \"S04E22 Unnamed Earth Mare #18\",\n \"S04E22 Unnamed Earth Mare #19\",\n \"S04E22 Unnamed Earth Mare #20\",\n \"S04E22 Unnamed Earth Mare #21\",\n \"S04E22 Unnamed Earth Mare #22\",\n \"S04E22 Unnamed Earth Mare #23\",\n \"S04E22 Unnamed Earth Stallion #1\",\n \"S04E22 Unnamed Earth Stallion #2\",\n \"S04E22 Unnamed Earth Stallion #5\",\n \"S04E22 Unnamed Earth Stallion #6\",\n \"S04E22 Unnamed Earth Stallion #7\",\n \"S04E22 Unnamed Earth Stallion #8\",\n \"S04E22 Unnamed Earth Stallion #9\",\n \"S04E22 Unnamed Earth Stallion #10\",\n \"S04E22 Unnamed Earth Stallion #11\",\n \"S04E22 Unnamed Earth Stallion #12\",\n \"S04E22 Unnamed Earth Stallion #13\",\n \"S04E24 Unnamed Earth Mare #2\",\n \"S04E24 Unnamed Earth Mare #3\",\n \"S04E24 Unnamed Earth Mare #4\",\n \"S04E24 Unnamed Earth Mare #5\",\n \"S04E24 Unnamed Earth Mare #6\",\n \"S04E24 Unnamed Earth Mare #7\",\n \"S04E24 Unnamed Earth Stallion #4\",\n \"S04E24 Unnamed Earth Stallion #6\",\n \"S04E24 Unnamed Earth Stallion #7\",\n \"S04E24 Unnamed Earth Stallion #8\",\n \"S04E26 Unnamed Earth Mare #1\",\n \"S04E26 Unnamed Earth Stallion #1\",\n \"EG2 Unnamed Earth Stallion\",\n \"S01E04 Unnamed Pegasus Mare #1\",\n \"S01E04 Unnamed Pegasus Mare #2\",\n \"S01E06 Unnamed Pegasus Mare #2\",\n \"S01E11 Unnamed Pegasus Mare #3\",\n \"S01E11 Unnamed Pegasus Mare #5\",\n \"S01E11 Unnamed Pegasus Mare #7\",\n \"S01E14 Unnamed Pegasus Mare #1\",\n \"S01E14 Unnamed Pegasus Mare #2\",\n \"S01E16 Unnamed Pegasus Stallion #1\",\n \"S01E20 Unnamed Pegasus Mare #1\",\n \"S01E20 Unnamed Pegasus Mare #2\",\n \"S01E20 Unnamed Pegasus Mare #3\",\n \"S02E11 Unnamed Pegasus Mare #1\",\n \"S02E11 Unnamed Pegasus Mare #2\",\n \"S02E18 Unnamed Pegasus Mare #3\",\n \"S02E22 Unnamed Pegasus Mare #7\",\n \"S02E22 Unnamed Pegasus Mare #8\",\n \"S04E08 Unnamed Pegasus Mare #1\",\n \"S04E08 Unnamed Pegasus Mare #2\",\n \"S04E08 Unnamed Pegasus Stallion #1\",\n \"S04E10 Unnamed Pegasus Mare #1\",\n \"S04E10 Unnamed Pegasus Mare #2\",\n \"S04E10 Unnamed Pegasus Mare #3\",\n \"S04E10 Unnamed Pegasus Mare #4\",\n \"S04E10 Unnamed Pegasus Mare #5\",\n \"S04E10 Unnamed Pegasus Mare #7\",\n \"S04E10 Unnamed Pegasus Stallion #1\",\n \"S04E10 Unnamed Pegasus Stallion #3\",\n \"S04E10 Unnamed Pegasus Stallion #4\",\n \"S04E13 Unnamed Pegasus Mare #1\",\n \"S04E14 Unnamed Pegasus Mare #1\",\n \"S04E20 Unnamed Pegasus Mare #1\",\n \"S04E20 Unnamed Pegasus Mare #3\",\n \"S04E21 Unnamed Pegasus Stallion #1\",\n \"S04E21 Unnamed Pegasus Stallion #2\",\n \"S04E24 Unnamed Pegasus Mare #1\",\n \"S04E24 Unnamed Pegasus Mare #2\",\n \"S04E24 Unnamed Pegasus Mare #3\",\n \"S04E26 Unnamed Pegasus Mare #1\",\n \"S01E14 Unnamed Unicorn Mare #1\",\n \"S01E14 Unnamed Unicorn Mare #2\",\n \"S01E14 Unnamed Unicorn Mare #3\",\n \"S01E14 Unnamed Unicorn Mare #4\",\n \"S01E14 Unnamed Unicorn Mare #5\",\n \"S01E14 Unnamed Unicorn Mare #6\",\n \"S02E05 Unnamed Unicorn Mare #1\",\n \"S02E11 Unnamed Unicorn Mare #1\",\n \"S02E11 Unnamed Unicorn Mare #2\",\n \"S02E11 Unnamed Unicorn Mare #4\",\n \"S02E26 Unnamed Unicorn Mare #1\",\n \"S02E26 Unnamed Unicorn Mare #2\",\n \"S02E26 Unnamed Unicorn Mare #3\",\n \"S02E26 Unnamed Unicorn Mare #4\",\n \"S02E26 Unnamed Unicorn Mare #5\",\n \"S02E26 Unnamed Unicorn Mare #6\",\n \"S02E26 Unnamed Unicorn Mare #7\",\n \"S02E26 Unnamed Unicorn Mare #8\",\n \"S02E26 Unnamed Unicorn Mare #9\",\n \"S02E26 Unnamed Unicorn Mare #10\",\n \"S02E26 Unnamed Unicorn Mare #11\",\n \"S02E26 Unnamed Unicorn Mare 
#12\",\n \"S02E26 Unnamed Unicorn Stallion #1\",\n \"S02E26 Unnamed Unicorn Stallion #2\",\n \"S02E26 Unnamed Unicorn Stallion #4\",\n \"S04E01 Unnamed Unicorn Mare #1\",\n \"S04E08 Unnamed Unicorn Mare #1\",\n \"S04E08 Unnamed Unicorn Stallion #1\",\n \"S04E10 Unnamed Unicorn Stallion #1\",\n \"S04E12 Unnamed Unicorn Mare #1\",\n \"S04E12 Unnamed Unicorn Mare #2\",\n \"S04E12 Unnamed Unicorn Mare #4\",\n \"S04E12 Unnamed Unicorn Stallion #1\",\n \"S04E20 Unnamed Unicorn Stallion #1\",\n \"S04E22 Unnamed Unicorn Mare #1\",\n \"S04E22 Unnamed Unicorn Mare #3\",\n \"S04E22 Unnamed Unicorn Mare #4\",\n \"S04E22 Unnamed Unicorn Stallion #2\",\n \"S04E22 Unnamed Unicorn Stallion #3\",\n \"S04E24 Unnamed Unicorn Mare #2\",\n \"S04E24 Unnamed Unicorn Mare #3\",\n \"S04E24 Unnamed Unicorn Mare #4\",\n \"S04E24 Unnamed Unicorn Mare #5\",\n \"S04E24 Unnamed Unicorn Mare #6\",\n \"S04E24 Unnamed Unicorn Mare #7\",\n \"S04E24 Unnamed Unicorn Stallion #1\",\n \"S04E26 Unnamed Unicorn Mare #1\",\n \"S04E26 Unnamed Unicorn Mare #2\",\n \"S02E23 Unnamed Earth Filly #3\",\n \"Little PonyS03E11 Unnamed Earth Filly #1\",\n \"S04E08 Unnamed Earth Filly #1\",\n \"S04E12 Unnamed Unicorn Filly #1\",\n \"S04E12 Unnamed Earth Colt #1\",\n \"S04E15 Unnamed Earth Filly #1\",\n \"S04E15 Unnamed Earth Filly #2\",\n \"S04E15 Unnamed Earth Filly #3\",\n \"S04E15 Unnamed Unicorn Filly #1\",\n \"S04E15 Unnamed Earth Filly #4\",\n \"S04E15 Unnamed Earth Colt #1\",\n \"S04E15 Unnamed Earth Colt #2\",\n \"S04E15 Unnamed Earth Colt #4\",\n \"S04E15 Unnamed Earth Colt #5\",\n \"S04E15 Unnamed Earth Colt #6\",\n \"S04E19 Unnamed Earth Filly #2\",\n \"S04E19 Unnamed Pegasus Filly #1\",\n \"S04E19 Unnamed Unicorn Filly #1\",\n \"S04E19 Unnamed Earth Colt #1\",\n \"S04E19 Unnamed Earth Colt #2\",\n \"S04E19 Unnamed Earth Colt #3\",\n \"S04E19 Unnamed Earth Colt #4\",\n \"S04E22 Unnamed Earth Filly #1\",\n \"S04E22 Unnamed Pegasus Filly #1\",\n \"Comic Geek PonyS04E22 Unnamed Earth Colt #1\",\n \"S04E23 Unnamed Earth Colt #1\",\n \"Prince Blue Dream\",\n \"Chancellor Puddinghead\",\n \"Fili-Second\",\n \"Mistress Mare-velous\",\n \"Smart Cookie\",\n \"Commander Hurricane\",\n \"General Blazing Donut Glaze\",\n \"Private Pansy\",\n \"Saddle Rager\",\n \"Zapp\",\n \"Princess Golden Dream\",\n \"Clover the Clever\",\n \"King Bullion\",\n \"Masked Matter-Horn\",\n \"Moondancer\",\n \"Princess Platinum\",\n \"Radiance\",\n \"Star Swirl the Bearded\",\n \"Apple Brioche\",\n \"Apple Cinnamon Crisp\",\n \"Apple Tart\",\n \"Uncle Apple Tart\",\n \"Babs Seed's big sis\",\n \"Baked Apples\",\n \"Bumpkin\",\n \"Calamity Mane\",\n \"Colonel Waffle\",\n \"Crystal Queen\",\n \"Ernie\",\n \"Flourish Prose\",\n \"Granny Pie\",\n \"Mr. 
Kingpin\",\n \"Namby-Pamby\",\n \"Nana Pinkie\",\n \"Aunt Pine Apple\",\n \"Quilland Ink\",\n \"Thornhoof\",\n \"Wild Bull Hickok\",\n \"S02E09 Unnamed Announcer\",\n \"S02E22 Unnamed Announcer\",\n \"S02E24 Unnamed Earth Stallion #0\",\n \"S04E08 Rarity's costume designer friend\",\n \"Rapidfire\",\n \"General Firefly\",\n \"Colonel Purple Dart\",\n \"Commander Easyglider\",\n \"Admiral Fairweather\",\n \"Admiral Fairy Flight\",\n \"General Flash\",\n \"Flaire De Mare\"]\n","sub_path":"ponies.py","file_name":"ponies.py","file_ext":"py","file_size_in_byte":34455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"248482697","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 20 12:00:36 2020\r\n\r\n@author: Defender\r\n\"\"\"\r\n\r\nimport pandas as pd\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nfrom sklearn.cluster import KMeans\r\nimport matplotlib.pyplot as plt\r\nfrom IPython import get_ipython\r\nfrom sklearn.decomposition import PCA \r\nfrom sklearn.preprocessing import StandardScaler, normalize\r\nimport scipy.cluster.hierarchy as shc\r\n\r\n\r\ntry:\r\n df_wine=pd.read_csv('Dataset.csv',encoding='latin-1')\r\n df_wine.head()\r\n \r\n df_wine.describe()\r\n \r\n ##K MEANS CLUSTERING\r\n mms = MinMaxScaler()\r\n mms.fit(df_wine)\r\n data_transformed = mms.transform(df_wine)\r\n\r\n Sum_of_squared_distances = []\r\n k = list(range(1,15))\r\n\r\n\r\n for i in k:\r\n\r\n km = KMeans(n_clusters=i)\r\n km = km.fit(data_transformed)\r\n Sum_of_squared_distances.append(km.inertia_)\r\n\r\n get_ipython().run_line_magic('matplotlib', 'qt')\r\n plt.figure(1)\r\n plt.plot(k, Sum_of_squared_distances, 'bx-')\r\n plt.xlabel('k')\r\n plt.ylabel('Sum_of_squared_distances')\r\n plt.title('Elbow Method For Optimal k')\r\n \r\n \r\n\r\n kmeans = KMeans(n_clusters=3, init='k-means++', max_iter=300, n_init=10, random_state=0)\r\n pred_y = kmeans.fit_predict(data_transformed)\r\n plt.figure(2)\r\n plt.scatter(data_transformed[:,0], data_transformed[:,1])\r\n plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='red')\r\n plt.show()\r\n \r\n \r\n ## HEIRARCHICAL CLUSTERING\r\n # Handling the missing values \r\n df_wine.fillna(method ='ffill', inplace = True) \r\n \r\n #Preprocessing the data\r\n\r\n \r\n # Scaling the data so that all the features become comparable \r\n scaler = StandardScaler() \r\n df_wine_scaled = scaler.fit_transform(df_wine) \r\n \r\n # Normalizing the data so that the data approximately \r\n # follows a Gaussian distribution \r\n df_wine_normalized = normalize(df_wine_scaled) \r\n \r\n # Converting the numpy array into a pandas DataFrame \r\n df_wine_normalized = pd.DataFrame(df_wine_normalized) \r\n \r\n #Reducing the dimensionality of the Data\r\n\r\n \r\n pca = PCA(n_components = 14) \r\n df_wine_principal = pca.fit_transform(df_wine_normalized) \r\n df_wine_principal = pd.DataFrame(df_wine_principal) \r\n df_wine_principal.columns = ['Type','Alcohol','Malic','Ash','Alcalinity','Magnesium','Phenols','Flavanoids','Nonflavanoids','Proanthocyanins','Color','Hue','Dilution','Proline'] \r\n\r\n \r\n plt.figure(figsize =(8, 8)) \r\n plt.title('Dendogram for wine') \r\n Dendrogram = shc.dendrogram((shc.linkage(df_wine_principal, method ='ward'))) \r\n \r\n \r\n \r\nexcept Exception as exp:\r\n print (exp)\r\n","sub_path":"PCA/python code.py","file_name":"python 
code.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"591335024","text":"# coding:utf8\n__author__ = 'bluesli'\n\n# import sys\n# print(sys.argv)\n\n# 两种的创建方式是等价的\n# list = [1,2,3,4,5,6,7]\n# ll = list(1,2,4,5,6)\n\n# t1 = (1,2,3,4,{'ll':'ll'})\n# # t1[4]['11'] = 'aa'\n# print(t1[4]['ll'])\n\n# dic = {'kk':'kk','yse':'wes','jj':'sjd'}\n# for k,v in dic.items:\n# print(k,v)\n\n# import time\n# a = time.time()\n# age =10\n# print(type(age))\n\n# age = 1\n# print(age.bit_length())#返回二进制数占用的位数;\n\n# all_item = 95\n# pager = 10\n# result = all_item.divmod\n#\n# age = 10\n# age.__eq__(11) #判断是否相等\n\n#\n# age = 5\n# result = age.__floordiv__(6) #其实是与5//6相同\n\n# age = 10\n# print(age.__ge__(12))\n# print(age.__radd__(1))\n# print(age.rdivmod)\n\n# name = 'bluesli'\n# print(type(name))\n# print(dir(name)) #dir是获取的类中的所有成员;verse是获取成员以及值\n\n# name = str('blusli') #str 类的__init__方法\n# result = name.__contains__('blu')\n# print(result)\n\n# name.__format__('blusli')\n# print(name)\n\n# name = str('bluesli')\n# result1 = name.capitalize()\n# result2 = name.casefold()\n# result3 = name.count('l',0,7)\n# result4 = name.center(20,'*')\n# result5 = name.encode('gbk')\n# result6 =name.startswith('b')\n# result7 = name.endswith('i')\n#\n# # for i in range(1,8):\n# # result=''\n# # result = result+str(i)\n# # print(result)\n#\n# print(result5)\n\n#format 其实是字符串的拼接和替换;\n# name= \"alex {name} as {id}\"\n# result = name.format(name='id',id='id')\n# print(result)\n# li = ['s','b','b','l','u','e','s','l','i']\n# for i in li:\n# ''.join(i)\n\n\n#列表:\n# li = list((1,2,3))\n# li = list(1,2,3)\n\n\n\n# 字典\n# dic = {'k1':'v1'}\n# dic = dict(k1='v1',k2='v2')\n# # ret = dic.fromkeys(['k1','k2'],'v1')\n# print(dic['k1'])\n# print(dic['k2'])\n# print(dic.get('k1'))\n# print(dic.get('k2'))\n# print(dic.get('k3','bluesli')) #自由在key不存在时才能等于bluesli\n# print(dic.keys())\n# print(dic.values())\n# print(dic.items()) #可以取得key value的形式使用for循环获取;\n\n\n#作业 区别列表中大于66和小于 简单的dict操作\n\n\nall_list = [11,22,33,44,55,66,77,88,99]\nl1 = []\nl2 = []\ndic = {}\nfor i in all_list:\n if i>66:\n if 'k1' in dic.keys():\n dic['k1'].append(i)\n else:\n dic['k1'] = [i,]\n else:\n if 'k2' in dic.keys():\n dic['k2'].append(i)\n else:\n dic['k2'] = [i,]\nprint(dic)\n\n","sub_path":"day2/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"496954745","text":"__author__ = 'tho'\n\nimport platform\nimport os\nimport subprocess\nimport sys\nimport time\nimport re\n\nfrom config import conf, gnuplot_cmd\n\nGNUPLOT_COMMON = 'set terminal png transparent size 640,240\\nset size 1.0,1.0\\n'\nON_LINUX = (platform.system() == 'Linux')\nWEEKDAYS = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')\nVERSION = 0\nexectime_internal = 0.0\nexectime_external = 0.0\n\ndef html_linkify(text):\n return text.lower().replace(' ', '_')\n\ndef html_header(level, text):\n name = html_linkify(text)\n return '\\n<h%d><a href=\"#%s\" name=\"%s\">%s</a></h%d>\\n\\n' % (level, name, name, text, level)\n\ndef getkeyssortedbyvalues(dict):\n return map(lambda el : el[1], sorted(map(lambda el : (el[1], el[0]), dict.items())))\n\ndef getpipeoutput(cmds, quiet = True):\n global exectime_external\n start = time.time()\n if not quiet and ON_LINUX and os.isatty(1):\n print('>> ' + ' | '.join(cmds))\n sys.stdout.flush()\n p0 = subprocess.Popen(cmds[0], stdout = 
subprocess.PIPE, shell = True)\n p = p0\n for x in cmds[1:]:\n p = subprocess.Popen(x, stdin = p0.stdout, stdout = subprocess.PIPE, shell = True)\n p0 = p\n output = p.communicate()[0]\n end = time.time()\n if not quiet:\n if ON_LINUX and os.isatty(1):\n print('\\r')\n print('[%.5f] >> %s' % (end - start, ' | '.join(cmds)))\n exectime_external += (end - start)\n return output.rstrip('\\n')\n\ndef getversion():\n global VERSION\n if VERSION == 0:\n gitstats_repo = os.path.dirname(os.path.abspath(__file__))\n VERSION = getpipeoutput([\"git --git-dir=%s/.git --work-tree=%s rev-parse --short %s %s\" %\n (gitstats_repo, gitstats_repo, getcommitrange('HEAD').split('\\n')[0], get_commit_time())])\n return VERSION\n\ndef getgitversion():\n return getpipeoutput(['git --version']).split('\\n')[0]\n\ndef getgnuplotversion():\n return getpipeoutput(['%s --version' % gnuplot_cmd]).split('\\n')[0]\n\ndef getnumoffilesfromrev(time_rev):\n \"\"\"\n Get number of files changed in commit\n \"\"\"\n time, rev = time_rev\n return (int(time), rev, int(getpipeoutput(['git ls-tree -r --name-only \"%s\"' % rev, 'wc -l']).split('\\n')[0]))\n\ndef getnumoflinesinblob(ext_blob):\n \"\"\"\n Get number of lines in blob\n \"\"\"\n ext, blob_id = ext_blob\n return (ext, blob_id, int(getpipeoutput(['git cat-file blob %s' % blob_id, 'wc -l']).split()[0]))\n\n\ndef getcommitrange(defaultrange = 'HEAD', end_only = False):\n if len(conf['commit_end']) > 0:\n if end_only or len(conf['commit_begin']) == 0:\n return conf['commit_end']\n return '%s..%s' % (conf['commit_begin'], conf['commit_end'])\n return defaultrange\n\ndef get_commit_time():\n timerange = \"\"\n if len(conf['time_end']) > 0:\n timerange += ('--before=\"%s\"' % conf['time_end'])\n if len(conf['time_begin']) > 0:\n timerange += ('--since=\"%s\"' % conf['time_begin'])\n\n return timerange\n\n\n# dict['author'] = { 'commits': 512 } - ...key(dict, 'commits')\ndef getkeyssortedbyvaluekey(d, key):\n return map(lambda el : el[1], sorted(map(lambda el : (d[el][key], el), d.keys())))\n\ndef getstatsummarycounts(line):\n numbers = re.findall('\\d+', line)\n if len(numbers) == 1:\n # neither insertions nor deletions: may probably only happen for \"0 files changed\"\n numbers.append(0);\n numbers.append(0);\n elif len(numbers) == 2 and line.find('(+)') != -1:\n numbers.append(0); # only insertions were printed on line\n elif len(numbers) == 2 and line.find('(-)') != -1:\n numbers.insert(1, 0); # only deletions were printed on line\n return numbers\n","sub_path":"common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"82205850","text":"import cv2\r\nchoice = input('Enter 1 to use Webcam, anything else to read an image from a file')\r\nif choice == '1':\r\n print('Press Q to quit the image window')\r\n cap = cv2.VideoCapture(0)\r\n while(True):\r\n ret, frame = cap.read()\r\n cv2.imshow('image', frame)\r\n if cv2.waitKey(30) & 0xFF == ord('q'):\r\n break\r\n cap.release()\r\n cv2.destroyAllWindows()\r\nelse:\r\n path = input('Enter the path for the image from this folder')\r\n img = cv2.imread(path,0)\r\n cv2.imshow('images', img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()","sub_path":"ReadAnImage.py","file_name":"ReadAnImage.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"494306471","text":"from typing import Any, Dict, Optional, Union\n\nfrom 
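getpipeoutput above chains shell commands by wiring each Popen's stdin to the previous one's stdout. The same pattern in isolation, with placeholder echo/wc commands:

# Chain two commands like "echo hi | wc -c" without a shell pipe.
import subprocess

p1 = subprocess.Popen(['echo', 'hi'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['wc', '-c'], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()  # let p1 receive SIGPIPE if p2 exits early
out, _ = p2.communicate()
print(out.decode().strip())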
botocore import UNSIGNED as UNSIGNED\nfrom botocore import waiter as waiter\nfrom botocore import xform_name as xform_name\nfrom botocore.args import ClientArgsCreator as ClientArgsCreator\nfrom botocore.auth import AUTH_TYPE_MAPS as AUTH_TYPE_MAPS\nfrom botocore.awsrequest import prepare_request_dict as prepare_request_dict\nfrom botocore.config import Config as Config\nfrom botocore.discovery import EndpointDiscoveryHandler as EndpointDiscoveryHandler\nfrom botocore.discovery import EndpointDiscoveryManager as EndpointDiscoveryManager\nfrom botocore.discovery import (\n block_endpoint_discovery_required_operations as block_endpoint_discovery_required_operations,\n)\nfrom botocore.exceptions import ClientError as ClientError\nfrom botocore.exceptions import DataNotFoundError as DataNotFoundError\nfrom botocore.exceptions import (\n InvalidEndpointDiscoveryConfigurationError as InvalidEndpointDiscoveryConfigurationError,\n)\nfrom botocore.exceptions import OperationNotPageableError as OperationNotPageableError\nfrom botocore.exceptions import (\n UnknownSignatureVersionError as UnknownSignatureVersionError,\n)\nfrom botocore.history import get_global_history_recorder as get_global_history_recorder\nfrom botocore.hooks import first_non_none_response as first_non_none_response\nfrom botocore.model import ServiceModel as ServiceModel\nfrom botocore.paginate import Paginator as Paginator\nfrom botocore.retries import adaptive as adaptive\nfrom botocore.retries import standard as standard\nfrom botocore.utils import CachedProperty as CachedProperty\nfrom botocore.utils import S3ArnParamHandler as S3ArnParamHandler\nfrom botocore.utils import S3ControlArnParamHandler as S3ControlArnParamHandler\nfrom botocore.utils import S3ControlEndpointSetter as S3ControlEndpointSetter\nfrom botocore.utils import S3EndpointSetter as S3EndpointSetter\nfrom botocore.utils import S3RegionRedirector as S3RegionRedirector\nfrom botocore.utils import ensure_boolean as ensure_boolean\nfrom botocore.utils import get_service_module_name as get_service_module_name\n\nclass ClientCreator:\n def __init__(\n self,\n loader: Any,\n endpoint_resolver: Any,\n user_agent: str,\n event_emitter: Any,\n retry_handler_factory: Any,\n retry_config_translator: Any,\n response_parser_factory: Optional[Any] = ...,\n exceptions_factory: Optional[Any] = ...,\n config_store: Optional[Any] = ...,\n ) -> None: ...\n def create_client(\n self,\n service_name: str,\n region_name: str,\n is_secure: bool = ...,\n endpoint_url: Optional[str] = ...,\n verify: Optional[Union[str, bool]] = ...,\n credentials: Optional[Any] = ...,\n scoped_config: Optional[Any] = ...,\n api_version: Optional[str] = ...,\n client_config: Optional[Config] = ...,\n ) -> None: ...\n def create_client_class(\n self, service_name: str, api_version: Optional[Any] = ...\n ) -> None: ...\n\nclass ClientEndpointBridge:\n DEFAULT_ENDPOINT: str = ...\n service_signing_name: Any = ...\n endpoint_resolver: Any = ...\n scoped_config: Any = ...\n client_config: Any = ...\n default_endpoint: Any = ...\n def __init__(\n self,\n endpoint_resolver: Any,\n scoped_config: Optional[Any] = ...,\n client_config: Optional[Any] = ...,\n default_endpoint: Optional[str] = ...,\n service_signing_name: Optional[Any] = ...,\n ) -> None: ...\n def resolve(\n self,\n service_name: Any,\n region_name: Optional[str] = ...,\n endpoint_url: Optional[str] = ...,\n is_secure: bool = ...,\n ) -> None: ...\n\nclass BaseClient:\n meta: ClientMeta\n def __init__(\n self,\n serializer: Any,\n endpoint: 
str,\n response_parser: Any,\n event_emitter: Any,\n request_signer: Any,\n service_model: Any,\n loader: Any,\n client_config: Config,\n partition: Any,\n exceptions_factory: Any,\n ) -> None: ...\n def __getattr__(self, item: str) -> Any: ...\n def get_paginator(self, operation_name: str) -> Paginator: ...\n def can_paginate(self, operation_name: str) -> bool: ...\n def get_waiter(self, waiter_name: str) -> waiter.Waiter: ...\n def waiter_names(self) -> None: ...\n @property\n def exceptions(self) -> Any: ...\n\nclass ClientMeta:\n events: Any = ...\n def __init__(\n self,\n events: Any,\n client_config: Config,\n endpoint_url: str,\n service_model: Any,\n method_to_api_mapping: Dict[str, str],\n partition: Any,\n ) -> None: ...\n @property\n def service_model(self) -> Any: ...\n @property\n def region_name(self) -> str: ...\n @property\n def endpoint_url(self) -> str: ...\n @property\n def config(self) -> Any: ...\n @property\n def method_to_api_mapping(self) -> Dict[str, str]: ...\n @property\n def partition(self) -> str: ...\n","sub_path":"typings/botocore/client.pyi","file_name":"client.pyi","file_ext":"pyi","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"250653581","text":"# save comment todo\n# only open one app todo\n\nfrom datetime import datetime\n\nimport fuckit\nfrom PyQt5.QtWidgets import QDialog, QApplication, QPushButton, QVBoxLayout\nfrom PyQt5.uic.properties import QtWidgets, QtCore\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport matplotlib.pyplot as plt\n\nimport os\nimport random\nimport shutil\nimport sys\nimport time\nimport threading\nimport errno\nimport queue\nimport requests\nimport webbrowser\nimport schedule\nimport csv\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5 import uic\nfrom PyQt5.QtCore import QSettings, QFileInfo\nfrom PyQt5.QtWidgets import qApp, QApplication, QMainWindow, QFormLayout, QLineEdit, QTabWidget, QWidget, QAction\nfrom tqdm import tqdm\n\nfrom ui import MainWindow\n# from credential import License_class\n\nsys.path.append(os.path.join(sys.path[0], '../'))\nfrom instabot import Bot\n\npath = os.path.expanduser(\"~/Testing/\")\npackage = 1 # 0=free 1=purchased todo\n\n\n# SAVE AND RESTORE LAST USER INPUT\n# FUNC ( restore, save, Mainwindow_class.setting, closeEvent)\ndef restore(settings):\n finfo = QtCore.QFileInfo(settings.fileName())\n if finfo.exists() and finfo.isFile():\n for w in QtWidgets.qApp.allWidgets():\n mo = w.metaObject()\n if w.objectName() and not w.objectName().startswith(\"qt_\"):\n settings.beginGroup(w.objectName())\n for i in range(mo.propertyCount(), mo.propertyOffset() - 1, -1):\n prop = mo.property(i)\n if prop.isWritable():\n name = prop.name()\n val = settings.value(name, w.property(name))\n if str(val).isdigit():\n val = int(val)\n w.setProperty(name, val)\n settings.endGroup()\n\n\ndef save(settings):\n for w in QtWidgets.qApp.allWidgets():\n mo = w.metaObject()\n if w.objectName() and not w.objectName().startswith(\"qt_\"):\n settings.beginGroup(w.objectName())\n for i in range(mo.propertyCount()):\n prop = mo.property(i)\n name = prop.name()\n if prop.isWritable():\n settings.setValue(name, w.property(name))\n settings.endGroup()\n\n\n# UI FORMAT\n# class MainWindow_class(QtWidgets.QMainWindow):\n# # RESTORE FILE LOCATION NAME\n#\n# def __init__(self):\n# 
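The .pyi stub above types BaseClient.get_paginator as returning a botocore Paginator. A short usage sketch against the real boto3/botocore API — the bucket name is a placeholder:

# Paginate an S3 listing; get_paginator/paginate are the real botocore API.
import boto3

s3 = boto3.client('s3')
paginator = s3.get_paginator('list_objects_v2')
for page in paginator.paginate(Bucket='my-bucket'):  # placeholder bucket
    for obj in page.get('Contents', []):
        print(obj['Key'])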
QtCore.QCoreApplication.processEvents()\n# QtWidgets.QMainWindow.__init__(self)\n# uic.loadUi(\"ui/MainWindow.ui\", self)\n\n# PY FORMAT\nclass MainWindow_class(MainWindow.Ui_MainWindow, QtWidgets.QMainWindow):\n def __init__(self):\n super(MainWindow.Ui_MainWindow, self).__init__()\n self.setupUi(self)\n\n self.settings = QSettings(path + \"gui.ini\", QSettings.IniFormat)\n\n # restore(self.settings)\n\n # OFFICIAL\n self.pushButton_run.clicked.connect(self.login_instagram)\n self.pushButton_update_status.clicked.connect(self.csv_append)\n self.pushButton_addComment.clicked.connect(self.add_to_listWidget)\n self.pushButton_delComment.clicked.connect(self.delete_line_listWidget)\n self.pushButton_stop.clicked.connect(self.logout)\n\n self.comboBox_follow.currentIndexChanged.connect(self.update_label_follow)\n self.comboBox_like.currentIndexChanged.connect(self.update_label_like)\n self.comboBox_comment.currentIndexChanged.connect(self.update_label_comment)\n\n self.radioButton_slow.clicked.connect(self.rButton_slow)\n self.radioButton_standard.clicked.connect(self.rButton_standard)\n self.radioButton_fast.clicked.connect(self.rButton_fast)\n\n self.checkBox_private.clicked.connect(self.coming_soon)\n self.checkBox_no_profilePic.clicked.connect(self.coming_soon)\n self.checkBox_business.clicked.connect(self.coming_soon)\n self.checkBox_verified.clicked.connect(self.coming_soon)\n\n # TESTING\n self.button_testing.clicked.connect(self.click_testing)\n # self.pushButton.clicked.connect(self.save_following)\n # self.pushButton.clicked.connect(self.enable_tab)\n\n\n # SHOW OUTPUT IN QTextEdit\n stdout = OutputWrapper(self, True)\n stdout.outputWritten.connect(self.handleOutput)\n stderr = OutputWrapper(self, False)\n stderr.outputWritten.connect(self.handleOutput)\n\n # PASS UI OBJECT NAME TO WORKTHREAD CLASS\n self.workThread = workThread(groupBox_follow=self.groupBox_follow,\n comboBox_follow=self.comboBox_follow,\n lineEdit_follow=self.lineEdit_follow,\n\n spinBox_getfollowers=self.spinBox_getfollowers,\n spinBox_getfollowing=self.spinBox_getfollowing,\n\n groupBox_unfollow=self.groupBox_unfollow,\n radioButton_nonfollowers=self.radioButton_nonfollowers,\n radioButton_unfollowAll=self.radioButton_unfollowAll,\n radioButton_restoreFollowing=self.radioButton_restoreFollowing,\n\n groupBox_like=self.groupBox_like,\n lineEdit_like=self.lineEdit_like,\n comboBox_like=self.comboBox_like,\n spinBox_nlikes=self.spinBox_nlikes,\n\n groupBox_comment=self.groupBox_comment,\n comboBox_comment=self.comboBox_comment,\n lineEdit_comment=self.lineEdit_comment,\n listWidget=self.listWidget,\n\n groupBox_combo=self.groupBox_combo,\n spinBox_nlikes_combo=self.spinBox_nlikes_combo,\n comboBox_combo=self.comboBox_combo,\n lineEdit_combo=self.lineEdit_combo,\n\n return_base_path=self.return_base_path(),\n\n )\n\n self.Canvas = Canvas(groupBox_2=self.groupBox_2,\n csv_file_path=self.csv_file_path,\n username=self.username,\n )\n\n def closeEvent(self, event):\n save(self.settings)\n QtWidgets.QMainWindow.closeEvent(self, event)\n\n def create_path(self):\n base_path = self.return_base_path()\n if not os.path.exists(base_path):\n if not os.path.exists(path):\n os.mkdir(path)\n os.mkdir(base_path)\n else:\n os.mkdir(base_path)\n else:\n pass\n\n def return_base_path(self):\n # C:\\Users\\khair\\Testing\\vicode.co\\\n base_path = path + self.username() + \"/\"\n return base_path\n\n def username(self):\n username = str(self.lineEdit_username.text()).lower().strip()\n return username\n\n def csv_file_path(self):\n 
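The restore()/save() helpers above walk every widget's writable Qt properties through QSettings. The core QSettings round-trip, reduced to a single value (the key name is assumed for illustration):

# Minimal QSettings round-trip behind the restore()/save() helpers.
from PyQt5.QtCore import QSettings

settings = QSettings("gui.ini", QSettings.IniFormat)
settings.setValue("lineEdit_username/text", "vicode.co")  # assumed key
settings.sync()                                           # flush to disk
print(settings.value("lineEdit_username/text"))           # -> 'vicode.co'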
csv_file = str(self.return_base_path() + \"{}.csv\".format(self.username()))\n return csv_file\n\n def setting(self):\n global bot\n bot = Bot(\n base_path=self.return_base_path(),\n # proxy=None,\n max_likes_per_day=self.spinBox_like.value(),\n # max_unlikes_per_day=1000,\n max_follows_per_day=self.spinBox_follow.value(),\n max_unfollows_per_day=self.spinBox_unfollow.value(),\n max_comments_per_day=self.spinBox_comment.value(),\n # max_blocks_per_day=100,\n # max_unblocks_per_day=100,\n # max_likes_to_like=10000,\n # min_likes_to_like=2,\n # max_messages_per_day=300,\n filter_users=False,\n filter_private_users=False,\n filter_users_without_profile_photo=False,\n filter_previously_followed=True,\n filter_business_accounts=False,\n filter_verified_accounts=False,\n # max_followers_to_follow=2000,\n # min_followers_to_follow=10,\n # max_following_to_follow=2000,\n # min_following_to_follow=10,\n # max_followers_to_following_ratio=10,\n # max_following_to_followers_ratio=2,\n # min_media_count_to_follow=3,\n # max_following_to_block=2000,\n like_delay=40,\n # unlike_delay=10,\n follow_delay=60,\n unfollow_delay=60,\n comment_delay=120,\n # block_delay=30,\n # unblock_delay=30,\n message_delay=90,\n # stop_words=('shop', 'store', 'free'),\n # blacklist_hashtags=['#shop', '#store', '#free'],\n # blocked_actions_protection=True,\n # verbosity=True,\n # device=None)\n )\n\n QtCore.QCoreApplication.processEvents()\n def login_instagram(self):\n self.pushButton_run.setEnabled(False) # disable start button\n self.tabWidget.setTabEnabled(0, False) #disable tab home\n self.create_path()\n self.setting()\n\n password = str(self.lineEdit_password.text()).strip()\n\n if bot.login(username=self.username(), password=password) == 1:\n # ALL TASK START HERE AFTER LOGIN\n self.csv_check()\n self.ask_save_current_following()\n self.workThread.start()\n\n else:\n QtWidgets.QMessageBox.warning(self, \"Ooopps\", \"wrong username or password\"\n \"\\n press Stop button and re-enter\")\n\n def logout(self):\n try:\n self.workThread.terminate()\n self.tabWidget.setTabEnabled(0, True)\n self.pushButton_run.setEnabled(True)\n bot.logout()\n except:\n print(\"logout error\")\n\n\n # TESTING\n def click_testing(self):\n pass\n\n def enable_tab(self):\n self.tabWidget.setTabEnabled(1, True)\n\n def add_to_listWidget(self):\n self.listWidget.addItem(self.lineEdit_commentText.text())\n self.lineEdit_commentText.setText(\"\")\n self.lineEdit_commentText.setFocus()\n\n def delete_line_listWidget(self):\n self.listWidget.takeItem(self.listWidget.currentRow())\n\n def csv_check(self): # success create csv file\n if not os.path.exists(self.csv_file_path()):\n with open(self.csv_file_path(), \"w\") as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow(['dateTime', 'followers'])\n else:\n self.csv_append()\n\n def csv_append(self):\n data = bot.save_user_stats(self.username())\n user_dateTime = str(data['date'])\n user_following = str(data['following'])\n user_followers = str(data['followers'])\n\n self.lineEdit_following.setText(user_following)\n self.lineEdit_followers.setText(user_followers)\n\n with open(self.csv_file_path(), \"a\") as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow([user_dateTime, user_followers])\n\n def coming_soon(self):\n QtWidgets.QMessageBox.information(self, \"info\", \"Coming Soon don't forget to purchase full package\")\n\n def update_label_follow(self):\n combobox = self.comboBox_follow.currentText()\n if combobox == \"hashtags\":\n self.label_follow.setText(\"of hashtag\")\n 
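csv_check/csv_append above grow a two-column followers log, writing the header only on first creation. That append-a-row pattern in isolation — file name and row values are assumptions:

# Append one (dateTime, followers) row, creating the header on first run.
import csv
import os
from datetime import datetime

path = "stats.csv"  # assumed file name
new_file = not os.path.exists(path)
with open(path, "a", newline="") as f:
    w = csv.writer(f)
    if new_file:
        w.writerow(["dateTime", "followers"])
    w.writerow([datetime.now().isoformat(), 1234])  # assumed values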
self.lineEdit_follow.setPlaceholderText(\"tag1,tag2,tag3\")\n\n else:\n self.label_follow.setText(\"of username\")\n self.lineEdit_follow.setPlaceholderText(\"username1,username2,username3\")\n\n def update_label_like(self):\n combobox = self.comboBox_like.currentText()\n if combobox == \"hashtags\":\n self.label_like.setText(\"of hashtag\")\n self.lineEdit_like.setPlaceholderText(\"tag1,tag2,tag3\")\n self.spinBox_nlikes.setValue(50)\n else:\n self.label_like.setText(\"of username\")\n self.lineEdit_like.setPlaceholderText(\"username1,username2,username3\")\n\n def update_label_comment(self):\n combobox = self.comboBox_comment.currentText()\n if combobox == \"hashtags\":\n self.label_comment.setText(\"of hashtag\")\n self.lineEdit_comment.setPlaceholderText(\"tag1,tag2,tag3\")\n if combobox == \"my timeline\":\n self.label_comment.setText(\"of username\")\n self.lineEdit_comment.setPlaceholderText(\"my username\")\n else:\n self.label_comment.setText(\"of username\")\n self.lineEdit_comment.setPlaceholderText(\"username1,username2,username3\")\n\n def rButton_slow(self):\n if package == 0:\n QtWidgets.QMessageBox.information(self, \"Info\", \"Grow your instagram fastly just\\n\"\n \"purchase full package to customize your setting\")\n else:\n self.spinBox_follow.setValue(50)\n self.spinBox_unfollow.setValue(30)\n self.spinBox_like.setValue(50)\n self.spinBox_comment.setValue(7)\n self.spinBox_getfollowers.setValue(100)\n self.spinBox_getfollowing.setValue(100)\n\n def rButton_standard(self):\n if package == 0:\n self.radioButton_slow.setChecked(True)\n QtWidgets.QMessageBox.information(self, \"Info\", \"To use this setting you need\\n\"\n \"to purchase full package\")\n\n else:\n self.spinBox_follow.setValue(500)\n self.spinBox_unfollow.setValue(500)\n self.spinBox_like.setValue(750)\n self.spinBox_comment.setValue(50)\n self.spinBox_getfollowers.setValue(1000)\n self.spinBox_getfollowing.setValue(1000)\n\n def rButton_fast(self):\n if package == 0:\n self.radioButton_slow.setChecked(True)\n QtWidgets.QMessageBox.information(self, \"Info\", \"To use this setting you need\\n\"\n \"to purchase full package\")\n else:\n self.spinBox_follow.setValue(1000)\n self.spinBox_unfollow.setValue(1000)\n self.spinBox_like.setValue(1500)\n self.spinBox_comment.setValue(100)\n self.spinBox_getfollowers.setValue(10000)\n self.spinBox_getfollowing.setValue(10000)\n\n self.spinBox_follow.setReadOnly(False)\n self.spinBox_unfollow.setReadOnly(False)\n self.spinBox_like.setReadOnly(False)\n self.spinBox_comment.setReadOnly(False)\n self.spinBox_getfollowers.setReadOnly(False)\n self.spinBox_getfollowing.setReadOnly(False)\n\n def handleOutput(self, text, stdout):\n self.textEdit.moveCursor(QtGui.QTextCursor.End)\n self.textEdit.insertPlainText(text)\n\n def save_following(self):\n friends = bot.following\n with open(self.return_base_path() + \"friends.txt\", \"w\") as file: # writing to the file\n for user_id in friends:\n file.write(str(user_id) + \"\\n\")\n\n def ask_save_current_following(self):\n reply = QtWidgets.QMessageBox.information(self, 'Dear user,', \"Do you want to save your current following?\",\n QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,\n QtWidgets.QMessageBox.Yes)\n if reply == QtWidgets.QMessageBox.Yes:\n # IF \"YES\" DOWNLOAD AND EXECUTE FILE WITH PROGRESSBAR/ IMPORT PROGRESS.PY\n self.save_following()\n else:\n # IF \"NO\" CLOSE MESSAGEBOX\n pass\n\n # def open_license(self):\n # self.license = License_class()\n # self.license.show()\n\n # TAB DASHBOARD\n # todo\n def 
update_task_status(self):\n likes = str(bot.total['likes'])\n # follow = str(bot.total['follows'])\n # unfollow = str(bot.total['unfollows'])\n # comment = str(bot.total['comments'])\n\n # self.lineEdit_total_follow.setText(follow)\n # self.lineEdit_total_unfollow.setText(unfollow)\n # self.lineEdit_total_likes.setText(likes)\n # self.lineEdit_total_comment.setText(comment)\n\n\nclass Canvas(FigureCanvas):\n # 1) call function in mainwindowclass.csv_file_path to find csv file\n # 2) draw graph based on csv file\n\n def __init__(self, groupBox_2, csv_file_path, username, parent=None):\n self.figure = plt.figure()\n FigureCanvas.__init__(self, self.figure)\n\n self.groupBox_2 = groupBox_2\n self.csv_file_path = csv_file_path\n self.username = username\n\n # a figure instance to plot on\n self.figure = plt.figure()\n\n # this is the Canvas Widget that displays the `figure`\n # it takes the `figure` instance as a parameter to __init__\n self.canvas = FigureCanvas(self.figure)\n\n # this is the Navigation widget\n # it takes the Canvas widget and a parent\n self.toolbar = NavigationToolbar(self.canvas, self)\n\n # Just some button connected to `plot` method\n self.button = QPushButton('Plot')\n self.button.clicked.connect(self.plotgraph)\n\n # TESTING\n # self.button.clicked.connect(self.testing)\n\n # set the layout\n layout = QVBoxLayout()\n layout.addWidget(self.toolbar)\n layout.addWidget(self.canvas)\n layout.addWidget(self.button)\n self.groupBox_2.setLayout(layout) # put the layout in groupbox2\n\n def plot(self):\n import pandas as pd\n path = self.csv_file_path()\n\n # read data from file\n data = pd.read_csv(path)\n\n # instead of ax.hold(False)\n self.figure.clear()\n\n # create an axis\n ax = self.figure.add_subplot(111)\n\n # plot data\n ax.plot(data.dateTime, data.followers, '*-')\n plt.title(\"followers of @\" + self.username())\n plt.xlabel(\"date and time\")\n plt.ylabel(\"followers growth\")\n\n # _ = plt.xticks(rotation=45)\n\n # refresh canvas\n self.canvas.draw()\n\n def plotgraph(self):\n try:\n self.plot()\n except:\n print(\"insert username to find csv path\")\n\n def testing(self):\n print(\"name of @\" + self.username())\n\n\n# MAKE THREAD SO THAT UI DIDNT FREEZE\nclass workThread(QtCore.QThread):\n my_signal = QtCore.pyqtSignal()\n\n def __init__(self,\n groupBox_follow,\n comboBox_follow,\n lineEdit_follow,\n spinBox_getfollowers,\n spinBox_getfollowing,\n\n groupBox_unfollow,\n radioButton_nonfollowers,\n radioButton_unfollowAll,\n radioButton_restoreFollowing,\n\n groupBox_like,\n lineEdit_like,\n comboBox_like,\n spinBox_nlikes,\n\n groupBox_comment,\n comboBox_comment,\n lineEdit_comment,\n listWidget,\n\n groupBox_combo,\n spinBox_nlikes_combo,\n comboBox_combo,\n lineEdit_combo,\n\n return_base_path,\n parent=None):\n\n super(workThread, self).__init__(parent)\n # IMPORT UI OBJECT NAME FROM MAINWINDOW CLASS\n self.groupBox_follow = groupBox_follow\n self.comboBox_follow = comboBox_follow\n self.lineEdit_follow = lineEdit_follow\n self.spinBox_getfollowers = spinBox_getfollowers\n self.spinBox_getfollowing = spinBox_getfollowing\n\n self.groupBox_unfollow = groupBox_unfollow\n self.radioButton_nonfollowers = radioButton_nonfollowers\n self.radioButton_unfollowAll = radioButton_unfollowAll\n self.radioButton_restoreFollowing = radioButton_restoreFollowing\n\n self.groupBox_like = groupBox_like\n self.lineEdit_like = lineEdit_like\n self.comboBox_like = comboBox_like\n self.spinBox_nlikes = spinBox_nlikes\n\n self.groupBox_comment = groupBox_comment\n 
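The Canvas class above embeds a matplotlib figure inside a PyQt5 layout via FigureCanvasQTAgg. A minimal self-contained version of that embedding, with assumed plot data:

# Smallest useful matplotlib-in-PyQt5 embedding, same shape as Canvas.
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure

app = QApplication(sys.argv)
win = QWidget()
fig = Figure()
fig.add_subplot(111).plot([0, 1, 2], [10, 12, 15], '*-')  # assumed data
layout = QVBoxLayout(win)
layout.addWidget(FigureCanvas(fig))  # the canvas is just another widget
win.show()
sys.exit(app.exec_())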
self.comboBox_comment = comboBox_comment\n self.lineEdit_comment = lineEdit_comment\n self.listWidget = listWidget\n\n self.groupBox_combo = groupBox_combo\n self.spinBox_nlikes_combo = spinBox_nlikes_combo\n self.comboBox_combo = comboBox_combo\n self.lineEdit_combo = lineEdit_combo\n\n self.return_base_path = return_base_path\n\n def follow(self):\n # IF THE GROUPBOX IS CHECK, FOLLOW USER WITH THAT #\n if self.groupBox_follow.isChecked():\n lineEdit = str(self.lineEdit_follow.text()).strip().split(\",\")\n\n if self.comboBox_follow.currentText() == \"hashtags\":\n for hashtag in lineEdit:\n # print(\"Begin hahstag: \" + hashtag)\n users = bot.get_hashtag_users(hashtag)\n bot.follow_users(users)\n\n if self.comboBox_follow.currentText() == \"followers\":\n for username in lineEdit:\n # print(\"Begin followers: \" + username)\n bot.follow_followers(username, nfollows=self.spinBox_getfollowers.value())\n\n if self.comboBox_follow.currentText() == \"following\":\n for username in lineEdit:\n # print(\"Begin following: \" + username)\n bot.follow_following(username, nfollows=self.spinBox_getfollowing.value())\n else:\n print(\"groupBox follow not check\")\n pass\n\n def unfollow(self):\n if self.groupBox_unfollow.isChecked():\n if self.radioButton_nonfollowers.isChecked():\n bot.unfollow_non_followers()\n\n if self.radioButton_unfollowAll.isChecked():\n bot.unfollow_everyone()\n\n if self.radioButton_restoreFollowing.isChecked():\n friends = bot.read_list_from_file(self.return_base_path + \"friends.txt\") # getting the list of friends\n your_following = bot.following\n unfollow = list(set(your_following) - set(friends)) # removing your friends from the list to unfollow\n bot.unfollow_users(unfollow)\n else:\n print(\"groupbox unfollow uncheck\")\n\n def like(self):\n if self.groupBox_like.isChecked():\n lineEdit = str(self.lineEdit_like.text()).strip().split(\",\")\n\n if self.comboBox_like.currentText() == \"hashtags\":\n for hashtag in lineEdit:\n # print(\"Begin like#: \" + hashtag)\n bot.like_hashtag(hashtag, amount=self.spinBox_nlikes.value())\n\n if self.comboBox_like.currentText() == \"followers\":\n for username in lineEdit:\n # print(\"Begin likefollowers: \" + username)\n bot.like_followers(username, nlikes=self.spinBox_nlikes.value())\n\n if self.comboBox_like.currentText() == \"following\":\n for username in lineEdit:\n # print(\"Begin following: \" + username)\n bot.like_following(username, nlikes=self.spinBox_nlikes.value())\n else:\n print(\"groupBox_like not check\")\n pass\n\n def comment(self):\n if self.groupBox_comment.isChecked():\n comment_text = random.choice(self.comment_list())\n lineEdit = str(self.lineEdit_comment.text()).strip().split(\",\")\n\n if self.comboBox_comment.currentText() == \"hashtags\":\n for hashtag in lineEdit:\n bot.comment_hashtag(hashtag, text=comment_text)\n\n if self.comboBox_comment.currentText() == \"my timeline\":\n bot.comment_medias(bot.get_timeline_medias(), text=comment_text)\n else:\n print(\"groupbox comment no check\")\n\n def comment_list(self):\n list = []\n for i in range(self.listWidget.count()):\n text = self.listWidget.item(i).text()\n list.append(text)\n return list\n\n def like_follow(self):\n usernames = str(self.lineEdit_combo.text()).strip().split(\",\")\n for username in usernames:\n user_id = bot.get_user_id_from_username(username)\n\n if self.comboBox_combo.currentText() == \"followers\":\n # print(\"combo followers\")\n followers_list_id = bot.get_user_followers(user_id, nfollows=self.spinBox_getfollowers.value())\n for 
username_id in followers_list_id:\n new_user_id = username_id.strip()\n bot.like_user(new_user_id, amount=self.spinBox_nlikes_combo.value())\n bot.follow(new_user_id)\n time.sleep(30 + 20 * random.random())\n print(\"complete combo followers task\")\n\n if self.comboBox_combo.currentText() == \"following\":\n # print(\"combo following\")\n following_list_id = bot.get_user_following(user_id, nfollows=self.spinBox_getfollowing.value())\n for username_id in following_list_id:\n new_user_id = username_id.strip()\n bot.like_user(new_user_id, amount=self.spinBox_nlikes_combo.value())\n bot.follow(new_user_id)\n time.sleep(30 + 20 * random.random())\n print(\"complete combo following task\")\n\n if self.comboBox_combo.currentText() == \"likers\":\n # print(\"combo likers\")\n for username in usernames:\n medias = bot.get_user_medias(username, filtration=False)\n if len(medias):\n likers = bot.get_media_likers(medias)\n for liker in tqdm(likers):\n bot.like_user(liker, amount=self.spinBox_nlikes_combo.value())\n bot.follow(liker)\n print(\"complete combo likers task\")\n\n def run_threaded(self, job_func):\n job_thread = threading.Thread(target=job_func)\n job_thread.start()\n\n\n def jobs(self):\n self.follow()\n self.like()\n self.comment()\n self.unfollow()\n\n def check_job(self):\n start_time = datetime.now().strftime(\"%H:%M\")\n if self.groupBox_combo.isChecked():\n # official\n self.run_threaded(self.like_follow)\n schedule.every().day.at(start_time).do(self.run_threaded, self.unfollow)\n else:\n self.run_threaded(self.jobs)\n # schedule.every().day.at(start_time).do(self.run_threaded, self.jobs)\n\n\n def loop_job(self):\n try:\n self.check_job()\n except:\n self.loop_job()\n\n # ALL FUNCTION IN WORKTHREAD START HERE\n # if combo selected run combo\n # else run schedule\n\n# official\n# @fuckit\n def run(self):\n # todo check expired date\n # OFFICIAl\n\n self.loop_job()\n while 1:\n schedule.run_pending()\n print(\"shchedule run pending\")\n time.sleep(5*60)\n\n\n\n\nclass OutputWrapper(QtCore.QObject):\n \"\"\" to show all output in ui text edit\"\"\"\n outputWritten = QtCore.pyqtSignal(object, object)\n\n def __init__(self, parent, stdout=True):\n QtCore.QObject.__init__(self, parent)\n if stdout:\n self._stream = sys.stdout\n sys.stdout = self\n else:\n self._stream = sys.stderr\n sys.stderr = self\n self._stdout = stdout\n\n def write(self, text):\n self._stream.write(text)\n self.outputWritten.emit(text, self._stdout)\n\n def __getattr__(self, name):\n return getattr(self._stream, name)\n\n def __del__(self):\n try:\n if self._stdout:\n sys.stdout = self._stream\n else:\n sys.stderr = self._stream\n except AttributeError:\n pass\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = MainWindow_class()\n MainWindow.show()\n sys.exit(app.exec_())\n","sub_path":"MyappV2/vetogram.py","file_name":"vetogram.py","file_ext":"py","file_size_in_byte":28215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"1975063","text":"import webbrowser \nimport tweepy\n\nconsumer_key = \"\"\nconsumer_secret = \"\"\n\ncallback_uri = 'oob'\n\n# authenticate to Twitter\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret, callback_uri)\n\n# get user pin and tokens\nredirect_url = auth.get_authorization_url()\nwebbrowser.open(redirect_url)\nuser_pin_input = input(\"What is the pin value? 
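workThread above subclasses QtCore.QThread, overrides run(), and declares a pyqtSignal for results. The same worker pattern in miniature — the signal payload and the sleep stand in for the real job:

# Minimal QThread + pyqtSignal worker, mirroring workThread's structure.
import time
from PyQt5 import QtCore

class Worker(QtCore.QThread):
    done = QtCore.pyqtSignal(str)  # assumed payload type

    def run(self):
        time.sleep(1)              # stand-in for the real work
        self.done.emit("finished")

# usage: w = Worker(); w.done.connect(print); w.start()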
\")\nauth.get_access_token(user_pin_input)\n\n# create API object\napi = tweepy.API(auth)\n\n# get twitter handle\nsettings = api.get_settings()\nprint(settings['screen_name'])\n\n# create a tweet\napi.update_status(\"Hello Tweepy\")","sub_path":"helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"418773986","text":"import time\nimport re\nimport random\n#time.sleep(0.5)\n\n#template\nbot_template = \"BOT : {0}\\n\"\nuser_template = \"USER : {0}\"\n\n###kumpulan respon (dictionary)###\n\n#kata kunci intent\nkeywords = {\n 'greet' : ['hello', 'hey', 'hi', 'halo','helo','wassup', 'whats up', 'bruh'], #1\n 'introduce' : ['who are you', 'introduce yourself', 'am i know you?', 'what are you'], #2\n 'goodbye' : ['bye', 'farewell', 'nice to meet you','see ya'], #3\n 'thankyou' : ['thank', 'thx'], #4\n 'sgreet':['hello there'], #5\n 'ask name':['your name', 'name?', 'name'], #6\n 'remember':['do you remember','remember'], #7\n 'want':['i want', 'wanna', 'want', 'can i', 'may i'], #8\n 'question' : ['do you mind','what do you', 'what can you do','can you'], #9\n 'AYcircum' : ['how are you', 'how do you do', 'how\\'s life','how\\'s going', 'what\\'s wrong with you'], #10\n 'beat' : ['by the way', 'btw', 'you know what','\\.....','um','hm'], #11\n 'callyou' : ['echo bot','oi', 'hei echo', 'echo', 'bot'], #12\n 'old' : ['how old','how old are you?', 'old', 'your birthday'], #13\n 'think' : ['what do you think','what do you thing about', 'have you ever think', 'think', 'thinking about'],#14\n '+Uanswer' : ['yes', 'sure', 'why not', 'yes iam','interesting', 'really','great'],#15\n '-Uanswer' :['nope', 'dunno', 'well, no', 'no','dont','don\\'t'],#16\n 'gender' : ['gender', 'what is your gender', 'you have any gender?', 'are you male or female', 'male', 'female'],#17\n 'comand' : ['\\@','\\$','\\>','\\/',],#18\n\n}\n\n#untuk meng membuat sebuah patterns\npatterns = {}\n\nfor intent, keys in keywords.items():\n patterns [intent] = re.compile('|'.join(keys))\n\n#respon dari kata kunci\nresponses = {\n \"introduce\":[\n 'hello, my name is EchoBOT, and who are you?', \n 'Iam Echo BOt, a simple chat bot',\n 'let me introduce myself, my name is echo Bot',\n 'hey, iam ECHO BOT, a simple chat bot made in python, by Asthi21, do you know him?',\n 'hello, my name is Echo Bot nice to meet you',\n 'iam just a few hundreds lines of code'\n ],\n\n 'old' : ['dunno man', 'I have no idea about it','Bruh... you know i am a bot...'],\n\n \"greet\" : [\"hello\", 'hello, how are you', 'hello there', 'Bruh...', 'Yo, wassup'],\n\n 'AYcircum' : [\n 'iam fine, thanks','iam great', 'great, what about you?', 'iam fine',\n 'nothing happens','not good, what about you?', 'Daijoubou desu....'\n ],\n\n 'default':[\n 'Please rephrase...', \n \"Very interesting\",\n \"I am not sure I understand you fully\",\n \"What does that suggest to you?\",\n \"Please continue\",\n \"Go on\",\n \"Do you feel strongly about discussing such things?\",\n \"i dont get it\",\n \"i still dont understand\",\n \"i still dont get it\",\n \"could you explain\",\n \"explain yourself please\",\n \"do you have the slightest idea how little that narrows it down?\",\n \"I can’t get my head around it. 
Wait, i didnt had any head :D\",\n \"I have no clue\"\n ],\n\n 'ask name' : [\n \"yes\", \n \"sure\",\"My name is EchoBot, and you?\", \n \"they call me echo bot, what about you?\", \n \"Echo BOT\",\n \"my name is ECHO BOT\",\n 'you can call me ECHO BOT'\n ],\n\n 'goodbye': [\n 'farewell then',\n 'nice to meet you',\n 'see ya',\n 'see you next time',\n 'bye bye'\n ],\n\n 'thankyou':[\n \"your welcome\",\n 'no problem',\n 'okay',\n 'dont mind it'\n ],\n\n 'sgreet':['general kenobi'],\n\n 'remember': [\n 'Did you think I would forget{0}',\n \"Why haven't you been able to forget{0}\",\n 'What about{0}',\n 'Yes .. and?',\n \"yes... why?\"\n ],\n\n 'want' : [\n 'What would it mean if you got{0}',\n 'Why do you want to {0}',\n \"What's stopping you from getting{0}\",\n 'no, you can\\'t{0}',\n 'yes, you can'\n ],\n\n 'question' : [\n 'yes... yes iam',\n 'yes i can{0}',\n 'if {0}? Absolutely.',\n 'No chance',\n 'just {0}? Sure',\n 'yes',\n 'yes..... just kidding, i cant do anything :D',\n ],\n\n 'beat' : ['yes?', 'what?', 'go on...', 'take your time', '...?','yes?'],\n\n 'callyou' : ['yes?', 'can i help you?', 'what?','....?','iam here', 'whats wrong?','yes? can i help you?', 'did you just called me?'],\n\n 'think' : ['i am thingking about you <(U-w-U)>', 'yeah i think {0} is a great idea','its great','nothing','NOPE, dont worrry, i cant think', 'What do you think about {0}'],\n\n '+Uanswer' : ['yes','good', 'great', 'wow, really?', 'thats great', 'NICE', 'cool','ok','okay'],\n '-Uanswer' : ['okay then', 'well, its okay'],\n\n 'gender' : ['Dunno', 'Dunno man...', 'i have no idea','i dont think i have it','well yes, but actually no','I dont know','idk'],\n 'comand' : [\n '-_-', 'do you think i have some comand? haha, unfortunately i dont',\n 'i dont have any comand',':D','my creator didnt smart enough to give me a command key bruh'\n ],\n 'usrname' : ['Hello, {0}!', 'hey {0}, iam echo bot', 'nice to meet you {0}', '{0}? thats a great name!'],\n\n }\n\n\n\n#jangan dihapus (dipakai jarang, dibuang sayang)\n'''\nrules = {\n 'I want (.*)': [\n 'What would it mean if you got {0}',\n 'Why do you want {0}',\n \"What's stopping you from getting {0}\"\n ],\n 'do you remember (.*)': [\n 'Did you think I would forget {0}',\n \"Why haven't you been able to forget {0}\",\n 'What about {0}',\n 'Yes .. and?',\n \"yes... why?\"\n ],\n \n 'do you think (.*)': ['if {0}? 
Absolutely.', 'No chance'],\n\n 'if (.*)': [\n \"Do you really think it's likely that {0}\",\n 'Do you wish that {0}',\n 'What do you think about {0}',\n 'Really--if {0}'\n ],\n\n 'hi|hey|heyy|halo':['HI','hello','hello there','halo'],\n\n '(.*) name (.*)':[\n \"yes\", \n \"sure\",\"My name is EchoBot\", \n \"they call me echo bot\", \n \"Echo BOT\",\n \"my name is ECHO BOT\" ],\n\n 'hello there':[\"General Kenobi\"],\n\n\n\n }\n \n\n\ndefault_responses = {\"default\" :\n [\n \"Very interesting\",\n \"I am not sure I understand you fully\",\n \"What does that suggest to you?\",\n \"Please continue\",\n \"Go on\",\n \"Do you feel strongly about discussing such things?\",\n \"i dont get it\",\n \"i still dont understand\",\n \"i still dont get it\",\n \"could you explain\",\n \"explain yourself please\",\n \"do you have the slightest idea how little that narrows it down?\",\n \"I can’t get my head around it\",\n \"I have no clue\"\n ]\n }\n'''\n\n\n#################### ini programnya ####################\n\ndef match_intent(message):\n #print(message)\n matched_intent = None\n \n for intent,pattern in patterns.items():\n if re.search(pattern,message):\n matched_intent=intent\n return matched_intent\n\ndef find_name(message):\n name = None\n # Create a pattern for checking if the keywords occur\n name_keyword = re.compile('name|call')\n # Create a pattern for finding capitalized words\n name_pattern = re.compile('[A-Z]{1}[a-z]*')\n if name_keyword.search(message):\n # Get the matching words in the string\n name_words = name_pattern.findall(message)\n if len(name_words) > 0:\n # Return the name if the keywords are present\n name = ' '.join(name_words)\n #print(name)\n return name\n\ndef replace_pronouns(message):\n message = message.lower()\n if ' I ' in message:\n return re.sub('I', 'you', message)\n if ' my ' in message:\n return re.sub('my', 'your', message)\n if ' your ' in message:\n return re.sub('your', 'my', message)\n if ' me ' in message:\n return re.sub('me', 'you', message)\n if ' you ' in message:\n return re.sub('you', 'me', message)\n\n return message\n\n \n\n#respon pesan\ndef respond(message):\n #call match_intent\n intent=match_intent(message.lower())\n name = find_name(message)\n\n key='default'\n #phrase = message\n \n if intent in responses:\n key = intent\n\n response = random.choice(responses[key])\n #Jngan dihapus\n\n #mencari jika ada {0}\n phrase = message\n if \"{0}\" in response:\n #response, phrase = match_rule(response, message)\n #membuat match objek\n match = re.search(patterns[intent], phrase) \n #menggabungkan dengan pesan\n phrase = re.sub(str(match.group()), \"\", phrase)\n # # Replace the pronouns in the phrase\n phrase = replace_pronouns(phrase) \n # Include the phrase in the response \n response = response.format(phrase) \n #print(response)\n\n if name is not None:\n response = random.choice(responses['usrname']).format(name)\n\n return response\n\n##jangan dihapus, dipakai jarang dibuang sayang##\n'''\ndef match_rule(response, message):\n #response, phrase = None, None\n # Iterate over the rules dictionary\n for pattern, response in patterns.items():\n # Create a match object\n #match = re.search(responses[response], message)\n #if match is not None:\n # Choose a random response\n #response = random.choice(responses[key])\n if '{0}' in response:\n match = re.search(responses[key], message)\n phrase = match.group(1)\n phrase = replace_pronouns(phrase)\n # Return the response and phrase\n return response, phrase#format(phrase)\n''' \n#pengirim pesan\ndef 
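replace_pronouns above tests for ' I ' with surrounding spaces but then substitutes every bare 'I' in the string, which can rewrite letters inside other words. Anchoring the patterns with \b keeps the swap to whole words; a sketch of that tightened version (the pronoun table here is an assumption, trimmed for brevity):

# Whole-word pronoun swap; \b avoids touching letters inside other words.
import re

SWAPS = [(r'\bi\b', 'you'), (r'\bmy\b', 'your'), (r'\bme\b', 'you')]

def replace_pronouns(message):
    message = message.lower()
    for pattern, repl in SWAPS:
        if re.search(pattern, message):
            return re.sub(pattern, repl, message)  # first match wins, as in the original
    return message

print(replace_pronouns("I want my book"))  # -> 'you want my book'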
send_message(message):\n #print user_template & message\n print(user_template.format(message))\n\n responses = respond(message)\n time.sleep(0.5)\n #print BOT_template &responses\n print(bot_template.format(responses))\n\n#testing\n'''\n'Helo'\n\"hi\"\n\"hey there \"\n\"hello there\"\n\"bye\"\n\"you have any name ? \"\n\"Hello There\" \n\"what's your name ?\"\n\"how are you?\")\n\"your book\"\n\"can you do something for me?\"\n\"Who are you?\"\n'do you remember about movie we watched last night?'\n'yes'\n'what are you thinking about?'\n\"btw\"'''\nsend_message('hello, my name is Seta')\n\npesan = {'item' :['Helo','can you be honest to me ',\n\"hi\",\n\"hey there \",\n\"hello there\",\n\"bye\",\n\"you have any name ? \",\n\"Hello There\", \n\"what's your name ?\",\n\"how are you?\",\n\"your book\",\n\"can you do something for me ?\",\n\"Who are you?\",\n'do you remember about movie we watched last night?',\n'yes',\n'what are you thinking about?',\n\"btw\"\n]\n}\n#send_message(random.choice(pesan['item']))","sub_path":"Echo.py","file_name":"Echo.py","file_ext":"py","file_size_in_byte":10490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"119053349","text":"from django.db import models\r\nfrom django.contrib.auth import get_user_model\r\n\r\n\r\n# Create your models here.\r\n\r\nclass Scanner(models.Model):\r\n serial_number = models.CharField(max_length=200)\r\n type = models.CharField(max_length=200)\r\n description = models.TextField()\r\n date_created = models.DateTimeField(auto_now_add=True, null=True)\r\n updated_by = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True)\r\n\r\n\r\n def __str__(self):\r\n return self.serial_number\r\n","sub_path":"scanners/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"266296995","text":"def main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(filters=dict(type='dict', default=dict()), vpn_gateway_ids=dict(type='list', default=None)))\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)\n if (not HAS_BOTO3):\n module.fail_json(msg='json and boto3 is required.')\n try:\n (region, ec2_url, aws_connect_kwargs) = get_aws_connection_info(module, boto3=True)\n connection = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)\n except botocore.exceptions.NoCredentialsError as e:\n module.fail_json(msg=(\"Can't authorize connection - \" + str(e)))\n results = list_virtual_gateways(connection, module)\n module.exit_json(result=results)","sub_path":"Data Set/bug-fixing-5/956fe7362dfedb05808f6675b5457cbb18336895-<main>-bug.py","file_name":"956fe7362dfedb05808f6675b5457cbb18336895-<main>-bug.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"611476211","text":"def solve():\n words = input().split()\n words = [word.lower() for word in words]\n\n out = ''\n for name in words[:-1]:\n out += (name[0].upper() + '. 
')\n print(out + words[-1][0].upper() + words[-1][1:])\n\ndef main():\n T = int(input())\n for _ in range(T):\n solve()\n\nmain()\n","sub_path":"codechef/july17/nitika.py","file_name":"nitika.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"425315691","text":"import time\nfrom concurrent import futures\n\nn = list(range(1, 10000))[::-1]\n\n\ndef thread(n, i):\n for j in range(0, len(n)-i-1):\n if n[j] > n[j+1]:\n n[j], n[j+1] = n[j+1], n[j]\n \n return n\ndef bubbleSort(n):\n executor = futures.ThreadPoolExecutor(max_workers=pow(len(n), 2)) \n\n for i in range(0, len(n)):\n executor_thread = executor.submit(thread, n, i)\n n = executor_thread.result()\n\n return n\n\nstart = time.perf_counter()\nprint(\"LIST BEFORE : {0}\".format(n))\nprint(\"LIST AFTER : {0}\".format(bubbleSort(n)))\nprint()\nprint(\"Time needed : {0}\".format(time.perf_counter() - start))\n","sub_path":"SortingAlgorithms/BubbleSort/bubble4.py","file_name":"bubble4.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"206665860","text":"#Cote Serveur\n\n#importation des librairies\nimport socket\nimport time\n\n\n#variables\n\ngrille = \"\"\"\n ______________\n |0 |1 |2 |\n | {} | {} | {} |\n |____|____|____|\n |3 |4 |5 |\n | {} | {} | {} |\n |____|____|____|\n |6 |7 |8 |\n | {} | {} | {} |\n |____|____|____|\n \"\"\"\n\n#On va repertorier les valeur de la grille dans un tableau pour les stocker\nempty_grid=[\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \",\" \"]\n#Le tour par joueur\ntour=0\n\n#ajouter une piece\ndef add_piece(gridgrid,x,piece):\n gridgrid[x] = piece\n\n#saisie du joueur\ndef saisiejoueur(joueur):\n global conn\n #\n if joueur%2 == 0:\n case = int(conn.recv(1024).decode())#prends le numero joueur\n #Si numero entré \n while case>8 or grid[case]!=0: #tant choix superieur à 8 ou emplacement dans la case de la grille est pas occupé\n if case>8:\n printjoueur(\"Taper un numero entre 0 et 8\",joueur)\n case = int(conn.recv(1024).decode())\n elif grid[case]!=0:\n printjoueur(\"Il y a deja un jeton la case {}\".format(case),joueur)\n case = int(conn.recv(1024).decode())\n print (\"from connected user: \" + str(case))\n \n else:\n while 1:\n try:\n inst = input(\" ? \")\n inst = int(inst)\n if not(0<= inst <= 8):\n raise ValueError()\n case = int(inst)\n break\n except ValueError:\n print(\"Un NOMBRE entre 0 et 8 ! \")\n while case>8 or grid[case]!=0:\n if case>8:\n printjoueur(\"Taper un numero entre 0 et 8\",joueur)\n case = int(input(\" ? \"))\n elif grid[case]!=0:\n printjoueur(\"Il y a deja un jeton la case {}\".format(case),joueur)\n case = int(input(\" ? 
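bubble4.py above submits each bubble pass to a ThreadPoolExecutor but immediately blocks on result(), so the passes still run strictly one at a time (and max_workers=len(n)**2 permits vastly more workers than are ever used). Dropping the executor loses nothing; the plain sort for comparison:

# Plain bubble sort; equivalent to bubble4.py since its passes were serial anyway.
def bubble_sort(nums):
    for i in range(len(nums)):
        for j in range(len(nums) - i - 1):
            if nums[j] > nums[j + 1]:
                nums[j], nums[j + 1] = nums[j + 1], nums[j]
    return nums

print(bubble_sort([5, 1, 4, 2, 3]))  # -> [1, 2, 3, 4, 5]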
\"))\n print (\"from connected user: \" + str(case))\n\n return case\n#communication\n#tous les joueurs\ndef printall(msg):\n global conn\n print(msg)\n conn.send(msg.encode())\n\n#le client seulement\ndef printclient(msg):\n global conn\n conn.send(msg.encode())\n\n#celui dont c'est le tour\ndef printjoueur(msg,joueur):\n global conn\n if joueur%2 == 0:\n printclient(msg)\n else:\n print(msg)\n\n#affichage de la grille \ndef print_grid(gridgrid):\n global conn\n #on cree une liste vide qui sera remplie avec les symboles\n grid_to_print=[]\n for n,i in enumerate(gridgrid):\n if i==0:\n grid_to_print.append(' ')\n elif i==1:\n grid_to_print.append('X')\n elif i==4:\n grid_to_print.append('O')\n #elle est affichee au serveur et au client\n print( grille.format(*tuple(grid_to_print)) )\n conn.send(grille.format(*tuple(grid_to_print)).encode())\n\n#detecter la victoire la victoire\ndef victoire(list_test):\n #somme des lignes, des colonnes, des diagonales\n #initialisation des totaux\n c1,c2,c3 = 0,0,0 \n l1=sum(list_test[0:3]) #ligne 1\n l2=sum(list_test[3:6]) #ligne 2\n l3=sum(list_test[6:9]) #ligne 3\n for i in range(3):\n c1+=list_test[3*i] #colonne 1\n c2+=list_test[3*i+1] #colonne 2\n c3+=list_test[3*i+2] #colonne 3\n d1=list_test[0]+list_test[4]+list_test[8] #diag 1\n d2=list_test[6]+list_test[4]+list_test[2] #diag 2\n scores=[l1,l2,l3,c1,c2,c3,d1,d2]\n if 3 in scores: #1+1+1 = 3 : le joueur 1 gagne\n printall(\"Joueur 1 a gagne !\")\n return(2)\n elif 12 in scores: #4+4+4 = 12 : le joueur 2 gagne\n printall(\"Joueur 2 a gagne !\")\n return(1)\n elif not 0 in list_test: #toutes les cases sont occupees : match nul\n printall(\"Match nul !\")\n return(3)\n else:\n return(0)\n\n#initialisation des variables\ndef init_game():\n global empty_grid\n global grid\n global tour\n vainqueur=0\n #reset de la grille\n grid=empty_grid\n for i in range(9):\n add_piece(grid,i,0)\n #on affiche alors message au client et au serveur\n printclient(\"Bienvenue ! Vous etes le joueur 1\")\n print(\"Bienvenue ! Vous etes le joueur 2\")\n\n#la partie elle meme\ndef jouer():\n global grid\n global tour\n global conn\n\n #on ouvre le socket... et on attend le client\n #serveur socket\n host = \"127.0.0.1\"\n port = 5001\n \n mySocket = socket.socket()\n mySocket.bind((host,port)) #On va faire la lisaison entre notre serveur socket mySocket et les (ports et localhost)\n \n mySocket.listen(1)\n conn, addr = mySocket.accept()\n print (\"Connexion de : \" + str(addr))\n\n #initialisation de la partie\n init_game()\n #boucle inifinie\n while True:\n #messages aux joueurs\n print_grid(grid) #imprime la grille\n printjoueur(\"Joueur {}, c'est a vous !\".format( (tour%2)+1 ),tour)\n printjoueur(\"Taper le numero de la case sur laquelle placer votre jeton\",tour)\n #saisie du joueur\n case = saisiejoueur(tour)\n #formatage de la grille\n grid[case]=3*(tour%2)+1 #1 si le tour est pair, 4 si le tour est impair\n #test de fin de partie\n vainqueur=victoire(grid)\n tour+=1\n #si la partie est terminee on reinitialise\n if vainqueur!=0:\n print_grid(grid)\n printall(\"Partie terminee. Nouvelle partie ! 
(ctrl+c pour stop)\")\n init_game()\n\n\njouer()\n\n\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"539375495","text":"import sys\nsys.path.append(\"../\")\n\n\n\nfrom task import Task, TaskIOProvider\nfrom fastq.merger import FastQMerger\n\nfrom config.task_io import gen_merge_io\n\nclass MergeIOProvider(TaskIOProvider):\n def __init__(self, root_dir, input_files, output_dirs, output_files, **kwargs):\n TaskIOProvider.__init__(self, root_dir= root_dir, \n input_files= input_files,\n output_dirs= output_dirs,\n output_files= output_files)\n \n\nclass MergeContainer(Task):\n def __init__(self, fastq_container, log_message=\"\"):\n merge_io= gen_merge_io(fastq_container)\n merge_io_provider= MergeIOProvider(**merge_io)\n \n Task.__init__(self, split_io_provider, log_message)\n self.fastq_container= fastq_container\n\n\n def run(self):\n tmp_params= self.io_provider.tmp_provider\n merged_dir= tmp_params.merge_dir\n\n merger= FastQContainerMerger(self.fastq_container, merge_dir)\n return merger.merge()\n\n\n\nclass Merge(Task):\n def __init__(self, merge_io_provider, log_message=\"\"):\n Task.__init__(self, split_io_provider, log_message)\n\n def run(self):\n input_params= self.io_provider.input_provider\n output_params= self.io_provider.output_provider\n \n file_paths= input_params.file_paths\n merged_file_path= output_params.merged_file_path\n\n merger= FastQMerger(file_paths, merged_file_path)\n merger.merge()\n\n","sub_path":"utopyia/rnaseq/workflow/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"495567534","text":"# encoding: utf-8\n# author = ‘LW’\n\"\"\"\n练习:最大的元素是谁和他对应的索引是多少\n\"\"\"\n\nnums = [5, 8, 7, 10, 20, 2, 6, 9]\n\n# 获取最大值及index\nmax_val = 0\nmax_idx = 0\nidx = 0\nfor i in nums:\n if i > max_val:\n max_val = i\n max_idx = idx\n idx += 1\n\nprint('1 max value:', max_val, 'max index:', max_idx)\n\n# 不保存最大值,仅记录index,使用index获取值再进行判断\nmax_idx = 0\nidx = 0\nfor i in nums:\n if i > nums[max_idx]:\n max_idx = idx\n idx += 1\n\nprint('2 max value:', nums[max_idx], 'max index:', max_idx)\n\n# 使用len判断长度,以len为索引进行循环遍历,以index进行取值\nmax_val = 0\nmax_idx = 0\nfor i in range(len(nums) - 1):\n if nums[i] > max_val:\n max_val = nums[i]\n max_idx = i\n\nprint('3 max value:', max_val, 'max index:', max_idx)\n","sub_path":"studysrc/day02/list002.py","file_name":"list002.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"278904199","text":"from config import INFO_URL_VERSION, INFO_URL, DATA_ACCESS_KEY\n\nimport requests\nfrom requests.structures import CaseInsensitiveDict\n\nheaders = CaseInsensitiveDict()\nheaders[\"X-ACCESS-KEY\"] = DATA_ACCESS_KEY\n\n\nclass InfoService:\n def __init__(self, data=None, version=None):\n self.data = data\n self.version = version\n\n def get_info_data(self):\n if self.data is None or self.version != INFO_URL_VERSION:\n response = requests.get(INFO_URL, headers=headers)\n self.version = INFO_URL_VERSION\n self.data = (\n response.json()[\"record\"]\n if response and response.status_code == 200\n else None\n )\n return self.data\n\n def get_books_dict(self):\n books_message = self.get_info_data()[\"books\"]\n return books_message\n\n def get_categories_dict(self):\n categories_dict = 
self.get_info_data()[\"categories\"]\n return categories_dict\n\n def get_tests_dict(self):\n tests_dict = self.get_info_data()[\"tests\"]\n return tests_dict\n\n def get_interview(self):\n interview_list = self.get_info_data()[\"interview\"]\n return interview_list\n","sub_path":"services/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"178914837","text":"import os\r\nimport traceback\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions\r\nfrom selenium.common.exceptions import WebDriverException, TimeoutException, NoSuchElementException\r\n\r\nfrom multiprocessing.connection import PipeConnection\r\nimport threading\r\nimport queue\r\n\r\nimport getpass\r\nfrom base64 import b64decode\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\n\r\nfrom enum import Enum\r\n\r\ndef _discern_page(driver):\r\n \"\"\"Helper function to determine the current page of the webdriver\"\"\"\r\n url = driver.current_url\r\n if url is \"about:blank\":\r\n return NeobuxPage.NONE\r\n elif url is \"https://www.neobux.com/\":\r\n return NeobuxPage.HOME\r\n elif \"https://www.neobux.com/m/l/\" in url:\r\n return NeobuxPage.LOGIN\r\n elif \"https://www.neobux.com/m/ga/\" in url:\r\n return NeobuxPage.VERIFICATION\r\n elif \"https://www.neobux.com/m/tta/\" in url:\r\n return NeobuxPage.LOGIN_LOG\r\n elif \"https://www.neobux.com/c/\" in url:\r\n return NeobuxPage.DASHBOARD\r\n elif \"https://www.neobux.com/c/rs/\" in url:\r\n return NeobuxPage.STATISTICS\r\n elif \"https://www.neobux.com/m/v/\" in url:\r\n return NeobuxPage.VIEW\r\n elif \"https://www.neobux.com/v/\" in url:\r\n return NeobuxPage.AD\r\n elif \"https://www.neobux.com/m/l0/\" in url:\r\n return NeobuxPage.LOGOUT\r\n else:\r\n return None\r\n\r\ndef _action_click(driver, actions, element):\r\n \"\"\"Helper function to emulate hovering then clicking an element\r\n \r\n This is necessary when clicking on an advertisement to be allowed to\r\n view it.\r\n \"\"\"\r\n driver.execute_script(\"return arguments[0].scrollIntoView();\", element)\r\n actions.move_to_element(element).perform()\r\n element.click()\r\n\r\nclass Neobux:\r\n \"\"\"Neobux advertisement autoclicker object based on python selenium webdriver\"\"\"\r\n\r\n AD_HEADER = (By.XPATH, \"/html/body/table/tbody/tr[1]/td/table/tbody/tr/td/table/tbody/tr/td[1]/table/tbody/tr\")\r\n LOGIN_ROWS = (By.XPATH, \"./table/tbody/tr[1]/td/table/tbody/tr\") #relative to the login form\r\n ERROR_MESSAGE = (By.XPATH, \"//*[text()='Error:']\")\r\n SUMMARY = (By.XPATH, \"/html/body/div[2]/div/table/tbody/tr/td[3]/table/tbody/tr/td/div/table[1]/tbody/tr/td[1]/table\")\r\n AD_LIST_BODY = (By.ID, \"tl\")\r\n FAVICON_BASE64 = (\"iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAABHNCSVQICAgIfAhkiAAAAatJREFU\"\r\n \"OI19k09rE0Echt+1rVA9bNKiByHNh/Hg9xA8SY3uqdX1EL142G4gtVoQD16lBD0ISv6AHoL0UBAD\"\r\n \"Ro2IHmKy+OeSUpBOHg/uJpvs6MALAzPPs/ub+Y0UD2C12+0OAVf/GAxYax21IoK5PcCZSqUyloTn\"\r\n \"eQbIZeA+heBLgF6I8vuy4W4sAVYSOEmpVJqR0KcYfP0LqynUiCUbuOp0OsM0nCT+k0X6nEu+rGYq\"\r\n \"z8UBB5GAXBiGxibZDXZH1Z/VwwzcEP4733B9WkbeKrkotCP0ygLfyx7krOSy0B2hckpigReTieM4\"\r\n \"v4BTS87Sj/Xu+mkVJB1LWpD0XdIb6eaFa4e3ardXnA3nd8KdmLutVcCxNsHxps4uhNKyfVlAMQiC\"\r\n 
\"af3pErY30TNQDbz9saHXzM3DhRk4ySWhagw/jfMErr42UwmwZoUlbpSujMLPjLSXEsQSb39seFhw\"\r\n \"Va/XIxvs+74BTvLt7fJWD5OR7EGrTyTA9X3fWODJVTHo5MNPKUkNHnzEcL84aaSJZB5OS7Z6GD2O\"\r\n \"4UfnM43kttvtwX+fc/Qh/3LAkJ3iZM8fgE6YK+IA+xEAAAAASUVORK5CYII=\")\r\n\r\n def __init__(self, driver_type = None, threading = False, connection = None):\r\n \"\"\"Creates a selenium webdriver\r\n \r\n Initializes the webdriver, using a webdriver of the type specified. If\r\n no driver type is specified, attempts to start a Firefox session, then\r\n a Chrome session if Firefox fails, then defaults to PhantomJS if Chrome\r\n fails. After initialization of webdriver, launches Neobux site.\r\n\r\n The threading argument can be passed a truthy value to make the\r\n instance run its methods in another thread. However, to protect\r\n the webdriver, some methods manipulating the driver will be placed into\r\n a queue to be threaded one at a time.\r\n\r\n A connection can be provided to allow the Neobux instance to receive\r\n commands and send return values through a pipe after entering the\r\n mainloop. Functions can be invoked by sending specific command strings\r\n followed by the argument functions.\r\n \"\"\"\r\n #multithreading/multiprocessing setup\r\n self._blocking_threads = queue.Queue()\r\n self._current_blocking_thread = None\r\n self._nonblocking_threads = []\r\n self.set_threading(threading)\r\n self.set_connection(connection, True)\r\n\r\n #webdriver setup\r\n if driver_type is None or \"Firefox\" or \"geckodriver\":\r\n try:\r\n from selenium.webdriver.firefox.options import Options\r\n options = Options()\r\n options.headless = True\r\n self.driver = webdriver.Firefox(options = options, service_log_path = os.path.devnull)\r\n self.driver_type = \"geckodriver\"\r\n except WebDriverException:\r\n del options\r\n elif driver_type is None or \"Chrome\" or \"chromedriver\":\r\n try:\r\n from selenium.webdriver.chrome.options import Options\r\n options = Options()\r\n options.headless = True\r\n self.driver = webdriver.Chrome(options = options, service_log_path = os.path.devnull)\r\n self.driver_type = \"chromedriver\"\r\n except WebDriverException:\r\n self.driver_type = None\r\n del options\r\n del self.driver\r\n elif driver_type is None or \"PhantomJS\" or \"ghostdriver\":\r\n self.driver = webdriver.PhantomJS(service_log_path = os.path.devnull)\r\n self.driver_type = \"ghostdriver\"\r\n else:\r\n raise ValueError(\"Invalid driver type, must be 'Firefox', 'Chrome', or 'PhantomJS'\")\r\n self.actions = ActionChains(self.driver)\r\n self.load = WebDriverWait(self.driver, 5, poll_frequency = 0.1)\r\n self.wait = WebDriverWait(self.driver, 90)\r\n\r\n #clicker setup\r\n self.page = NeobuxPage.NONE\r\n self.credentials = {\r\n \"username\" : \"\",\r\n \"password\" : \"\",\r\n \"secondary\" : \"\",\r\n \"captcha\" : \"\"\r\n }\r\n self.captcha_image = None\r\n self.authentication_number = \"\"\r\n self.login_error = None\r\n self.click_count = 0\r\n self.summary = {\r\n \"membership\" : \"\",\r\n \"member since\" : \"\",\r\n \"seen advertisements\" : 0,\r\n \"main balance\" : 0,\r\n \"rental balance\" : 0,\r\n \"points\" : 0\r\n }\r\n self.statistics = {\r\n \"unique\" : {\"Clicks\" : 0, \"Average\" : 0},\r\n \"fixed\" : {\"Clicks\" : 0, \"Average\" : 0},\r\n \"micro\" : {\"Clicks\" : 0, \"Average\" : 0},\r\n \"mini\" : {\"Clicks\" : 0, \"Average\" : 0},\r\n \"standard\" : {\"Clicks\" : 0, \"Average\" : 0},\r\n \"extended\" : {\"Clicks\" : 0, \"Average\" : 0}\r\n }\r\n self.ad_counts = {\r\n \"stale\" : 0,\r\n \"unique\" : 
0,\r\n \"fixed\" : 0,\r\n \"micro\" : 0,\r\n \"mini\" : 0,\r\n \"standard\" : 0,\r\n \"extended\" : 0,\r\n \"adprize\" : 0\r\n }\r\n\r\n def set_threading(self, threading):\r\n \"\"\"Enables/Disables threading for instance method execution\r\n \r\n Accepts truthy and falsy values to enable or disable instance\r\n threading.\r\n \r\n If threading is set to false with methods still queued for\r\n execution, the remaining methods will all execute and block the\r\n instance until they have completed.\r\n \"\"\"\r\n if threading:\r\n self._threading = True\r\n self._assign_threads()\r\n else:\r\n self._threading = False\r\n self._blocking_threads.join()\r\n\r\n def _assign_threads(self):\r\n #check if another blocking thread is alive\r\n if self._current_blocking_thread is None:\r\n blocked = False\r\n elif not self._current_blocking_thread.is_alive():\r\n self._blocking_threads.task_done()\r\n blocked = False\r\n else:\r\n blocked = True\r\n if not blocked:\r\n try:\r\n method = self._blocking_threads.get_nowait()\r\n self._current_blocking_thread = threading.Thread(target = method[0], args = method[1:])\r\n self._current_blocking_thread.start()\r\n except queue.Empty:\r\n self._current_blocking_thread = None\r\n for method in self._nonblocking_threads:\r\n threading.Thread(target = method[0], args = method[1:]).start()\r\n self._nonblocking_threads = []\r\n if self._threading or not self._blocking_threads.empty():\r\n threading.Timer(0.1, self._assign_threads).start()\r\n #reschedule self to run after 100 milliseconds while threading is enabled\r\n\r\n def set_connection(self, connection = None, targeted = False):\r\n \"\"\"Sets the connection of the clicker instance to the object passed\r\n\r\n If an object that is not a connection is passwed, raises TypeError.\r\n To remove the instance's reference to its current connection, pass\r\n None or no argument.\r\n\r\n :raises: TypeError\r\n \"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.set_connection, connection, True))\r\n return\r\n if isinstance(connection, PipeConnection):\r\n self._connection = connection\r\n elif not connection:\r\n self._connection = None\r\n else:\r\n raise TypeError(\"argument is not a multiprocessing.connection.Connection object\")\r\n\r\n def mainloop(self, timeout = 0.1):\r\n \"\"\"Runs an infinite loop, enters operation via the connection\r\n\r\n The instance enters an infinite loop. From within the loop, operations\r\n are performed by sending instructions in the form of tuples.\r\n\r\n To set or retrieve a data attribute, the first element of the tuple\r\n must be a string containing the name of the desired attribute. If there\r\n exists a second element, then it is assigned to the specified data\r\n attribute. If not, then the value of the data attribute is sent back\r\n through the pipe.\r\n\r\n To invoke an instance method, the first element of the tuple must be\r\n a string containing the name of the method. All proceeding elements, if\r\n any, will be passed to the method as arguments. 
If the invoked method\r\n returns a value, that value is sent back through the pipe.\r\n\r\n Operations pertaining to the mainloop can be performed by sending\r\n tuples wherein the second elements are any of the following:\r\n * \"timeout\" - sets or retrieves the time in seconds of how long the\r\n mainloop waits for an instruction on each interation\r\n * \"exit_loop\" - breaks out of the infinite loop, allowing mainloop to\r\n return\r\n\r\n If an invalid instruction is received by the loop, it is discarded and\r\n a string containing an error message is sent back through the pipe.\r\n\r\n This mode of operation is useful if the Neobux clicker object is to be\r\n run in a separate thread/process for asynchronous usage. However,\r\n calling the mainloop and entering this mode of operation is not\r\n necessary to use this class; simply calling the class methods is\r\n sufficient for many use cases.\r\n \"\"\"\r\n if not self._connection:\r\n raise AttributeError(\"mainloop cannot be run without an instance Connection object\")\r\n while True:\r\n if self._connection.poll(timeout):\r\n instruction = self._connection.recv()\r\n else:\r\n continue\r\n if isinstance(instruction, tuple):\r\n if hasattr(self, instruction[0]):\r\n if callable(instruction[0]):\r\n function = getattr(self, instruction[0])\r\n args = instruction[1:]\r\n retval = function(*args)\r\n if retval is not None:\r\n self._connection.send(retval)\r\n else:\r\n variable = getattr(self, instruction[0])\r\n try:\r\n variable = instruction[1]\r\n except IndexError:\r\n self._connection.send(variable)\r\n elif instruction[0] == \"timeout\":\r\n try:\r\n timeout = instruction[1]\r\n except IndexError:\r\n self._connection.send(timeout)\r\n elif instruction[0] == \"exit_loop\":\r\n break\r\n else:\r\n print(instruction[0])\r\n raise ValueError(\"Invalid instruction: No such attribute\")\r\n else:\r\n raise TypeError(\"Invalid instruction: not of class tuple\")\r\n instruction = None\r\n\r\n def launch(self, targeted = False):\r\n \"\"\"Prepares webdriver for Neobux operation by getting the login screen\"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.launch, True))\r\n return\r\n self.driver.get(\"https://www.neobux.com/\")\r\n self.page = NeobuxPage.HOME\r\n login = self.load.until(expected_conditions.element_to_be_clickable((By.LINK_TEXT, \"Login\")))\r\n login.click()\r\n self.load.until(expected_conditions.element_to_be_clickable((By.ID, \"loginform\")))\r\n self.page = NeobuxPage.LOGIN\r\n self.set_captcha()\r\n\r\n def prompt_login(self, targeted = False):\r\n \"\"\"Prompts the user for login credentials from the command line\"\"\"\r\n #if self._threading and not targeted:\r\n # self._nonblocking_threads.append((self.prompt_login, True))\r\n # return\r\n self.credentials[\"username\"] = input(\"Username: \")\r\n self.credentials[\"password\"] = getpass.getpass()\r\n self.credentials[\"secondary\"] = getpass.getpass(\"Secondary Password: \")\r\n\r\n def prompt_captcha(self, targeted = False):\r\n \"\"\"Prompts the user for the captcha key from the command line\"\"\"\r\n #if self._threading and not targeted:\r\n # self._nonblocking_threads.append((self.prompt_captcha, True))\r\n # return\r\n self.credentials[\"captcha\"] = input(\"Verification Code: \")\r\n\r\n def prompt_authentication_number(self, targeted = False):\r\n \"\"\"Prompts the user for the 2-Step Verification code from the command line\"\"\"\r\n #if self._threading and not targeted:\r\n # 
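
# Note on the mainloop above: `callable(instruction[0])` inspects the *name*
# string, which is never callable, so the method branch can never run; and
# `variable = instruction[1]` rebinds a local instead of storing onto self.
# A corrected dispatch step under the same tuple protocol (dispatch() is an
# illustrative standalone helper, not part of the class):
def dispatch(obj, instruction, connection):
    """Apply one (attribute_name, *args) instruction tuple to obj."""
    target = getattr(obj, instruction[0])
    if callable(target):                    # test the attribute, not its name
        retval = target(*instruction[1:])
        if retval is not None:
            connection.send(retval)
    elif len(instruction) > 1:
        setattr(obj, instruction[0], instruction[1])  # actually store the value
    else:
        connection.send(target)
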
self._nonblocking_threads.append((self.prompt_authenticaion_number, True))\r\n # return\r\n self.authentication_number = input(\"Six digit authentication number: \")\r\n\r\n def set_captcha(self, targeted = False):\r\n \"\"\"Sets instance captcha image\r\n \r\n If there exists a captcha at the login screen, self.captcha_image is\r\n set to an pillow Image object containing the captcha. If there isn't\r\n one, self.captcha_image is set to None. Only call this after launching\r\n the clicker and before successful login.\r\n \"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.set_captcha, True))\r\n return\r\n login_form = self.driver.find_element_by_id(\"loginform\")\r\n input_rows = login_form.find_elements(*Neobux.LOGIN_ROWS)\r\n if len(input_rows) == 4:\r\n captcha = input_rows[3].find_element_by_xpath(\"./td/table/tbody/tr/td[@align='right']/img\")\r\n captcha_input = input_rows[3].find_element_by_xpath(\"./td/table/tbody/tr/td[@align='left']/input\")\r\n src = captcha.get_attribute(\"src\")\r\n base64_data = src.replace(\"data:image/png;base64,\", \"\")\r\n self.captcha_image = Image.open(BytesIO(b64decode(base64_data)))\r\n self.captcha_image = Image.composite(self.captcha_image, Image.new(\"RGB\", self.captcha_image.size, \"white\"), self.captcha_image)\r\n self.page = NeobuxPage.CAPTCHA\r\n else:\r\n self.captcha_image = None\r\n self.page = NeobuxPage.LOGIN\r\n\r\n def log_in(self, targeted = False):\r\n \"\"\"Attempts to log in to Neobux using the instance username/password/key values\"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.log_in, True))\r\n return\r\n if self.page is not NeobuxPage.LOGIN:\r\n raise RuntimeError(\"Can only attempt to log in from the Neobux Login page\")\r\n login_form = self.driver.find_element_by_id(\"loginform\")\r\n input_rows = login_form.find_elements(*Neobux.LOGIN_ROWS)\r\n username_input = input_rows[0].find_element_by_xpath(\"./td/input[@placeholder='Username']\")\r\n password_input = input_rows[1].find_element_by_xpath(\"./td/input[@placeholder='Password']\")\r\n secondary_password_input = input_rows[2].find_element_by_xpath(\"./td/input[@placeholder='Secondary Password']\")\r\n if self.captcha_image:\r\n captcha_input = input_rows[3].find_element_by_xpath(\"./td/table/tbody/tr/td[@align='left']/input\")\r\n captcha_input.click()\r\n captcha_input.send_keys(self.credentials[\"captcha\"])\r\n username_input.click()\r\n username_input.send_keys(self.credentials[\"username\"])\r\n password_input.click()\r\n password_input.send_keys(self.credentials[\"password\"])\r\n secondary_password_input.click()\r\n secondary_password_input.send_keys(self.credentials[\"secondary\"])\r\n send = login_form.find_element_by_link_text(\"send\")\r\n send.click()\r\n try:\r\n self.load.until(lambda driver : \"https://www.neobux.com/m/l/\" not in driver.current_url)\r\n self.login_error = None\r\n self.captcha_image = None\r\n self.page = _discern_page(self.driver)\r\n except TimeoutException:\r\n if self.driver.find_elements(*Neobux.ERROR_MESSAGE):\r\n self.login_error = self.driver.find_element(*Neobux.ERROR_MESSAGE).find_element_by_xpath(\"..\").text\r\n elif username_input.value_of_css_property(\"background-color\") is \"rgb(255, 221, 204)\":\r\n self.login_error = \"Error: Invalid Username\"\r\n elif password_input.value_of_css_property(\"background-color\") is \"rgb(255, 221, 204)\":\r\n self.login_error = \"Error: Invalid Password\"\r\n elif 
secondary_password_input.value_of_css_property(\"background-color\") is \"rgb(255, 221, 204)\":\r\n self.login_error = \"Error: Invalid Secondary Password\"\r\n\r\n def verify(self, targeted = False):\r\n \"\"\"Attempts completion of 2-step verification using instance authorization number\"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.verify, True))\r\n return\r\n if self.page is not NeobuxPage.VERIFICATION:\r\n raise RuntimeError(\"Cannot input authorization code without 2-step verification prompt\")\r\n verification_form = self.driver.find_element_by_id(\"form2stps\")\r\n verification_form.find_element_by_tag_name(\"input\").send_keys(self.authentication_number)\r\n verification_form.find_element_by_link_text(\"send\").click()\r\n try:\r\n self.load.until(expected_conditions.staleness_of(verification_form))\r\n self.page = _discern_page(self.driver)\r\n except TimeoutException:\r\n self.login_error = verification_form.find_element_by_xpath(\"./*[style='color:#ac0000;font-weight:bold;'\").text\r\n\r\n def view_dashboard(self, targeted = False):\r\n \"\"\"Navigates to account dashboard and acquires account summary\"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.view_dashboard, True))\r\n return\r\n account = self.driver.find_element_by_link_text(self.username)\r\n account.click()\r\n self.load.until(expected_conditions.element_to_be_clickable(Neobux.SUMMARY))\r\n self.page = NeobuxPage.DASHBOARD\r\n summary = self.driver.find_element(*Neobux.SUMMARY)\r\n data = summary.text\r\n data = data.replace(\" \", \"\")\r\n data = data.replace(\"=\", \"\")\r\n data = data.replace(\"+\", \"\")\r\n data = data.replace(\"$\", \"\")\r\n data = data.split(\"\\n\")\r\n data = [entry.split(\":\")[-1] for entry in data]\r\n self.summary[\"membership\"] = data[1]\r\n self.summary[\"since\"] = data[2]\r\n self.summary[\"seen\"] = int(data[4])\r\n self.summary[\"main_balance\"] = int(data[7])\r\n self.summary[\"rental_balance\"] = int(data[8])\r\n self.summary[\"points\"] = int(data[10])\r\n \r\n def view_statistics(self, targeted = False):\r\n \"\"\"Navigates to account statistics and acquires 10-day click statistics\"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.view_statistics, True))\r\n return\r\n statistics = self.driver.find_element_by_id(\"statBt\")\r\n chart_bars = self.driver.find_elements_by_class_name(\"highcharts-color-0\")\r\n self.load.until(expected_conditions.element_to_be_clickable((By.CLASS_NAME, \"highcharts-container\")))\r\n self.page = NeobuxPage.STATISTICS\r\n for bar in chart_bars:\r\n try:\r\n bar.click()\r\n tooltip = self.driver.find_element_by_css_selector(\".highcharts-tooltip.highcharts-color-0\")\r\n label = tooltip.text.split(\"Clicks: \")[0]\r\n data = tooltip.text.split(\"Clicks: \")[1]\r\n if label == \"Fixed\":\r\n if tooltip.find_elements_by_class_name(\"highcharts-tooltip-box\")[3].get_attribute(\"stroke\") == \"#E517F7\":\r\n label = \"unique\"\r\n if tooltip.find_elements_by_class_name(\"highcharts-tooltip-box\")[3].get_attribute(\"stroke\") == \"#FF9C00\":\r\n label = \"fixed\"\r\n clicks = int(data.split(\"Average: \")[0])\r\n average = int(data.split(\"Average: \")[1])\r\n self.statistics[label.lower()] = {\"Clicks\" : clicks, \"Average\" : average}\r\n except:\r\n pass\r\n\r\n def view_ads(self, targeted = False):\r\n \"\"\"Navigates to the page of advertisements, sets instance ad count\r\n \r\n Webdriver clicks \"View Advertisements\" link in the navigation 
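
# Note on view_dashboard above: `self.username` is never defined (the value
# lives in self.credentials["username"]); the keys it writes ("since",
# "seen", "main_balance", ...) do not match the keys declared in __init__
# ("member since", "seen advertisements", "main balance", ...); and balances
# are dollar amounts, so int() raises on values like "0.04". A consistent
# assignment step (parse_summary is an illustrative helper; `data` is the
# list parsed from the summary table text):
def parse_summary(data, summary):
    """Fill the summary dict declared in __init__ from the parsed rows."""
    summary["membership"] = data[1]
    summary["member since"] = data[2]
    summary["seen advertisements"] = int(data[4])
    summary["main balance"] = float(data[7])    # balances are fractional dollars
    summary["rental balance"] = float(data[8])
    summary["points"] = int(data[10])
    return summary
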
menu.\r\n After navigation, counts and stores the number of advertisements\r\n available.\r\n \"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.view_ads, True))\r\n return\r\n view = self.driver.find_element_by_link_text(\"View Advertisements\")\r\n view.click()\r\n self.load.until(expected_conditions.element_to_be_clickable((By.CLASS_NAME, \"cell\")))\r\n self.page = NeobuxPage.VIEW\r\n try:\r\n self.driver.find_element_by_link_text(\"disable\").click()\r\n except:\r\n pass\r\n self.ad_counts[\"stale\"] = len(self.driver.find_elements_by_class_name(\"c_ad0\"))\r\n self.ad_counts[\"unique\"] = len(self.driver.find_elements_by_class_name(\"c_adfu\"))\r\n self.ad_counts[\"fixed\"] = len(self.driver.find_elements_by_class_name(\"c_adf\"))\r\n self.ad_counts[\"micro\"] = len(self.driver.find_elements_by_class_name(\"c_ad5\"))\r\n self.ad_counts[\"mini\"] = len(self.driver.find_elements_by_class_name(\"c_ad30\"))\r\n #self.standard_exposure = len(self.driver.find_elements_by_class_name(\"\"))\r\n self.extended_exposure = len(self.driver.find_elements_by_class_name(\"c_ad15\"))\r\n print(\"Advertisements: %i\" % (self.ad_total))\r\n print(\"Already clicked: %i\" % (self.stale_ads))\r\n print(\"Unique Fixed Advertisements: %i\" % (self.unique_fixed_ads))\r\n print(\"Fixed Advertisements: %i\" % (self.fixed_ads))\r\n #print(\"Micro Exposure: %i\" % (self.micro_exposure))\r\n #print(\"Mini Exposure: %i\" % (self.mini_exposure))\r\n #print(\"Standard Exposure: %i\" % (self.standard_exposure))\r\n #print(\"Extended Exposure: %i\" % (self.extended_exposure))\r\n\r\n def click_ads(self, targeted = False):\r\n \"\"\"Clicks through the available advertisements\r\n\r\n Identifies the advertisements and iterates through each. On every\r\n iteration, an advertisement is clicked. If the advertisement has\r\n already been clicked, then stale ad count is incremented and\r\n iteration continues. 
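
# Note on the tail of view_ads above: the counts go into self.ad_counts, but
# the print statements read attributes that are never assigned (self.ad_total,
# self.stale_ads, self.unique_fixed_ads, ...), and one count lands on
# self.extended_exposure instead of self.ad_counts["extended"]. A reporting
# step driven entirely by the dict (report_ad_counts is illustrative):
def report_ad_counts(ad_counts):
    """Print per-class advertisement counts plus a derived total (adprize excluded)."""
    total = sum(count for kind, count in ad_counts.items() if kind != "adprize")
    print("Advertisements: %i" % total)
    for kind in ("stale", "unique", "fixed", "micro", "mini", "standard", "extended"):
        print("%s advertisements: %i" % (kind.capitalize(), ad_counts[kind]))
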
If not, then fresh ad count is incremented and \r\n the instance driver waits for advertisement validation before\r\n continuing.\r\n\r\n Only call this function when the webdriver is on the view\r\n advertisements page.\r\n \"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.click_ads, True))\r\n return\r\n if self.page is not NeobuxPage.VIEW:\r\n raise RuntimeError(\"Cannot click ads without viewing advertisements\")\r\n for index in range(self.ad_total):\r\n ad = self.driver.find_elements_by_class_name(\"cell\")[index]\r\n if \"c_ad0\" in ad.get_attribute(\"class\"):\r\n continue\r\n advertisements_url = self.driver.current_url\r\n self.actions.reset_actions()\r\n self.actions = ActionChains(self.driver)\r\n _action_click(self.driver, self.actions, ad)\r\n self.load.until(lambda d : \"Click the red dot\" in ad.text)\r\n dot = ad.find_element_by_tag_name(\"img\")\r\n _action_click(self.driver, self.actions, dot)\r\n self.driver.switch_to.window(self.driver.window_handles[1])\r\n self.page = NeobuxPage.AD\r\n header = self.load.until(expected_conditions.element_to_be_clickable(Neobux.AD_HEADER))\r\n if \"You already saw this advertisement\" in header.text:\r\n self.driver.close()\r\n else:\r\n self.wait.until(expected_conditions.text_to_be_present_in_element(Neobux.AD_HEADER, \"Advertisement validated!\"))\r\n self.click_count += 1\r\n close = header.find_element_by_link_text(\"Close\")\r\n close.click()\r\n print(\"clicked ads: %i\" % (self.click_count), end= \"\\r\", flush=True)\r\n self.driver.switch_to.window(self.driver.window_handles[0])\r\n self.page = NeobuxPage.VIEW\r\n self.load.until(lambda driver : driver.current_url is not advertisements_url)\r\n self.load.until(expected_conditions.element_to_be_clickable(Neobux.AD_LIST_BODY))\r\n try:\r\n adprize = self.driver.find_element_by_id(\"adprize\").find_element_by_xpath(\"../div/div[2]\")\r\n self.ad_counts[\"adprize\"] = int(adprize.text)\r\n except NoSuchElementException:\r\n self.ad_counts[\"adprize\"] = 0\r\n print(\"\")\r\n\r\n def set_adprize_count(self, targeted = False):\r\n \"\"\"Sets the instance adprize count to the number of adprize\r\n\r\n Only call this function when the webdriver is on the view\r\n advertisements page.\r\n \"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.set_adprize_count, True))\r\n return\r\n if self.page is not NeobuxPage.VIEW:\r\n raise RuntimeError(\"Cannot acquire adprize count without viewing advertisements\")\r\n adprize = self.driver.find_element_by_id(\"adprize\").find_element_by_xpath(\"../div/div[2]\")\r\n self.ad_counts[\"adprize\"] = int(adprize.text)\r\n print(\"Adprize: %i\" % (self.ad_counts[\"adprize\"]))\r\n\r\n def click_adprize(self, targeted = False):\r\n \"\"\"Clicks through the adprize if the adprize count\r\n \r\n Sets the instance adprize count to the number of adprize. If the\r\n instance adprize count is greater than zero, then the driver clicks\r\n through the adprize. 
Updates the instance adprize count on each\r\n advertisement validation.\r\n\r\n Only call this function when the webdriver is on the view\r\n advertisements page.\r\n \"\"\"\r\n if self._threading and not targeted:\r\n self._blocking_threads.put((self.click_adprize, True))\r\n return\r\n if self.page is not NeobuxPage.VIEW:\r\n raise RuntimeError(\"Cannot click adprize without viewing advertisements\")\r\n adprize = self.driver.find_element_by_id(\"adprize\").find_element_by_xpath(\"../div/div[2]\")\r\n self.ad_counts[\"adprize\"] = int(adprize.text)\r\n print(\"Adprize: %i\" % (self.ad_counts[\"adprize\"]))\r\n if self.ad_counts[\"adprize\"] > 0:\r\n adprize.click()\r\n self.driver.switch_to.window(self.driver.window_handles[1])\r\n self.page = NeobuxPage.AD\r\n while True:\r\n print(\"Adprize remaining: %i \" % (self.ad_counts[\"adprize\"]), end= \"\\r\", flush=True)\r\n try:\r\n header = self.load.until(expected_conditions.element_to_be_clickable(Neobux.AD_HEADER))\r\n self.wait.until(expected_conditions.text_to_be_present_in_element(Neobux.AD_HEADER, \"Advertisement validated!\"))\r\n self.load.until(lambda d : header.find_elements_by_link_text(\"Next\"))\r\n self.ad_counts[\"adprize\"] = int(header.find_element_by_id(\"rmnDv\").text)\r\n next = header.find_element_by_link_text(\"Next\")\r\n next.click()\r\n except TimeoutException:\r\n if self.ad_counts[\"adprize\"] == 1:\r\n self.ad_counts[\"adprize\"] = 0\r\n close = header.find_element_by_link_text(\"Close\")\r\n close.click()\r\n self.driver.switch_to.window(self.driver.window_handles[0])\r\n self.page = NeobuxPage.VIEW\r\n break\r\n else:\r\n input(\"Press Enter to continue...\") #in case something other than nothing happens, I want to catch it for observation\r\n\r\n def __del__(self):\r\n \"\"\"Ensure webdriver cleanup upon clicker garbage collection\"\"\"\r\n self.driver.quit()\r\n\r\nclass NeobuxPage(Enum):\r\n NONE = \"\"\r\n HOME = \"home\"\r\n LOGIN = \"login\" #login page without captcha prompt\r\n CAPTCHA = \"captcha\" #login page with captcha prompt\r\n VERIFICATION = \"verification\"\r\n LOGIN_LOG = \"login_log\"\r\n DASHBOARD = \"dashboard\"\r\n STATISTICS = \"statistics\"\r\n VIEW = \"view\"\r\n AD = \"ad\"\r\n LOGOUT = \"logout\"\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n clicker = Neobux()\r\n clicker.launch()\r\n clicker.prompt_login()\r\n if clicker.captcha_image:\r\n clicker.captcha_image.show()\r\n clicker.prompt_captcha()\r\n clicker.log_in()\r\n if clicker.page is NeobuxPage.VERIFICATION:\r\n clicker.prompt_authentication_number()\r\n clicker.verify()\r\n print()\r\n clicker.view_ads()\r\n clicker.click_ads()\r\n print()\r\n clicker.click_adprize()\r\n except Exception:\r\n traceback.print_exc()\r\n input(\"Error occurred, press Enter to quit\")","sub_path":"Neobux.py","file_name":"Neobux.py","file_ext":"py","file_size_in_byte":31622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"68116026","text":"import FWCore.ParameterSet.Config as cms\nprocess = cms.Process(\"TtbarDiLeptonAnalyzer\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\nprocess.load(\"CATTools.CatAnalyzer.ttll.ttbarDileptonKinSolutionProducer_cfi\")\nprocess.load(\"CATTools.CatAnalyzer.ttll.ttbarDileptonKinSolutionAlgos_cff\")\nprocess.load(\"CATTools.Validation.ttllEventSelector_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False) 
)\n\nprocess.options.allowUnscheduled = cms.untracked.bool(True)\n#process.options.allowUnscheduled = cms.untracked.bool(False)\n\n\n## setting up arguements\nfrom FWCore.ParameterSet.VarParsing import VarParsing\noptions = VarParsing ('python')\noptions.register('isTT',False, VarParsing.multiplicity.singleton, VarParsing.varType.bool, \"isTT: 0 default\")\noptions.parseArguments()\n\nprocess.source = cms.Source(\"PoolSource\", fileNames = cms.untracked.vstring())\n#process.source.fileNames = ['/store/user/jhgoh/CATTools/sync/v7-6-3/MuonEG_Run2015D-16Dec2015-v1.root',]\n#process.source.fileNames = ['file:/xrootd/store/user/jhgoh/CATTools/sync/v7-6-3/TT_TuneCUETP8M1_13TeV-powheg-pythia8.root',]\n#process.source.fileNames = ['file:../../../catdata_20160315/catTuple.root']\nprocess.source.fileNames = ['root://cms-xrdr.sdfarm.kr:1094///xrd/store/group/CAT/TTTo2L2Nu_13TeV-powheg/v8-0-0_RunIISpring16MiniAODv2-PUSpring16_80X_mcRun2_asymptotic_2016_miniAODv2_v0_ext1-v1/160705_215520/0000/catTuple_1.root']\n\n#process.MessageLogger.debugModules = cms.untracked.vstring('cattree')\n#process.MessageLogger.destinations = cms.untracked.vstring('detailInfo')\n#process.MessageLogger.detailInfo = cms.untracked.PSet( threshold = cms.untracked.string('DEBUG'))\n\nfrom CATTools.CatAnalyzer.leptonSF_cff import *\n\nprocess.load(\"CATTools.CatAnalyzer.filters_cff\")\nprocess.load(\"CATTools.CatAnalyzer.ttll.ttllGenFilters_cff\")\n\nprocess.load(\"CATTools.CatAnalyzer.flatGenWeights_cfi\")\nprocess.agen = cms.EDAnalyzer(\"CATGenTopAnalysis\",\n weightIndex = cms.int32(-1),\n weight = cms.InputTag(\"flatGenWeights\"),\n channel = cms.InputTag(\"partonTop\",\"channel\"),\n modes = cms.InputTag(\"partonTop\", \"modes\"),\n partonTop = cms.InputTag(\"partonTop\"),\n pseudoTop = cms.InputTag(\"pseudoTop\"),\n filterTaus = cms.bool(False),\n)\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string(\"cmskin_quality_forAll.root\")\n)\nprocess.qualityAna = cms.EDAnalyzer(\"TTLLKinQualityAnalyzerForAll\",\n #solver = cms.PSet(algo = cms.string(\"CMSKin\"),), \n leptons = cms.InputTag(\"eventsTTLL\", \"leptons\"), ## lepton in LorentzVector\n jets = cms.InputTag(\"eventsTTLL\", \"jets\"), ## jet in LorentzVector\n met = cms.InputTag(\"eventsTTLL\", \"met\"), ## MET pt in float \n metphi = cms.InputTag(\"eventsTTLL\", \"metphi\"), ## MET phi in float\n applyJetCharge = cms.double(1.0),\n)\n\nprocess.qualityAna.solver = process.ttbarDileptonKinAlgoPSetCMSKin\n\nprocess.eventsTTLL.filters.ignoreTrig = cms.bool(True)\n\n\"\"\"\nif ( options.isTT ) : \n print \"This is TT Samples. 
Run agen and filter partonTop.\"\n    process.p = cms.Path(\n        process.agen + process.filterPartonTTLL* process.eventsTTLL * process.qualityAna\n    )\nelse : \n\"\"\"\nprocess.p = cms.Path( process.eventsTTLL * process.qualityAna)\n\nif ( process.maxEvents.input < 0 or process.maxEvents.input > 5000) :\n    process.MessageLogger.cerr.FwkReport.reportEvery = 1000\nprocess.options.wantSummary = True\n\n\n\n","sub_path":"CatAnalyzer/test/JetCharge/run_CATJetChargeQualityForAll_cfg.py","file_name":"run_CATJetChargeQualityForAll_cfg.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"361187686","text":"import matplotlib.pyplot as plt\nfrom coreprogram import *\n\ntest1 = orders()\n\ndata = test1.tracepath\nfig = plt.figure(facecolor='white')\nax = fig.add_subplot(111, frameon=False)\nax.grid(True)\nax.plot(*zip(*data),marker='o')\nplt.show()","sub_path":"hiq.py","file_name":"hiq.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"315497457","text":"# Create a program that reads names into a list (array) and then picks one at random #\r\n\r\nimport random\r\ncont = 1\r\nalunos = []\r\nfor i in range(0, 4):\r\n    x = str(input('+++ What is the name of student no. {}? > '.format(cont)))\r\n    alunos.append(x)\r\n    cont = cont + 1\r\n\r\nprint(\"THE CHOSEN STUDENT WAS \", random.choice(alunos))\r\n","sub_path":"Exe021.py","file_name":"Exe021.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"256429352","text":"from .funcs import *\n\n\nbm_list = ['甜幂纷飞 10'] # forum names to look up\n\ntry:\n    # build the query URL for each forum name\n    ba_url_list = []\n    for item in bm_list:\n        query_list = item.split() # split into forum name and page count\n        print('query_list', query_list)\n        # if no page count is given, default to crawling the first 10 pages\n        if len(query_list) == 2:\n            baming = query_list[0]\n            page_limit = int(query_list[1])\n        else:\n            baming = query_list[0]\n            page_limit = 10\n\n        # build the forum URL\n        ba_url = ba_url_create(ba_name=baming, page_num=0)\n\n        # open the forum URL to read the page count\n        pages = get_ba_pages(ba_url)\n        page_limit = 50 * page_limit\n\n        # cap the page count at the limit\n        if int(pages) < int(page_limit):\n            print('pages', pages)\n        else:\n            pages = page_limit\n            print('pages_limit', page_limit)\n\n        pn_list = create_pn(pages)\n        print('pn_list', pn_list)\n\n        # build the list of URLs to open\n        for x in pn_list:\n            ba_query_url = ba_url_create(ba_name=baming, page_num=x)\n            temp_list = [baming, ba_query_url, tb_name, conn]\n            ba_url_list.append(temp_list)\n\n    print('number of forum URLs to query: ' + str(len(ba_url_list)))\n\n    # create the database table that stores the forum URLs\n    dro_sql = \"drop table if exists %s; \" % tb_name\n    cre_sql = \"create table if not exists \" + str(tb_name) + \\\n              \"(id int(30) not null primary key auto_increment, \" \\\n              \"forum_name varchar(200), thread_id bigint(30), tid_url text);\"\n    sql_caozuo(dro_sql, conn)\n    sql_caozuo(cre_sql, conn)\n    print('tb_name table created successfully')\n\n    # open pages in multiple threads to collect tid and tid_url\n    duoxiancheng(open_ba_url, ba_url_list)\n\nexcept Exception as e:\n    print('error in main execution', e)\n\n\n\n\n","sub_path":"main_run.py","file_name":"main_run.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
+{"seq_id":"278132558","text":"# Defines all the SourcePawn opcodes and provides helpers\n\n__all__ = ['sp_opcodes_list', 'opcodes']\n\n# Legend for Statuses:\n#\n# DONE -> code generation is done\n# !GEN -> code generation is deliberately skipped because:\n#   (default): 
compiler does not generate\n# DEPRECATED: this feature no longer exists/supported\n# UNSUPPORTED: this opcode is not supported\n# TODO: done in case needed\n# VERIFIED -> code generation is checked as run-time working. prefixes:\n# ! errors are not checked yet.\n# - non-inline errors are not checked yet.\n# ~ assumed checked because of related variation, but not actually checked\n\nsp_opcodes_list = [\n 'invalid',\n 'load_pri',\n 'load_alt',\n 'load_s_pri',\n 'load_s_alt',\n 'lref_pri',\n 'lref_alt',\n 'lref_s_pri',\n 'lref_s_alt',\n 'load_i',\n 'lodb_i',\n 'const_pri',\n 'const_alt',\n 'addr_pri',\n 'addr_alt',\n 'stor_pri',\n 'stor_alt',\n 'stor_s_pri',\n 'stor_s_alt',\n 'sref_pri',\n 'sref_alt',\n 'sref_s_pri',\n 'sref_s_alt',\n 'stor_i',\n 'strb_i',\n 'lidx',\n 'lidx_b',\n 'idxaddr',\n 'idxaddr_b',\n 'align_pri', # !GEN :TODO: - only used for pack access, drop support in compiler first\n 'align_alt', # !GEN :TODO: - only used for pack access, drop support in compiler first\n 'lctrl', # !GEN\n 'sctrl', # !GEN\n 'move_pri',\n 'move_alt',\n 'xchg',\n 'push_pri',\n 'push_alt',\n 'push_r', # !GEN DEPRECATED\n 'push_c',\n 'push',\n 'push_s',\n 'pop_pri',\n 'pop_alt',\n 'stack',\n 'heap',\n 'proc',\n 'ret', # !GEN\n 'retn',\n 'call',\n 'call_pri', # !GEN\n 'jump',\n 'jrel', # !GEN\n 'jzer',\n 'jnz',\n 'jeq',\n 'jneq',\n 'jless', # !GEN\n 'jleq', # !GEN\n 'jgrtr', # !GEN\n 'jgeq', # !GEN\n 'jsless',\n 'jsleq',\n 'jsgrtr',\n 'jsgeq',\n 'shl',\n 'shr',\n 'sshr',\n 'shl_c_pri',\n 'shl_c_alt',\n 'shr_c_pri',\n 'shr_c_alt',\n 'smul',\n 'sdiv',\n 'sdiv_alt',\n 'umul', # !GEN\n 'udiv', # !GEN\n 'udiv_alt', # !GEN\n 'add',\n 'sub',\n 'sub_alt',\n 'dand',\n 'dor',\n 'xor',\n 'dnot',\n 'neg',\n 'invert',\n 'add_c',\n 'smul_c',\n 'zero_pri',\n 'zero_alt',\n 'zero',\n 'zero_s',\n 'sign_pri',\n 'sign_alt',\n 'eq',\n 'neq',\n 'less', # !GEN\n 'leq', # !GEN\n 'grtr', # !GEN\n 'geq', # !GEN\n 'sless',\n 'sleq',\n 'sgrtr',\n 'sgeq',\n 'eq_c_pri',\n 'eq_c_alt',\n 'inc_pri',\n 'inc_alt',\n 'inc',\n 'inc_s',\n 'inc_i',\n 'dec_pri',\n 'dec_alt',\n 'dec',\n 'dec_s',\n 'dec_i',\n 'movs',\n 'cmps', # !GEN\n 'fill',\n 'halt',\n 'bounds',\n 'sysreq_pri', # !GEN\n 'sysreq_c',\n 'file', # !GEN DEPRECATED\n 'line', # !GEN DEPRECATED\n 'symbol', # !GEN DEPRECATED\n 'srange', # !GEN DEPRECATED\n 'jump_pri', # !GEN\n 'switch_',\n 'casetbl',\n 'swap_pri',\n 'swap_alt',\n 'push_adr',\n 'nop',\n 'sysreq_n',\n 'symtag', # !GEN DEPRECATED\n 'dbreak',\n 'push2_c',\n 'push2',\n 'push2_s',\n 'push2_adr',\n 'push3_c',\n 'push3',\n 'push3_s',\n 'push3_adr',\n 'push4_c',\n 'push4',\n 'push4_s',\n 'push4_adr',\n 'push5_c',\n 'push5',\n 'push5_s',\n 'push5_adr',\n 'load_both',\n 'load_s_both',\n 'const_',\n 'const_s',\n 'sysreq_d', # !GEN UNSUPPORT\n 'sysreq_nd', # !GEN UNSUPPORT\n 'tracker_push_c',\n 'tracker_pop_setheap',\n 'genarray',\n 'genarray_z',\n 'stradjust_pri',\n 'stackadjust',\n 'endproc',\n 'fabs',\n 'float_',\n 'floatadd',\n 'floatsub',\n 'floatmul',\n 'floatdiv',\n 'rnd_to_nearest',\n 'rnd_to_floor',\n 'rnd_to_ceil',\n 'rnd_to_zero',\n 'floatcmp'\n]\n\n\"\"\"\n 'invalid',\n 'load_pri',\n 'load_alt',\n 'load_s_pri',\n 'load_s_alt',\n 'lref_pri',\n 'lref_alt',\n 'lref_s_pri',\n 'lref_s_alt',\n 'load_i',\n 'lodb_i',\n 'const_pri',\n 'const_alt',\n 'addr_pri',\n 'addr_alt',\n 'stor_pri',\n 'stor_alt',\n 'stor_s_pri',\n 'stor_s_alt',\n 'sref_pri',\n 'sref_alt',\n 'sref_s_pri',\n 'sref_s_alt',\n 'stor_i',\n 'strb_i',\n 'lidx',\n 'lidx_b',\n 'idxaddr',\n 'idxaddr_b',\n 'align_pri',\n 'align_alt',\n 'lctrl',\n 'sctrl',\n 
'move_pri',\n 'move_alt',\n 'xchg',\n 'push_pri',\n 'push_alt',\n 'push_r',\n 'push_c',\n 'push',\n 'push_s',\n 'pop_pri',\n 'pop_alt',\n 'stack',\n 'heap',\n 'proc',\n 'ret',\n 'retn',\n 'call',\n 'call_pri',\n 'jump',\n 'jrel',\n 'jzer',\n 'jnz',\n 'jeq',\n 'jneq',\n 'jless',\n 'jleq',\n 'jgrtr',\n 'jgeq',\n 'jsless',\n 'jsleq',\n 'jsgrtr',\n 'jsgeq',\n 'shl',\n 'shr',\n 'sshr',\n 'shl_c_pri',\n 'shl_c_alt',\n 'shr_c_pri',\n 'shr_c_alt',\n 'smul',\n 'sdiv',\n 'sdiv_alt',\n 'umul',\n 'udiv',\n 'udiv_alt',\n 'add',\n 'sub',\n 'sub_alt',\n 'and',\n 'or',\n 'xor',\n 'not',\n 'neg',\n 'invert',\n 'add_c',\n 'smul_c',\n 'zero_pri',\n 'zero_alt',\n 'zero',\n 'zero_s',\n 'sign_pri',\n 'sign_alt',\n 'eq',\n 'neq',\n 'less',\n 'leq',\n 'grtr',\n 'geq',\n 'sless',\n 'sleq',\n 'sgrtr',\n 'sgeq',\n 'eq_c_pri',\n 'eq_c_alt',\n 'inc_pri',\n 'inc_alt',\n 'inc',\n 'inc_s',\n 'inc_i',\n 'dec_pri',\n 'dec_alt',\n 'dec',\n 'dec_s',\n 'dec_i',\n 'movs',\n 'cmps',\n 'fill',\n 'halt',\n 'bounds',\n 'sysreq_pri',\n 'sysreq_c',\n 'file', # obsolete\n 'line', # obsolete\n 'symbol', # obsolete\n 'srange', # obsolete\n 'jump_pri',\n 'switch',\n 'casetbl',\n 'swap_pri',\n 'swap_alt',\n 'push_adr',\n 'nop',\n 'sysreq_n',\n 'symtag', # obsolete\n 'break',\n 'push2_c',\n 'push2',\n 'push2_s',\n 'push2_adr',\n 'push3_c',\n 'push3',\n 'push3_s',\n 'push3_adr',\n 'push4_c',\n 'push4',\n 'push4_s',\n 'push4_adr',\n 'push5_c',\n 'push5',\n 'push5_s',\n 'push5_adr',\n 'load_both',\n 'load_s_both',\n 'const',\n 'const_s',\n\n 'sysreq_d',\n 'sysreq_nd',\n\n 'heap_i',\n 'push_h_c',\n 'genarray',\n\"\"\"\n\nclass SourcePawnOpcodes(object):\n def __init__(self):\n self._op_to_name = {}\n self._name_to_op = {}\n\n for opcode,name in enumerate(sp_opcodes_list):\n self._op_to_name[opcode] = name\n self._name_to_op[name] = opcode\n\n def __getitem__(self, item):\n return self._op_to_name[item]\n\n def get(self, k, d=None):\n return self._op_to_name.get(k, d)\n\n def __getattr__(self, item):\n try:\n return self._name_to_op[item]\n except KeyError:\n raise AttributeError('There is no \"%s\" opcode' % item)\n\nopcodes = SourcePawnOpcodes()\n","sub_path":"smx/opcodes.py","file_name":"opcodes.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"73338575","text":"def word_count(s):\n # Implement me.\n ignore = [\n '\"', ':', ';', ',', '.', '-', '+', '=', \n '/', '\\\\', '|', '[', ']', '{', '}', '(',\n ')', '*', '^', '&'\n ]\n\n count = dict()\n\n for word in s.split():\n word = word.lower()\n for char in ignore:\n word = word.replace(char, '')\n if len(word) == 0:\n return count\n if count.get(word):\n count[word] += 1\n else:\n count[word] = 1\n\n return count\n\nif __name__ == \"__main__\":\n print(word_count(\"\"))\n print(word_count(\"Hello\"))\n print(word_count('Hello, my cat. And my cat doesn\\'t say \"hello\" back.'))\n print(word_count('This is a test of the emergency broadcast network. 
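
# Usage sketch for the SourcePawnOpcodes helper defined above: it exposes the
# same table three ways (index to name, name to index, tolerant lookup). The
# import path below is taken from that record's sub_path (smx/opcodes.py):
from smx.opcodes import opcodes

assert opcodes[1] == 'load_pri'        # index -> mnemonic via __getitem__
assert opcodes.load_pri == 1           # mnemonic -> index via __getattr__
assert opcodes.get(9999) is None       # .get() tolerates unknown opcode values
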
This is only a test.'))","sub_path":"applications/word_count/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"188790640","text":"import sys, time;\nimport socket;\n\nMCAST_GRP = '224.0.0.1'\nMCAST_PORT = 5007\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP);\nsock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2);\n\nsentNo = 0;\nwhile 1:\n\ttry:\n\t\tdata = str(sentNo) + \": \" + repr(time.time()) + '\\n';\n\t\tprint(\"Sending sentNo: %d, data: %s\", (sentNo, data));\n\t\tsock.sendto(data, (MCAST_GRP, MCAST_PORT));\n\t\ttime.sleep(0.5);\n\t\tsentNo += 1;\n\texcept (KeyboardInterrupt, SystemExit):\n\t\traise;\n\texcept:\n\t\ttraceback.print_exc();\n\n# # Send UDP broadcast packets\n\n# UDP_PORT = 50000;\n# UDP_IP = '';\n\n# import sys, time;\n# import socket;\n\n# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);\n# # s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1);\n# s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1);\n# s.bind((UDP_IP, UDP_PORT));\n\n# while 1:\n# \ttry:\n# \t\tdata = repr(time.time()) + '\\n';\n# \t\ts.sendto(data, ('<broadcast>', UDP_PORT));\n# \t\ttime.sleep(2);\n# \texcept (KeyboardInterrupt, SystemExit):\n# \t\traise;\n# \texcept:\n# \t\ttraceback.print_exc();","sub_path":"src/PoC/broadcast_server.py","file_name":"broadcast_server.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"236521680","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport sys\nfrom ctypes import *\nimport math as m\nimport numpy as np\nfrom numpy.polynomial import Polynomial as P\nfrom CALCULOS.vector3 import *\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n#import OpenGL.GL as ogl\n\nfrom GRAFICOS.perfilLongitudinal import PerfilLongitudinal\n\ndef CriaDesenhoDXF(model, id_Dxf, mode=None): \n glNewList(id_Dxf, GL_COMPILE)\n try: \n # Verifica se a variavel LISTA_PVS ja estiver sido atribuida\n if hasattr(model, \"dadosDXF\"):\n glPushMatrix()\n glTranslatef(0.0, 0.0, -100)\n glScale(100, 100, 100)\n \n for ponto in model.dadosDXF.graficos[\"POINTS\"]:\n pass \n# glPushMatrix()\n# glColor3f(0.0,0.0,1.0)\n# glPointSize(10)\n# glLineWidth(1) \n# glBegin(GL_POINT)\n# glVertex3f(ponto[0],ponto[1],ponto[2]) \n# glEnd()\n# glPopMatrix()\n\n for linha in model.dadosDXF.graficos[\"LINES\"]:\n glColor3f(1.0,1.0,1.0)\n glLineWidth(1)\n glPushMatrix()\n glBegin(GL_LINES)\n glVertex3f(linha[0][0],linha[0][1],linha[0][2])\n glVertex3f(linha[1][0],linha[1][1],linha[1][2])\n glEnd()\n glPopMatrix()\n \n for circulo in model.dadosDXF.graficos[\"CIRCLES\"]: \n sides = 32 \n radius = circulo[1]\n glPushMatrix()\n glColor3f(1.0, 1.0, 1.0)\n glTranslate(circulo[0][0], circulo[0][1], circulo[0][2]) \n glBegin(GL_LINE_LOOP) \n for i in range(sides): \n cosine= radius * cos(i*2*pi/sides) \n sine = radius * sin(i*2*pi/sides) \n glVertex2f(cosine,sine)\n glEnd()\n glPopMatrix()\n \n for arc in model.dadosDXF.graficos[\"ARCS\"]:\n sides = 20 \n \n radius = arc[1]\n theta_ini = arc[2]\n theta_fin = arc[3]\n theta1 = m.radians(theta_ini) \n \n glPushMatrix()\n glColor3f(1.0, 1.0, 1.0)\n glTranslate(arc[0][0], arc[0][1], arc[0][2]) \n glBegin(GL_LINE_STRIP) \n for i in range(sides+1):\n if (theta_ini < theta_fin): \n theta = theta1 + m.radians((theta_fin-theta_ini)* (i/sides)) \n 
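
# Note on broadcast_server.py above: the except clause calls
# traceback.print_exc() but traceback is never imported, and on Python 3
# socket.sendto() needs bytes while `data` is a str. A minimal corrected
# sender loop using the same multicast group and port:
import socket, time, traceback

MCAST_GRP, MCAST_PORT = '224.0.0.1', 5007
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)

sent_no = 0
while True:
    try:
        data = "%d: %r\n" % (sent_no, time.time())
        sock.sendto(data.encode("ascii"), (MCAST_GRP, MCAST_PORT))  # bytes, not str
        time.sleep(0.5)
        sent_no += 1
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        traceback.print_exc()
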
elif (theta_ini > theta_fin):\n theta = (360 - (theta_ini - theta_fin))\n theta = theta1 + m.radians((theta)* (i/sides)) \n glVertex2f(radius * cos(theta),radius * sin(theta))\n glEnd()\n glPopMatrix() \n \n for lwpolylinha in model.dadosDXF.graficos[\"LWPOLYLINES\"]: \n glPushMatrix()\n glColor3f(1.0,1.0,1.0)\n glLineWidth(1) \n if lwpolylinha[0] == 0: \n glBegin(GL_LINE_STRIP)\n for p in lwpolylinha[1]:\n glVertex3f(p[0],p[1],p[2]) \n glEnd()\n elif lwpolylinha[0] == 1: \n glBegin(GL_LINE_LOOP)\n for p in lwpolylinha[1]:\n glVertex3f(p[0],p[1],p[2]) \n glEnd()\n glPopMatrix() \n \n for polylinha in model.dadosDXF.graficos[\"POLYLINES\"]: \n glPushMatrix()\n glColor3f(1.0,1.0,1.0)\n glLineWidth(1) \n glBegin(GL_LINE_STRIP)\n for p in polylinha:\n if round((p[0]+p[1]+p[2]),0)== 0:\n pass\n else:\n glVertex3f(p[0],p[1],p[2]) \n glEnd()\n glPopMatrix()\n \n for texto in model.dadosDXF.graficos[\"TEXTS\"]: \n glPushMatrix()\n glColor3f(1.0,1.0,1.0)\n glTranslatef(* texto[0][1])\n DesenhaStrokText(texto[1])\n glPopMatrix() \n \n for texto in model.dadosDXF.graficos[\"MTEXTS\"]:\n \"\"\"\n MTEXT_TOP_LEFT\t = 1\n MTEXT_TOP_CENTER\t = 2\n MTEXT_TOP_RIGHT\t = 3\n MTEXT_MIDDLE_LEFT\t = 4\n MTEXT_MIDDLE_CENTER = 5\n MTEXT_MIDDLE_RIGHT\t = 6\n MTEXT_BOTTOM_LEFT = 7\n MTEXT_BOTTOM_CENTER = 8\n MTEXT_BOTTOM_RIGHT\t = 9\n \"\"\"\n \n glPushMatrix()\n glColor3f(1.0,1.0,1.0)\n glTranslatef(* texto[0])\n glRotatef(texto[8], 0, 0, 1)\n try:\n t = texto[1].split(\";\")[1]\n t = t.replace(\"}\", \" \")\n DesenhaStrokText(t,texto[10])\n except:\n pass\n glPopMatrix() \n \n for insert in model.dadosDXF.graficos[\"INSERTS\"]:\n glPushMatrix()\n glTranslate(* insert[0])\n glScale(* insert[1])\n glRotatef(insert[2], 0, 0, 1)\n \n for entidade in insert[3]:\n if entidade[0] == \"LINE\": \n glColor3f(1.0,1.0,1.0)\n glLineWidth(1)\n glPushMatrix()\n glBegin(GL_LINES)\n glVertex3f(* entidade[1][0])\n glVertex3f(* entidade[1][1])\n glEnd()\n glPopMatrix()\n \n if entidade[0] == \"CIRCLE\": \n sides = 32 \n radius = entidade[1][1]\n glPushMatrix()\n glColor3f(1.0, 1.0, 1.0)\n glTranslate(* entidade[1][0]) \n glBegin(GL_LINE_LOOP) \n for i in range(sides): \n cosine= radius * cos(i*2*pi/sides) \n sine = radius * sin(i*2*pi/sides) \n glVertex2f(cosine,sine)\n glEnd()\n glPopMatrix()\n \n if entidade[0] == \"ARC\":\n sides = 20 \n \n radius = entidade[1][1]\n theta_ini = entidade[1][2]\n theta_fin = entidade[1][3]\n theta1 = m.radians(theta_ini) \n \n glPushMatrix()\n glColor3f(1.0, 1.0, 1.0)\n glTranslate(* entidade[1][0]) \n glBegin(GL_LINE_STRIP) \n for i in range(sides+1):\n if (theta_ini < theta_fin): \n theta = theta1 + m.radians((theta_fin-theta_ini)* (i/sides)) \n elif (theta_ini > theta_fin):\n theta = (360 - (theta_ini - theta_fin))\n theta = theta1 + m.radians((theta)* (i/sides)) \n glVertex2f(radius * cos(theta),radius * sin(theta))\n glEnd()\n glPopMatrix() \n \n if entidade[0] == \"LWPOLYLINE\": \n glPushMatrix()\n glColor3f(1.0,1.0,1.0)\n glLineWidth(1) \n if entidade[1][0] == 0:\n glBegin(GL_LINE_STRIP)\n for p in entidade[1][1]: \n glVertex3f(p[0],p[1],p[2]) \n glEnd()\n elif entidade[1][0] == 1:\n glBegin(GL_LINE_LOOP)\n for pp in entidade[1][1]: \n glVertex3f(p[0],p[1],p[2]) \n glEnd()\n glPopMatrix()\n \n for polylinha in model.dadosDXF.graficos[\"POLYLINES\"]: \n glPushMatrix()\n glColor3f(1.0,1.0,1.0)\n glLineWidth(1) \n glBegin(GL_LINE_STRIP)\n for p in polylinha:\n if round((p[0]+p[1]+p[2]),0)== 0:\n pass\n else:\n glVertex3f(p[0],p[1],p[2]) \n glEnd()\n glPopMatrix()\n \n glPopMatrix()\n 
glPopMatrix()\n \n \n \n \n else: #Se a variavel LISTA_PVS ainda nao estiver sido\n pass #atribuida, nao faz nada \n \n except Exception as e:\n if True:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print (e)\n finally:\n glEndList()\n \n \ndef CriaDesenhoTerreno(model, id_Terreno, mode=None):\n '''\n Descrição:\n Funcao que cria o desenhos do terreno, bem como os triangulos da\n triangulacao e a borda do terreno.\n Apenas cria e armazena na glList(num->id_Terreno), para que possa ser\n chamada para o desenho na area de trabalho do programa\n\n Utilização:\n CriaDesenhoTerreno(model, id_Terreno, mode)\n\n Parâmetros:\n model\n tipo(classe->Model) que contem os dados do programa bem como as o\n objeto terreno que quarda od dados do Terreno.\n id_Terreno \n tipo(int) -> numero que sera utilizado para chamar a glList quando\n for necessario desenhar os elementos na area de trabalho.\n mode \n tipo(mode -> GL_RENDER) -> Variavel da API Opengl, utilizada para\n função de selecao pelo metodo picking.\n \n Return:\n Nao tem valor de retorno, as acoes sao feitas na API do Opengl que esta\n ATIVA no momento da chamada da funcao\n ''' \n glNewList(id_Terreno, GL_COMPILE)\n try:\n # Verifica se a variavel LISTA_PVS ja estiver sido atribuida\n if hasattr(model, \"terreno\"):\n if model.terreno.visivel == True: \n glColor3f(0.8, 0.2, 0.5)\n \n if mode == GL_RENDER:\n glPushMatrix()\n glTranslatef(0.0, 0.0, -100)\n #model.terreno.DesenhaTriangulacao()\n model.terreno.DesenhaCurvasDeNiveis()\n model.terreno.DesenhaBordaTerreno()\n# try:\n# if hasattr(model, \"Perfis\"):\n# model.Perfis.DesenhaPerfil()\n# model.Perfis.DesenhaPVs()\n# model.Perfis.DesenhaTrechos()\n# except Exception as e:\n# if model.emDesenvolvimento == True:\n# exc_type, exc_obj, exc_tb = sys.exc_info()\n# fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n# print(exc_type, fname, exc_tb.tb_lineno)\n# print (e)\n \n if mode == GL_RENDER:\n glPopMatrix()\n \n else: #Se a variavel LISTA_PVS ainda nao estiver sido\n pass #atribuida, nao faz nada \n \n except Exception as e:\n if model.emDesenvolvimento == True:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print (e)\n finally:\n glEndList()\n \ndef CriaDesenhoPVs(model, id_PVs, mode=GL_RENDER):\n '''\n Descrição:\n Funcao que cria os denhos das singularidades do tipo Poco de Visita (PV).\n Apenas cria e armazena na glList(num->id_PVs), para que possa ser\n chamada para o desenho na area de trabalho do programa\n\n Utilização:\n CriaDesenhoPV(model, id_PVs, mode)\n\n Parâmetros:\n model\n tipo(classe->Model) que contem os dados do programa bem como as listas com\n os PVs que serao desenhados\n id_PVs \n tipo(int) -> numero que sera utilizado para chamar a glList quando\n for necessario desenhar os elementos na area de trabalho.\n mode \n tipo(mode -> GL_RENDER) -> Variavel da API Opengl, utilizada para\n função de selecao pelo metodo picking.\n \n Return:\n Nao tem valor de retorno, as acoes sao feitas na API do Opengl que esta\n ATIVA pv momento da chamada da funcao\n '''\n if mode == GL_RENDER:\n glNewList(id_PVs, GL_COMPILE)\n try: \n # Verifica se a variavel LISTA_PVS ja estiver sido atribuida\n if hasattr(model.Estrututura, \"LISTA_PVS\"):\n for pv in model.Estrututura.LISTA_PVS:\n if pv.visivel == True: \n if (model.selected == -1 or model.selected == None):\n 
glColor3f(* pv.color)\n elif pv.numero not in model.GetNosSelecionados(): \n glColor3f(* pv.color) \n elif pv.numero in model.GetNosSelecionados():\n #Desenha o quadradinho de mover o elemento\n #Altera a cor para a cor de selecao\n glColor3f(1.0, 0.0, 0.0)\n \n glPushName(pv.numero)\n pv.DesenhaPV() \n \n if type(model.selected)==list:\n if pv.numero in model.GetNosSelecionados():\n pv.DesenhaOutrosElementos()\n \n glPushName(1)\n pv.DesenhaPontosArrasto()\n glPopName()\n \n glPushName(2)\n pv.DesenhaPontoRotacao()\n glPopName()\n\n glPushName(3)\n pv.Label.DesenhaPontoArrastoLabel()\n glPopName()\n \n glPopName()\n \n else: #Se a variavel LISTA_PVS ainda nao estiver sido\n pass #atribuida, nao faz nada \n \n except Exception as e:\n if model.emDesenvolvimento == True:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print (e)\n finally:\n if mode == GL_RENDER: \n glEndList()\n \n \ndef CriaDesenhoTubos(model, id_Barras, mode=GL_RENDER):\n \"\"\"Desenha as Barras na instancia de PyOpengl ativa\n\n model-> Classe Model-> Encontra-se as variaveis das regras de negocio\n do programa.\n \"\"\"\n if (mode == None or mode == GL_RENDER):\n glNewList(id_Barras, GL_COMPILE)\n try:\n # Se a variavel LISTA_TUBULACOES ja estiver sido atribuida\n if hasattr(model.Estrututura, \"LISTA_TUBULACOES\"):\n tubos_selecionados = model.GetBarrasSelecionadas()\n for tubo in model.Estrututura.LISTA_TUBULACOES:\n if tubo.visivel == True:\n if (type(model.selected) is list and tubo.numero in \\\n tubos_selecionados):\n #SE TUBO ESTIVER SELECIONADA FICARA NA COR VERMELHA\n glColor3f(1.0, 0.0, 0.0)\n else:\n #CASO NAO ESTEJA SELECIONADA FICARA NA COR AZUL\n glColor3f(0.0, 0.0, 1.0) \n \n glPushName(tubo.numero) \n tubo.Desenha()\n glPopName()\n \n else: #Se a variavel LISTA_TUBULACOES ainda nao estiver sido\n pass #atribuida, nao faz nada\n \n except Exception as e:\n if model.emDesenvolvimento == True:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print (e)\n finally:\n if (mode == None or mode == GL_RENDER):\n glEndList()\n \n\n\ndef CriaDesenhoPerfis(model, id_Perfis, mode=GL_RENDER):\n \"\"\"Desenha os Perfis Longitudinais na instancia de PyOpengl ativa\n\n model-> Classe Model-> Encontra-se as variaveis das regras de negocio\n do programa.\n \"\"\"\n if (mode == None or mode == GL_RENDER):\n glNewList(id_Perfis, GL_COMPILE)\n try:\n # Se a variavel 'Perfis' ja estiver sido atribuida\n if hasattr(model, \"LISTA_PERFIS\"):\n for perfil in model.LISTA_PERFIS: \n glPushName(perfil.numero)\n perfil.DesenhaPerfil()\n perfil.DesenhaPVs()\n perfil.DesenhaTrechos() \n if mode == GL_SELECT:\n perfil.DesenhaSnapsSelect()\n glPopName()\n \n else: #Se a variavel LISTA_TUBULACOES ainda nao estiver sido\n pass #atribuida, nao faz nada\n \n except Exception as e:\n if model.emDesenvolvimento == True:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print (e)\n finally:\n if (mode == None or mode == GL_RENDER):\n glEndList()\n\ndef CriaDesenhoEixosGlobais(model, id_EixosGlobais):\n \"\"\"Cria o Desenho dos Eixos GLOBAIS, cria\n e armazena na glList(id_EixosGlobais)\n \"\"\"\n glNewList(id_EixosGlobais, GL_COMPILE)\n tamanho = model.TAMANHO_EIXOS_GLOBAIS*100\n x,y,z = model.POSICAO_EIXOS_GLOBAIS\n x,y,z = 
x*100,y*100,z*100\n \n b = int(tamanho/6.66) #base da seta e tamanho da seta\n e = int(tamanho/20) #Diametro do Eixo\n d = e+10 #Distancia das LETRAS XYZ da ponta da seta\n\n glPushMatrix()\n glTranslate(x,y,z)\n \n glDisable(GL_BLEND)\n c = gluNewQuadric()\n\n #Eixo x\n glColor4f(0.0, 0.0, 1.0, 0.0) #Azul\n glPushMatrix()\n #glTranslate(100,100,0)\n glRotate(90, 0, 1, 0)\n gluCylinder(c, e, e, tamanho, 5, 5)\n glTranslate(0,0,tamanho)\n gluCylinder(c, b, 0, b, 5, 5)\n\n glRasterPos3f(0.0,0.0,d)\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(\"X\"))\n\n glPopMatrix()\n\n #Eixo y\n glColor4f(0.0, 1.0, 0.0, 0.0) #Verde\n glPushMatrix()\n #glTranslate(100,100,0)\n glRotate(-90, 1, 0, 0)\n gluCylinder(c, e, e, tamanho, 5, 5)\n glTranslate(0,0,tamanho)\n gluCylinder(c, b, 0, b, 5, 5)\n\n glRasterPos3f(0.0,0.0,d)\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(\"Y\"))\n\n\n glPopMatrix()\n glEnable(GL_BLEND)\n \n glPopMatrix()\n glEndList()\n \n \ndef CriaEixosDasBarra(model, id_glListEixosBarras):\n \"\"\"\n Cria o desenho dos eixos locais XYZ das barras e armazena em uma glList\n \"\"\"\n try:\n glNewList(id_glListEixosBarras, GL_COMPILE)\n tamanho_eixos = model.TAMANHO_EIXOS_BARRAS*100\n #Se a variavel LISTA_TUBULACOES ja estiver sido atribuida\n if hasattr(model.Estrututura, \"LISTA_TUBULACOES\"):\n for barra in model.Estrututura.LISTA_TUBULACOES:\n if barra.visivel == True:\n vetor_barra_x_global = [barra.PV2.pos[0] - barra.PV1.pos[0], barra.PV2.pos[1] - barra.PV1.pos[1], barra.PV2.pos[2] - barra.PV1.pos[2]]\n \n vetor_barra_x_normalizado = Vector3(vetor_barra_x_global[0], vetor_barra_x_global[1], vetor_barra_x_global[2]).normalise()\n \n vetor_barra_x_normalizado = np.array(vetor_barra_x_normalizado[:])\n \n vetor_barra_z_normalizado = np.array([0,0,1])\n \n \n vetor_barra_x_normalizado_local = [1, 0, 0]\n \n vetor_barra_x_normalizado_local = Vector3(vetor_barra_x_normalizado_local[0], vetor_barra_x_normalizado_local[1],vetor_barra_x_normalizado_local[2])\n vetor_barra_z_normalizado = Vector3(vetor_barra_z_normalizado[0], vetor_barra_z_normalizado[1], vetor_barra_z_normalizado[2])\n \n \n vec_x = Vector3(vetor_barra_x_normalizado_local[0], vetor_barra_x_normalizado_local[1], vetor_barra_x_normalizado_local[2]).normalise()\n vec_z = Vector3(vetor_barra_z_normalizado[0], vetor_barra_z_normalizado[1], vetor_barra_z_normalizado[2]).normalise()\n \n \n vetor_barra_y_normalizado = vec_x.cross(vec_z)\n \n \n \n vetor_y_global = np.dot(vetor_barra_y_normalizado, barra.Ri)\n vetor_z_global = np.dot(vec_z, barra.Ri)\n \n \n x_medio = (barra.PV2.pos[0] - barra.PV1.pos[0])/2\n y_medio = (barra.PV2.pos[1] - barra.PV1.pos[1])/2\n z_medio = (barra.PV2.pos[2] - barra.PV1.pos[2])/2\n \n \n glPushMatrix()\n glTranslatef(barra.PV1.pos[0]+x_medio, barra.PV1.pos[1]+y_medio, barra.PV1.pos[2]+z_medio )\n \n glColor3f(0.0,0.0,1.0)\n glLineWidth(4)\n glBegin(GL_LINES)\n glVertex3f(0,0,0)\n glVertex3f(vetor_barra_x_normalizado[0]*tamanho_eixos,vetor_barra_x_normalizado[1]*tamanho_eixos,vetor_barra_x_normalizado[2]*tamanho_eixos)\n glEnd()\n \n \n glLineWidth(4)\n glColor3f(0.0,1.0,0.0)\n glBegin(GL_LINES)\n glVertex3f(0,0,0)\n glVertex3f(-vetor_y_global[0]*tamanho_eixos,-vetor_y_global[1]*tamanho_eixos,-vetor_y_global[2]*tamanho_eixos)\n glEnd()\n \n glLineWidth(4)\n glColor3f(1.0,0.0,0.0)\n glBegin(GL_LINES)\n glVertex3f(0,0,0)\n glVertex3f(vetor_z_global[0]*tamanho_eixos,vetor_z_global[1]*tamanho_eixos,vetor_z_global[2]*tamanho_eixos)\n glEnd()\n \n glPopMatrix()\n else:\n pass\n else: #Se a variavel 
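
# The frame construction in CriaEixosDasBarra above (local x along the bar,
# z taken as global up, y from a cross product) mixes the project's Vector3
# with numpy. The same geometry in plain numpy, as a hedged standalone sketch
# (local_frame, p1, p2 and `up` are illustrative names):
import numpy as np

def local_frame(p1, p2, up=(0.0, 0.0, 1.0)):
    """Return unit local x (along the bar), y and z axes for a bar p1 -> p2."""
    x = np.asarray(p2, dtype=float) - np.asarray(p1, dtype=float)
    x /= np.linalg.norm(x)
    y = np.cross(x, np.asarray(up, dtype=float))   # mirrors vec_x.cross(vec_z)
    norm = np.linalg.norm(y)
    if norm < 1e-9:                                # bar parallel to `up`
        y = np.cross(x, (0.0, 1.0, 0.0))
        norm = np.linalg.norm(y)
    y /= norm
    return x, y, np.cross(x, y)                    # z re-orthogonalised
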
LISTA_TUBULACOES nao estiver atribuida ainda, \n pass #nao faz nada\n except Exception as e:\n if model.emDesenvolvimento == True:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n print(exc_type, fname, exc_tb.tb_lineno)\n print (e)\n else:\n pass\n finally:\n glLineWidth(2)\n glEndList()\n \n\n\n\ndef MostraEscalaDeCores(xi, yi):\n xi = xi\n yi = yi\n\n dados = Cargas2Colors(-1000, 1000)\n\n glMatrixMode(GL_PROJECTION)\n glPushMatrix()\n glLoadIdentity()\n viewport = glGetIntegerv(GL_VIEWPORT)\n glOrtho(viewport[0], viewport[2], viewport[3], viewport[1], 0, 1)\n glLineWidth (1.0)\n\n for i in range(len(dados)):\n glBegin(GL_TRIANGLE_STRIP)\n glColor3ub(dados[i][1][0], dados[i][1][1], dados[i][1][2])\n glVertex3f(xi,(i*30)+yi, 0)\n glVertex3f(xi+25,(i*30)+yi, 0)\n glVertex3f(xi,(i*30)+yi+30, 0)\n glVertex3f(xi+25,(i*30)+yi+30, 0)\n glEnd()\n\n if i == 0:\n pass\n else:\n glColor3ub(0, 0, 0)\n glRasterPos3f(xi+40,(i*30)+yi+5, 0)\n DesenhaTexto(GLUT_BITMAP_HELVETICA_12, (str(dados[i][0])))\n\n glPopMatrix()\n\n\ndef Cargas2Colors(c_min, c_max):\n carga_maxima = c_max\n carga_minima = c_min\n\n cores= [[102, 153, 255,1],\n [153, 153, 255,2],\n [153, 204, 255,3],\n [153, 255, 255,4],\n [102, 255, 204,5],\n [153, 255, 153,6],\n [204, 255, 153,7],\n [255, 255, 000,8],\n [255, 208, 134,10],\n [255, 192, 127,11],\n [255, 161, 127,12],\n [255, 124, 119,13],\n [235, 131, 185,14],\n [237, 000, 220,15]]\n\n incremento = (abs(carga_minima)+abs(carga_maxima))/14\n\n cargas = [round(carga_minima+(incremento*i),0) for i in range(15)]\n\n cargas.sort(reverse=True)\n\n return zip(cargas, cores)\n\ndef DesenhaTexto(FONT, Texto):\n \"\"\"FUNCAO PARA DESENHAR CARACTERES BITMAP\n \n UTILIZAR A FORMA ABAIXO PARA DESENHAR\n #glRasterPos3f(pv.pos[0]-50,pv.pos[1]-20, pv.pos[2]+20)\n #DesenhaTexto(GLUT_BITMAP_HELVETICA_12, texto)\n \"\"\"\n for letra in Texto:\n glutBitmapCharacter(FONT, ord(letra))\n\n\ndef DesenhaStrokText(palavra, scale=1.0):\n glPointSize(1)\n glLineWidth(1)\n glScalef(scale/100, scale/100, scale/100)\n \n largura = 0\n for letra in palavra:\n largura += glutStrokeWidth(GLUT_STROKE_ROMAN, ord(letra) )\n glutStrokeCharacter(GLUT_STROKE_ROMAN, ord(letra))\n \n\ndef DesenhaTodosElementos(model, id_TodosElentos):\n glNewList(id_TodosElentos, GL_COMPILE)\n \n glInitNames()\n \n glPushName(2) # 2 - Para desenho das Tubulacoes\n CriaDesenhoTubos(model, 2, GL_SELECT)\n glPopName()\n \n glPushName(1) # 1 - Para desenho dos PVs\n CriaDesenhoPVs(model, 1, GL_SELECT) \n glPopName()\n \n glPushName(3) # 3 - Para desenho dos Perfis\n CriaDesenhoPerfis(model, 3, GL_SELECT) \n glPopName()\n \n glEndList()","sub_path":"MODEL/formasOpengl.py","file_name":"formasOpengl.py","file_ext":"py","file_size_in_byte":27285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"205085262","text":"from django.conf.urls import url\nfrom django.urls import path, include, re_path\n\nimport authapp.views as authapp\n\napp_name = 'auth'\n\nurlpatterns = [\n # path('accounts/', include('django.contrib.auth.urls')),\n # path('login/', authapp.login, name='login'),\n path('login/', authapp.Login.as_view(extra_context={'title': 'Авторизация'}), name='login'),\n path('logout/', authapp.logout, name='logout'),\n path('register/applicant',\n authapp.ApplicantRegistration.as_view(extra_context={'title': 'Регистрация', 'reg_type': 'applicant'}),\n name='register_applicant'),\n path('register/company',\n 
authapp.CompanyRegistration.as_view(extra_context={'title': 'Регистрация', 'reg_type': 'company'}),\n         name='register_company'),\n\n    re_path(r'^verify/(?P<email>.+)/(?P<activation_key>\\w+)/$', authapp.verify, name='verify'),\n\n]\n","sub_path":"authapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"594793160","text":"import random\nnumber_guess = random.randint(1,10)\ntimes_tried = 0\n\nistrue = True\nwhile istrue:\n    user_guess = int(input('Guess a number between 1 and 10, you will have 10 tries: > '))\n    if number_guess == user_guess:\n        print('You Won The Game!!')\n        istrue = False\n    else:\n        times_tried += 1\n        print(f'You Have Tried {times_tried} Times')\n        if user_guess > number_guess:\n            print('You Are Too High')\n        if user_guess < number_guess:\n            print('You Are Too Low')\n        if times_tried == 10:\n            # enforce the 10-try limit promised in the prompt\n            print(f'You Lost! The number was {number_guess}')\n            istrue = False\n    ","sub_path":"code/chad/python/lab_12/guess_numberv3.py","file_name":"guess_numberv3.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"476002815","text":"import numpy as np\r\nimport math\r\nfrom mcvapi.mcvbase import MCVBase\r\n\r\n\r\nclass BlobImage(MCVBase):\r\n\r\n    IMPORT = \"from mcvapi.blobs.blob_image import BlobImage\"\r\n\r\n    def load(self, data, **kwargs):\r\n        super(BlobImage, self).load(data, **kwargs)\r\n        self._param_blobimage_margin = data.get('blobimage_margin', 0)\r\n        \r\n\r\n    def save(self, data, **kwargs):\r\n        data = super().save(data, **kwargs)\r\n        data['blobimage_margin'] = self._param_blobimage_margin\r\n        return data\r\n\r\n    def process(self, blobs, **kwargs):\r\n        _in_original_image = kwargs['oimage']\r\n        for blob in blobs:\r\n            p1, p2 = blob._bounding\r\n            margin = self._param_blobimage_margin\r\n            cut_x, cut_y, cut_xx, cut_yy = p1[0]-margin, p1[1]-margin, p2[0]+margin, p2[1]+margin\r\n            if cut_x<0: cut_x=0\r\n            if cut_y<0: cut_y=0\r\n            if cut_xx>_in_original_image.shape[1]: cut_xx=_in_original_image.shape[1]\r\n            if cut_yy>_in_original_image.shape[0]: cut_yy=_in_original_image.shape[0]\r\n\r\n            partial_image = _in_original_image[cut_y:cut_yy, cut_x:cut_xx]\r\n            \r\n            blob._oimage = partial_image\r\n            \r\n            \r\n        return blobs\r\n\r\n    def processflow(self, blobs, **kwargs):\r\n        blobs = super(BlobImage, self).processflow(blobs, **kwargs)\r\n        return BlobImage.process(self, blobs, **kwargs)\r\n\r\n","sub_path":"mcvapi/blobs/blob_image.py","file_name":"blob_image.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"4842333","text":"import sys\nimport cv2\nfrom keras.models import load_model\nimport numpy as np\n\nfrom utils.datasets import get_labels\nfrom utils.inference import detect_faces\nfrom utils.inference import draw_text\nfrom utils.inference import draw_bounding_box\nfrom utils.inference import apply_offsets\nfrom utils.inference import load_detection_model\nfrom utils.inference import load_image\nfrom utils.preprocessor import preprocess_input\n\ndef getFaceEmotion(image_path):\n    # parameters for loading data and images\n    base ='C://Users/lenovo/Desktop/moodify/components/emotion-classification'\n    detection_model_path = base+'/trained_models/detection_models/haarcascade_frontalface_default.xml'\n    emotion_model_path = base+'/trained_models/emotion_models/fer2013_denseNet.59-0.68.hdf5'\n    emotion_labels = get_labels('fer2013')\n    font = cv2.FONT_HERSHEY_SIMPLEX\n\n    # 
hyper-parameters for bounding boxes shape\n emotion_offsets = (0, 0)\n\n # loading models\n face_detection = load_detection_model(detection_model_path)\n emotion_classifier = load_model(emotion_model_path, compile=False)\n\n # getting input model shapes for inference\n emotion_target_size = emotion_classifier.input_shape[1:3]\n\n # loading images\n rgb_image = load_image(image_path, grayscale=False)\n gray_image = load_image(image_path, grayscale=True)\n gray_image = np.squeeze(gray_image)\n gray_image = gray_image.astype('uint8')\n\n faces = detect_faces(face_detection, gray_image)\n for face_coordinates in faces:\n x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)\n gray_face = gray_image[y1:y2, x1:x2]\n\n try:\n gray_face = cv2.resize(gray_face, (emotion_target_size))\n except:\n continue\n\n gray_face = preprocess_input(gray_face, True)\n gray_face = np.expand_dims(gray_face, 0)\n gray_face = np.expand_dims(gray_face, -1)\n emotion_label_arg = np.argmax(emotion_classifier.predict(gray_face))\n emotion_text = emotion_labels[emotion_label_arg]\n\n color = (255, 0, 0)\n draw_bounding_box(face_coordinates, rgb_image, color)\n draw_text(face_coordinates, rgb_image, emotion_text, color, 0, -10, 0.5, 2)\n\n bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)\n cv2.imwrite('predicted_test_image.png', bgr_image)\n return emotion_text\n","sub_path":"moodify/components/emotion-classification/src/image_emotion_gender_demo.py","file_name":"image_emotion_gender_demo.py","file_ext":"py","file_size_in_byte":2344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"12076988","text":"#!/usr/bin/env python\n\n\"\"\"\nSynthesize the codes from a text file giving repeated code indices.\n\nAuthor: Herman Kamper\nContact: kamperh@gmail.com\nDate: 2021\n\"\"\"\n\nfrom pathlib import Path\nimport argparse\nimport json\nimport librosa\nimport numpy as np\nimport pyloudnorm\nimport sys\nimport torch\n\nfrom model import Encoder, Decoder\nfrom preprocess import preemphasis\n\n\n#-----------------------------------------------------------------------------#\n# UTILITY FUNCTIONS #\n#-----------------------------------------------------------------------------#\n\ndef check_argv():\n \"\"\"Check the command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=__doc__.strip().split(\"\\n\")[0], add_help=False\n )\n parser.add_argument(\n \"checkpoint\", type=str, help=\"model checkpoint\"\n )\n parser.add_argument(\n \"code_indices_fn\", type=str, help=\"text file with code indices\"\n )\n parser.add_argument(\n \"--speaker\", default=\"V002\", choices=[\"V001\", \"V002\"],\n help=\"speaker identifier (default: %(default)s)\"\n )\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n return parser.parse_args()\n\n\n#-----------------------------------------------------------------------------#\n# MAIN FUNCTION #\n#-----------------------------------------------------------------------------#\n\ndef main():\n args = check_argv()\n\n # Code indices\n code_indices_fn = Path(args.code_indices_fn)\n print(\"Reading: {}\".format(code_indices_fn))\n code_indices = np.loadtxt(code_indices_fn, dtype=np.int)\n\n # Speakers\n with open(Path(\"datasets/2019/english/speakers.json\")) as f:\n speakers = sorted(json.load(f))\n\n # Model\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n encoder = Encoder(\n in_channels=80, channels=768, n_embeddings=512, embedding_dim=64,\n jitter=0.5\n )\n decoder = 
Decoder(\n in_channels=64, conditioning_channels=128, n_speakers=102,\n speaker_embedding_dim=64, mu_embedding_dim=256, rnn_channels=896,\n fc_channels=256, bits=8, hop_length=160,\n )\n decoder.to(device)\n\n print(\"Reading: {}\".format(args.checkpoint))\n checkpoint_path = args.checkpoint\n checkpoint = torch.load(\n checkpoint_path, map_location=lambda storage, loc: storage\n )\n encoder.load_state_dict(checkpoint[\"encoder\"])\n decoder.load_state_dict(checkpoint[\"decoder\"])\n encoder.eval()\n decoder.eval()\n\n # Codes\n embedding = encoder.codebook.embedding.cpu().numpy()\n codes = np.array([embedding[code_indices]])\n\n # Synthesize\n z = torch.FloatTensor(codes).to(device)\n speaker = torch.LongTensor([speakers.index(args.speaker)]).to(device)\n with torch.no_grad():\n output = decoder.generate(z, speaker)\n\n wav_fn = Path(code_indices_fn.stem).with_suffix(\".wav\")\n print(\"Writing: {}\".format(wav_fn))\n librosa.output.write_wav(wav_fn, output.astype(np.float32), sr=16000)\n\n # # Loadness\n # meter = pyloudnorm.Meter(16000)\n # output_loudness = meter.integrated_loudness(output)\n # output = pyloudnorm.normalize.loudness(output, output_loudness, ref_loudness)\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"synthesize_codes.py","file_name":"synthesize_codes.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"256317793","text":"import pandas as pd\nimport sklearn.model_selection\nimport sklearn.svm\n\n#電力データ\ned = [pd.read_csv(\n 'shikoku_electricity_%d.csv' % year,\n skiprows=3,\n names=['DATE','TIME','consumption'],\n parse_dates={'date_hour':['DATE','TIME']},\n index_col=\"date_hour\")\n for year in [2012,2013,2014,2015,2016]\n]\n\nelec_data = pd.concat(ed) #5年分のデータを連結\n\n#気象データ\ntmp=pd.read_csv(\n u'47891_高松.csv',\n parse_dates={'date_hour': [\"日時\"]},\n index_col=\"date_hour\",\n na_values=\"X\"\n)\n\ndel tmp[\"時\"] #不要な列の削除\n\n#列名を変換するためのdictionary\ncolumns = {\n \"降水量(mm)\": \"rain\",\n \"気温(℃)\": \"temperature\",\n \"日照時間(h)\": \"sunhour\",\n \"湿度(%)\": \"humid\",\n}\ntmp.rename(columns=columns, inplace=True)\n\n#二つを統合。 気温 vs 消費電力\ntakamatsu = elec_data.join(tmp[\"temperature\"]).dropna().as_matrix()\n\ntakamatsu_elec =takamatsu[:,0:1]\ntakamatsu_wthr =takamatsu[:,1:]\n\n\n#気温から消費電力を予測する\n\n#交差検証 全体を5分割する。\nkf=sklearn.model_selection.KFold(n_splits=5)\nfor train, test in kf.split(takamatsu_wthr):\n x_train = takamatsu_wthr[train]\n x_test = takamatsu_wthr[test]\n y_train = takamatsu_elec[train]\n y_test = takamatsu_elec[test]\n\n # 1列n行を1行n列に変換。\n y_train = y_train.ravel()\n y_test = y_test.ravel()\n\n #Support Vector Regression\n model=sklearn.svm.SVR()\n model.fit(x_train, y_train) #学習\n\n print(\"Linear: Training score = %f, Testing score = %f\" % (model.score(x_train,y_train),model.score(x_test,y_test)))\n","sub_path":"機械学習入門実践編/MainWork1.py","file_name":"MainWork1.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"163563604","text":"#!/usr/bin/env python3\n# Set up imports and paths\nimport time\nfrom mindaffectBCI.noisetag import NoiseTag\nfrom gpiozero import LED \n\n#---------------------------------------------------------------------\ndef draw():\n \"\"\"draw the LED state\"\"\"\n #print(\"Background state\"+str(backgroundstate))\n # get the updated stimulus state info\n global nt, leds, framerate\n nt.updateStimulusState()\n 
stimulus_state,target_state,objIDs,sendEvents=nt.getStimulusState()\n\n    # BODGE: sleep to limit the stimulus update rate\n    time.sleep(1/framerate)\n    # update the state of each LED to match the stimulus state\n    for idx in range(len(leds)):\n        # get the background state of this cell\n        bs = stimulus_state[idx] if stimulus_state else None\n        if bs is not None and bs > 0:\n            leds[idx].on()\n        else:\n            leds[idx].off()\n    # send info on updated display state\n    nt.sendStimulusState()\n\ndef selectionHandler(objID):\n    print(\"Selection: objID=%d\"%(objID))\n\n#------------------------------------------------------------------------\n# Initialization : display\ndef init():\n    global nt, leds, framerate\n    framerate = 60  # stimulus update rate in Hz; draw() sleeps 1/framerate\n\n    # GPIO pin numbers for the LEDs (board-specific assumption: adjust to your wiring)\n    led_pins = [17, 27]\n    leds = []\n    objIDs = []\n    for i in range(len(led_pins)):\n        leds.append(LED(led_pins[i]))\n        objIDs.append(i+1)\n\n    nt = NoiseTag()\n    nt.startExpt(objIDs,nCal=10,nPred=10,\n                cueduration=4,duration=10,feedbackduration=4)\n    # register function to call if selection is made\n    nt.addSelectionHandler(selectionHandler)\n\nif __name__==\"__main__\":\n    init()\n    while True:\n        draw()\n","sub_path":"examples/presentation/rpigpio.py","file_name":"rpigpio.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"427127045","text":"#coding:utf-8\n\nimport os,re,datetime,time\nimport xlrd\nfrom werkzeug.utils import secure_filename\nimport csv\nimport pandas as pd\nfrom tbd import msrun\n\n\n# def saveFile(_file):\n#     # _file = _file.filename \n#     basepath = os.path.dirname(__file__)\n#     upload_path = os.path.join(basepath,'static/upload',secure_filename(_file))\n#     _file.save(upload_path)\n#     return upload_path \n\n\n\ndef operateExcel(_file):\n    allowed_extensions = ['xls','xlsx']\n    if '.' in _file and _file.rsplit('.')[-1] in allowed_extensions:\n        data = xlrd.open_workbook(_file)\n        table = data.sheets()[0]\n        nrows = table.nrows\n        row_list = [table.row_values(i) for i in range(0,nrows)]\n        return row_list\n    else:\n        if '.' 
in _file and _file.rsplit('.')[-1] == 'csv':\n            with open(_file,'r',encoding='936') as myFile:\n                reader = csv.reader(myFile)\n                print(reader)\n                row_list = []\n                for row in reader: \n                    for i in range(len(row)):#发现了等于号的问题,以下是去除等于号和后面的特殊标签\n                        if '=' in row[i]:\n                            row[i] = str(re.findall(r'\"[\\s\\S]+\"',row[i])).replace(\"'\",\"\")\n                            row[i] = row[i].replace('[\"','')\n                            row[i] = row[i].replace('\"]','')\n                    row_list.append(row)\n                    print(row)\n                return row_list\n        return '文件不支持'\n\n\n\n\n\ndef toLead(filename,fields,row_list):\n    detail_count = {'success_num':0,'failed_num':0}\n    for i in row_list:\n        b = [str(e) for e in i]\n        sql = 'insert into %s(%s) values(%s)' %(filename,','.join(fields),'\"' + '\",\"'.join(b) +'\"')\n        # print(sql)\n        operate_c = msrun.Create(sql) \n        if operate_c == 'insert successful!':\n            detail_count['success_num'] += 1\n        else:\n            detail_count['failed_num'] += 1\n        # print(sql)\n    return detail_count\n    \n#京东FBP售后单导入模块\ndef keshenRefundtoLead(filename,fields,row_list):\n    detail_count = {'success_num':0,'failed_num':0,'total_num':0,'abandon_num':0}\n    for i in row_list: \n        _rt_leadlist = [str(e) for e in i] #小列表中,循环每个元素,定义为字符串类型,组合新的小列表。\n        if _rt_leadlist[17] == '' or _rt_leadlist[17] == '运单号':\n            detail_count['abandon_num'] += 1\n        else:\n            sql = 'select product from assist_ks_jdfbprefund where express_id = \"%s\"'%(_rt_leadlist[17])\n            _product_list = msrun.Query(sql)\n            if not _product_list:\n                sql = 'insert into %s(%s) values(%s)' %(filename,','.join(fields),'\"' + '\",\"'.join(_rt_leadlist) +'\"')\n                operate_c = msrun.Create(sql) \n                if operate_c == 'insert successful!':\n                    detail_count['success_num'] += 1\n                else:\n                    detail_count['failed_num'] += 1\n            else:\n                _products = []\n                for i in _product_list:\n                    _products.append(i['product'])\n                if _rt_leadlist[3] in _products:\n                    detail_count['abandon_num'] += 1\n                else:\n                    sql = 'insert into %s(%s) values(%s)' %(filename,','.join(fields),'\"' + '\",\"'.join(_rt_leadlist) +'\"')\n                    operate_c = msrun.Create(sql) \n                    if operate_c == 'insert successful!':\n                        detail_count['success_num'] += 1\n                    else:\n                        detail_count['failed_num'] += 1\n    return detail_count\n\n\n\n#顺丰委托单导入模块\ndef keshensftoLead(filename,fields,row_list):\n    detail_count = {'success_num':0,'failed_num':0,'total_num':0,'abandon_num':0}\n    _rt_errorlist = []\n    for i in row_list: \n        _rt_leadlist = [str(e) for e in i] #小列表中,循环每个元素,定义为字符串类型,组合新的小列表。\n        if re.match(r'^[0-9/]+$',_rt_leadlist[6]):\n            sql = 'select id from assist_ks_sfconsignor where express_id = \"%s\"'%(_rt_leadlist[6])\n            if not msrun.Query(sql):\n                if re.match(r'^[56]',_rt_leadlist[6]):\n                    print('核实出来了')\n                    _rt_errorlist.append(_rt_leadlist[6])\n                else:\n                    print('我没核实出来')\n                    print(_rt_leadlist[6])\n                    pass\n                sql = 'insert into %s(%s) values(%s)' %(filename,','.join(fields),'\"' + '\",\"'.join(_rt_leadlist) +'\"')\n                operate_c = msrun.Create(sql) \n                if operate_c == 'insert successful!':\n                    detail_count['success_num'] += 1\n                else:\n                    detail_count['failed_num'] += 1\n            else:\n                detail_count['abandon_num'] += 1\n            \n        else:\n            detail_count['abandon_num'] += 1\n    print(_rt_errorlist)\n    return detail_count,_rt_errorlist\n\n\n#中央维修导入模块\ndef centreRepairtoLead(row_list):\n    fields = 
['id','order_status','warehouse','是否已处理过','technician','warranty_type','malfunction_type','express_type','sn','换新序列号','发货订单编号','final_tag','关联订单号','shop','purchasing_time','创建时间','creator','audit_time','auditor','finished_time','保修金额','warranty_quantity','完成人','最后修改时间','外部推送编号','推送错误信息','nickname','寄件客户姓名','寄件客户固话','寄件客户手机','寄件客户邮编','寄件客户省市县','寄件客户地址','收件物流公司','收件物流单号','收件备注','return_name','寄回客户固话','return_mobile','寄回邮编','return_area','return_address','寄件指定物流公司','寄件物流单号','寄件备注','productid','保修货品名称','product','discribe','warranty']\n filename = 'centre_repair_data'\n detail_count = {'success_num':0,'failed_num':0,'total_num':0,'abandon_num':0}\n for i in row_list: \n _rt_leadlist = [str(e) for e in i] #小列表中,循环每个元素,定义为字符串类型,组合新的小列表。\n if re.match(r'^[BX0-9]+$',_rt_leadlist[0]):\n sql_verify = 'select id from centre_repair_data where id = \"%s\"'%(_rt_leadlist[0])\n print(sql_verify)\n if not msrun.Query(sql_verify):\n sql_insert = 'insert into %s(%s) values(%s)' %(filename,','.join(fields),'\"' + '\",\"'.join(_rt_leadlist) +'\"')\n print(sql_insert)\n operate_c = msrun.Create(sql_insert) \n if operate_c == 'insert successful!':\n detail_count['success_num'] += 1\n else:\n print(sql_insert)\n detail_count['failed_num'] += 1\n else:\n detail_count['abandon_num'] += 1\n \n else:\n detail_count['abandon_num'] += 1\n create_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n sql_modify = 'update centre_repair_data set operate_status = 0,create_time = \"%s\" where create_time is null' %(create_time)\n print(sql_modify)\n print(msrun.Modify(sql_modify) + '我是更新中央维修主表的操作状态和时间')\n tagged_num,tagged_failed_num,insert_repeate_s_num,insert_repeate_f_num = msrun.centreRepairOperateRepeat()\n return detail_count,tagged_num,tagged_failed_num,insert_repeate_s_num,insert_repeate_f_num\n\n\ndef salesSummarizeToLead(dataname,fields,_rt_list,id_occur):\n detail_count = {'success_num':0,'failed_num':0,'abandon_num':0}\n for row in _rt_list:\n print(row) \n fields_verify = ['sales_date','warehouse','shop','productid','ordertype']\n sql_verify = 'select id from sales where date_format(sales_date,%s)=%s and warehouse = %s and shop = %s and productid = %s and ordertype = %s'%(\"'%s'\"%('%Y-%m-%d'),\"'%s'\"%(row[0]),\"'%s'\"%(row[1]),\"'%s'\"%(row[2]),\"'%s'\"%(row[3]),\"'%s'\"%(row[8]))\n print(sql_verify)\n if not msrun.Query(sql_verify):\n for i in range(len(row)):\n row[i] = str(row[i])\n sql = 'insert into %s (%s) values(%s) '%(dataname,','.join(fields),'\"' + '\",\"'.join(row) +'\"')\n print(sql)\n salse_insert = msrun.Create(sql)\n if salse_insert == 'insert successful!':\n detail_count['success_num'] += 1\n else:\n print(sql)\n detail_count['failed_num'] += 1\n else:\n detail_count['abandon_num'] += 1\n sql_update = 'update sales set operator = %s,create_time = now() where operator is null'%(id_occur)\n print(msrun.Modify(sql_update) + '我是销售汇总表更新时间和操作人')\n sql_platform = 'update sales a,primary_warec b set a.platform = b.platform where a.warehouse = b.warehouse and a.platform is null'\n print(msrun.Modify(sql_platform) + '我是销售汇总表更新平台')\n print(sql_platform)\n return detail_count\n\n\ndef stockSummarizeToLead(dataname,fields,row_list,date):\n detail_count = {'success_num':0,'failed_num':0,'abandon_num':0}\n for i in row_list:\n b = [str(e) for e in i]\n fields_verify = 'select id from %s where date_format(stock_date,%s)=%s and warehouse = %s and productid = %s and quantity_stock = %s '%(dataname,\"'%s'\"%('%Y-%m-%d'),\"'%s'\"%(date),\"'%s'\"%(i[0]),\"'%s'\"%(i[1]),\"'%s'\"%(i[2]))\n if 
msrun.Query(fields_verify):\n detail_count['abandon_num'] += 1\n pass\n else: \n sql = 'insert into %s(%s) values(%s)' %(dataname,','.join(fields),'\"' + '\",\"'.join(b) +'\"')\n # print(sql)\n operate_c = msrun.Create(sql) \n if operate_c == 'insert successful!':\n detail_count['success_num'] += 1\n else:\n detail_count['failed_num'] += 1\n # print(sql)\n return detail_count","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":10144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"637761103","text":"# Copyright (c) 2021. yoshida-lab. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\nimport argparse\nfrom os import remove, rename\nfrom pathlib import Path\n\nfrom tqdm import tqdm\n\nfrom .utils import config\n\n\ndef migrate(args_):\n data_path = Path('~/.xenonpy/dataset').expanduser().absolute()\n user_path = Path(config('userdata')).expanduser().absolute()\n\n def migrate_(f):\n path = data_path / f\n if not path.exists():\n return\n if args_.keep:\n path_ = user_path / f\n rename(str(path), str(path_))\n else:\n remove(str(path))\n\n for file in tqdm(['mp_inorganic.pkl.pd_', 'mp_structure.pkl.pd_', 'oqmd_inorganic.pkl.pd_',\n 'oqmd_structure.pkl.pd_'], desc='migrating'):\n migrate_(file)\n\n\nparser = argparse.ArgumentParser(\n prog='XenonPy',\n description='''\n XenonPy is a Python library that implements a comprehensive set of\n machine learning tools for materials informatics.\n ''')\nsubparsers = parser.add_subparsers()\n\nparser_migrate = subparsers.add_parser('migrate', help='see `migrate -h`')\nparser_migrate.add_argument(\n '-k',\n '--keep',\n action='store_true',\n help=\n 'Keep files fetched from `yoshida-lab/dataset`. 
These files will be moved to `userdata` dir.'\n)\nparser_migrate.set_defaults(handler=migrate)\n\nargs = parser.parse_args()\nif hasattr(args, 'handler'):\n args.handler(args)\nelse:\n parser.print_help()\n","sub_path":"xenonpy/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"344981879","text":"from dataLoader import loadFromXML\nfrom errorNotifier import errorMessage\nfrom threading import Lock, Thread \nimport socket\nimport pickle\nimport struct\nimport requests\nimport time\n\n#Constants \nSERVER_STRING = \"http://localhost:2000\"\nUSER_CONNECT = 'user_connect'\nNODE_CONNECT = 'node_connect'\n\nclass ServerNode(object):\n\t#The address parameter is a tuple that should \n\t#be built as follows (ip_address, tcp_port, udp_port)\n\tdef __init__(self, xmlFileLocation, adjacentNodesAddresses, multicastGroup, socketAddress):\n\t\t# Load employee data from xml\n\t\temployeeList = loadFromXML(xmlFileLocation)\n\t\t# Determine number of adjacent nodes\n\t\tnumberOfAdjacentNodes = len(adjacentNodesAddresses)\n\n\t\t# Update the server with the info \n\t\tThread(target = self.updateServer, args = (socketAddress, numberOfAdjacentNodes, )).start()\n\n\t\t# Start multi thread\n\t\tThread(target = self.multicastWorker, args = (socketAddress, multicastGroup, numberOfAdjacentNodes, )).start()\n\n\t\t# Start tcp thread\n\t\tThread(target = self.tcpWorker, args = (socketAddress, employeeList, adjacentNodesAddresses, )).start()\n\n\tdef updateServer(self, socketAddress, numberOfAdjacentNodes):\n\t\t#Extract the ip address and tcp port \n\t\ttcpAddress = (socketAddress[0], socketAddress[1])\n\t\t#Generate the info necessary \n\t\tinformation = pickle.dumps((tcpAddress, numberOfAdjacentNodes))\n\n\t\t#Try to update server until you succede \n\t\twhile 1:\n\t\t\ttry:\n\t\t\t\tresult = requests.put(SERVER_STRING, data = information)\n\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\ttime.sleep(60)\n\t\t\t\tcontinue\n\n\tdef multicastWorker(self, socketAddress, multicastGroup, numberOfAdjacentNodes):\n\t\t#Extract tcp address and udp address\n\t\ttcpAddress = (socketAddress[0], socketAddress[1])\n\t\tudpAddress = (socketAddress[0], socketAddress[2])\n\n\t\t#Create a udp socket\n\t\ttry:\n\t\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\t\texcept:\n\t\t\terrorMessage(\"Udp socket failed to create\")\n\t\t\treturn \n\n\t\t# Tell the operating system to add the socket to the multicast group\n\t\t# on all interfaces.\n\t\tgroup = socket.inet_aton(multicastGroup)\n\t\tmreq = struct.pack('4sL', group, socket.INADDR_ANY)\n\t\tsock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)\n\t\tsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n\n\t\t#Bind socket to udp_address\n\t\ttry:\n\t\t\tsock.bind(udpAddress)\n\t\texcept:\n\t\t\terrorMessage(\"Udp socket binding failed\")\n\t\t\treturn\n\n\t\twhile 1:\n\t\t\t# Get the message from sender\n\t\t\tmessage = sock.recv(1024)\n\n\t\t\t# Unpickle the message\n\t\t\taddress = pickle.loads(message)\n\n\t\t\t# Sent the data back to the user on the specified addr\n\t\t\tinformation = pickle.dumps((numberOfAdjacentNodes, tcpAddress))\n\t\t\tsock.sendto(information, address)\n\n\tdef tcpWorker(self, socketAddress, employeeList, adjacentNodesAddresses):\n\t\t#Extract tcp address\n\t\ttcpAddress = (socketAddress[0], socketAddress[1])\n\t\t\n\t\t#Create empty list of employees\n\t\tlistEmpl = []\n\n\t\t#Create a tcp 
socket\n\t\ttry:\n\t\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\texcept:\n\t\t\terrorMessage(\"Tcp socket failed to create\")\n\t\t\treturn\n\n\t\t#Bind socket to given host and port\n\t\ttry:\n\t\t\tsock.bind(tcpAddress) \n\t\texcept:\n\t\t\terrorMessage(\"Tcp socket failed to bind\")\n\t\t\treturn\n\n\t\twhile 1:\n\n\t\t\t#Clear the list of employees\n\t\t\tlistEmpl[:] = []\n\n\t\t\t#Listen for max 1 connection\n\t\t\tsock.listen(1)\n\n\t\t\t#Get info from conectee\n\t\t\tconn, addr = sock.accept()\n\t\t\t\n\t\t\t#Wait to receive information\n\t\t\tcode = conn.recv(20)\n\n\t\t\t#If code stands for user connect, gather data from other nodes\n\t\t\tif code == USER_CONNECT:\n\t\t\t\tlistEmpl = self.gatherData(adjacentNodesAddresses)\n\n\t\t\tlistEmpl.extend(employeeList)\t\n\n\t\t\t#Transform the data into a byte stream\n\t\t\tdataStream = pickle.dumps(listEmpl)\n\n\t\t\t#Send stream of data to user\n\t\t\tconn.send(dataStream)\n\t\t\tconn.close()\n\n\tdef gatherData(self, adjacentNodesAddresses):\n\t\t#Temporary list\n\t\ttempList = []\n\t\t#Gather data from all adjacent nodes\n\t\tfor nodeAddress in adjacentNodesAddresses:\n\t\t\tt = Thread(target = self.getInfoFromNode, args=(nodeAddress, tempList))\n\t\t\tt.start()\n\t\t\tt.join()\n\n\t\treturn tempList\n\n\tdef getInfoFromNode(self, node, tempList):\n\t\t#Get a lock\n\t\tlock = Lock()\n\n\t\t#Generate error message for node\n\t\tmsg = \"Error connecting to node \", node\n\n\t\t#Try connecting to it \n\t\ttry:\n\t\t\ts = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\ts.connect(node)\n\t\texcept:\n\t\t\terrorMessage(msg)\n\t\t\treturn\n\n\t\t#Say hello and get the data it has\n\t\ts.send(NODE_CONNECT)\n\t\tdata = s.recv(2048)\n\n\t\tif data is not None:\n\t\t\twith lock:\n\t\t\t\ttempList.extend(pickle.loads(data))\n\n\t\ts.close()\n","sub_path":"PAD/Lab2/server/Libs/serverNode.py","file_name":"serverNode.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"554808696","text":"from nltk.corpus import wordnet\nimport spacy\nnlp = spacy.load(\"en_core_web_lg\")\n\n\n# synonims by nltk corpus\ndef get_syn_words(item):\n word = item\n synonyms = []\n for syn in wordnet.synsets(word):\n for l in syn.lemmas():\n synonyms.append(l.name())\n print(\"synonym for \" , word)\n synonym = set(synonyms)\n for s in synonym:\n print(s)\n similer_words = check_similarity(synonym, word)\n return similer_words\n# //synonims by nltk corpus\n\n\ndef check_similarity(wordtest, check_word):\n similar_words = []\n try:\n token1 = nlp(check_word)\n for word in wordtest:\n token2 = nlp(word)\n if token2.has_vector == True and token1.similarity(token2) > 0.69 and token1.similarity(token2) < 1:\n # if token1.similarity(token2) > 0.69 and token1.similarity(token2) < 1:\n print(\"Similarity:\", token1.similarity(token2), word, token2.has_vector)\n similar_words.append(word)\n\n\n return similar_words\n except BaseException as e:\n print(\"Error on_data: %s\" % str(e))\n","sub_path":"SynonymTest.py","file_name":"SynonymTest.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"596748429","text":"#!/usr/bin/python\n#-*-coding:utf-8 -*-\n# author: zhou yong xia\n# creation date: 2018-09-29\n# description: opencv在视频中抓取指定帧号的图像时,有时会抓不到该帧图像\n\n\nimport cv2\n\n\n\ninput_file = '/home/zyx/ftp/anonymous/DukeMTMCvideos/camera1/00001.MTS'\ncapture = 
cv2.VideoCapture(input_file)\nframes = [44, 31, 1, 5, 10, 29, 15, 35]\nif capture.isOpened():\n for num in frames:\n cur_frame_num = num\n capture.set(cv2.CAP_PROP_POS_FRAMES, cur_frame_num)\n frame_num = capture.get(cv2.CAP_PROP_POS_FRAMES)\n set_num = cur_frame_num\n while frame_num > cur_frame_num:\n set_num -= 1\n print('set_num=', set_num)\n capture.set(cv2.CAP_PROP_POS_FRAMES, set_num)\n frame_num = capture.get(cv2.CAP_PROP_POS_FRAMES)\n\n while frame_num < cur_frame_num:\n rval, frame = capture.read()\n frame_num += 1\n\n print('need frame=%d, grabed frame=%d' % (cur_frame_num, frame_num))\n\n rval, frame = capture.read()\n '''\n if rval:\n #frame_num = capture.get(cv2.CAP_PROP_POS_FRAMES)\n #print('need frame=%d, grabed frame=%d' % (cur_frame_num, frame_num-1))\n else:\n print('failed to grab cur_frame_num=', cur_frame_num)\n '''\n print(\"end of grabing images\")\nelse:\n print('cannot open the video', input_file)\n\n\n\n","sub_path":"test_grab_image_from_video.py","file_name":"test_grab_image_from_video.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"137745901","text":"from tkinter import *\r\n#from tkinter import ttk\r\nimport os\r\n\r\ndef imprimir():\r\n escala=str(sc_escala.get())\r\n print(escala)\r\n \r\napp = Tk()\r\napp.title('')\r\n\r\napp.geometry('300x500')\r\napp.configure(background='#404040')\r\n\r\n#Dimensoes da janela \r\nlargura = 300\r\naltura = 500\r\n\r\n#Resolução da tela\r\nlargura_tela = app.winfo_screenwidth()\r\naltura_tela = app.winfo_screenheight()\r\n\r\n#posição na janela\r\nposx = largura_tela/2 - largura/2\r\nposy = altura_tela/2 - altura/2\r\n\r\n#Definir a centralização da janela no monitor\r\napp.geometry('%dx%d+%d+%d'% (largura,altura,posx,posy))\r\n\r\nlbl = Label(app,text='Escala',bg='#404040',fg='#fff',font='Times 20 bold italic')\r\nlbl.place(x=10,y=10, width=280, height=30)\r\n\r\nsc_escala=Scale(app,from_=0,to=100,orient=HORIZONTAL) \r\nsc_escala.place(x=10,y=50, width=280,height=60)\r\nsc_escala.set(50)\r\n\r\nbotao = Button(app,text=\"Imprimir Senha\", command=imprimir)\r\nbotao.place(x=10,y=120,width=280,height=30)\r\n\r\n\r\napp.mainloop()","sub_path":"escala.py","file_name":"escala.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"205879395","text":"#!/usr/bin/env python3 \nimport json,pickle,math,matplotlib,sys,os,string,subprocess\nfrom sys import platform as sys_pf\nif sys_pf == 'darwin':\n import matplotlib\n matplotlib.use(\"TkAgg\")\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport tkinter as tk\nimport tkinter.ttk\nimport itertools\nfrom matplotlib import pyplot as plt\nfrom scipy import stats\nfrom sklearn.manifold import TSNE\nfrom sklearn.preprocessing import MinMaxScaler\nsys.path.insert(0, '../dataprocessing/')\nfrom miscFunctions import sortSINumerically,reindexDataFrame,setMaxWidth,returnSpecificExtensionFiles,returnTicks\nsys.path.insert(0, '../plotting/')\nfrom plottingGUI import checkUncheckAllButton,selectLevelValuesPage\nimport facetPlotLibrary as fpl\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.widgets import RectangleSelector\nfrom umap import UMAP\nfrom sklearn.manifold import Isomap\nfrom sklearn.decomposition import PCA\n#import clusterPlottingLibrary as cpl\nimport itertools\nfrom dataFrameValueSelectionGUI import DataSelectionHomePage\nimport 
operateOnDataSelection as ods\nimport interactiveGUIElements as ipe\n\nidx = pd.IndexSlice\n\nclass DimensionReductionHomePage(tk.Frame):\n num_args = 2\n def __init__(self, master,fName,bp,shp):\n global folderName,backpage,secondaryhomepage\n folderName = fName\n backpage = bp\n secondaryhomepage = shp\n tk.Frame.__init__(self, master)\n \n mainWindow = tk.Frame(self)\n mainWindow.pack(side=tk.TOP,padx=10,pady=10)\n \n self.preprocessedDict = {}\n def getUpdateData(event):\n self.DimRedCombo['values'] = self.preprocessedDict[self.PreprocessedCombo.get()]\n\n maxFnLen = 0\n maxDfLen = 0\n for scaledData in os.listdir('outputData/analysisFiles/scaledData'):\n dimreds = []\n if '.DS' not in scaledData:\n if len(scaledData) > maxFnLen:\n maxFnLen = len(scaledData)\n for dimRed in os.listdir('outputData/analysisFiles/reducedData'):\n if scaledData.split('-')[0] == dimRed.split('-')[0]:\n dimreds.append(dimRed)\n if len(dimRed) > maxDfLen:\n maxDfLen = len(dimRed)\n if not isinstance(dimreds,list):\n dimreds = [dimreds]\n self.preprocessedDict[scaledData] = ['new']+dimreds\n\n l1 = tk.Label(mainWindow, text=\"\"\"Select Preprocessed Subset: \"\"\").grid(row=0,column=0,sticky=tk.W)\n self.PreprocessedCombo = tkinter.ttk.Combobox(mainWindow,values = list(self.preprocessedDict.keys()))\n self.PreprocessedCombo['width'] = maxFnLen \n self.PreprocessedCombo.bind('<<ComboboxSelected>>', getUpdateData)\n self.PreprocessedCombo.grid(row = 0,column = 1,sticky=tk.W)\n if len(self.PreprocessedCombo['values']) == 1:\n self.PreprocessedCombo.set(self.PreprocessedCombo['values'][0])\n \n l2 = tk.Label(mainWindow, text=\"Dimensional Reduction Type: \").grid(row=1,column=0,sticky=tk.W,pady=(0,10))\n v2 = tk.StringVar()\n v2.set('umap')\n rb2a = tk.Radiobutton(mainWindow,text=\"umap\",padx = 20, variable=v2, value='umap')\n rb2b = tk.Radiobutton(mainWindow,text=\"tsne\",padx = 20, variable=v2, value='tsne')\n rb2d = tk.Radiobutton(mainWindow,text=\"isomap\",padx = 20, variable=v2, value='isomap')\n rb2e = tk.Radiobutton(mainWindow,text=\"pca\",padx = 20, variable=v2, value='pca')\n rb2a.grid(row=1,column=1,sticky=tk.W)\n rb2b.grid(row=2,column=1,sticky=tk.W)\n rb2d.grid(row=3,column=1,sticky=tk.W)\n rb2e.grid(row=4,column=1,sticky=tk.W)\n \n \"\"\"\n l1 = tk.Label(mainWindow, text=\"Select Preprocessed Data Subset: \").grid(row=0,column=0,sticky=tk.W)\n \n dropVar = tk.StringVar()\n fileList = returnSpecificExtensionFiles('postProcessedData/scaledData','.pkl',True)\n dropMenu = tk.OptionMenu(mainWindow,dropVar,*fileList)\n dropVar.set(fileList[0])\n setMaxWidth(fileList,dropMenu)\n dropMenu.grid(row=1,column=0,sticky=tk.W)\n \n \"\"\"\n\n l3 = tk.Label(mainWindow, text=\"\"\"Action: \"\"\").grid(row=6,column=0,sticky=tk.W,pady=(0,10))\n v3 = tk.StringVar()\n v3.set('create')\n rb3a = tk.Radiobutton(mainWindow,text=\"create\",padx = 20, variable=v3, value='create')\n rb3b = tk.Radiobutton(mainWindow,text=\"plot\",padx = 20, variable=v3, value='plot')\n rb3a.grid(row=6,column=1,sticky=tk.W)\n rb3b.grid(row=7,column=1,sticky=tk.W)\n \n self.DimRedCombo = tkinter.ttk.Combobox(mainWindow,state='readonly')\n self.DimRedCombo['width'] = maxDfLen\n self.DimRedCombo.grid(row = 7,column = 2)\n \n def collectInputs():\n dataSelectionFileName = self.PreprocessedCombo.get()\n print('outputData/analysisFiles/scaledData/'+dataSelectionFileName)\n scaledData = pickle.load(open('outputData/analysisFiles/scaledData/'+dataSelectionFileName,'rb'))\n dataSubsetTitle = dataSelectionFileName.split('-scaledBy')[0]\n if v3.get() == 
'create':\n master.switch_frame(InteractiveDimensionReductionPage,scaledData,dataSubsetTitle,v2.get(),[])\n else:\n reductionFileName = self.DimRedCombo.get()\n reducedData = pickle.load(open('outputData/analysisFiles/reducedData/'+reductionFileName,'rb'))\n master.switch_frame(InteractiveDimensionReductionPage,scaledData,dataSubsetTitle,v2.get(),reducedData)\n\n buttonWindow = tk.Frame(self)\n buttonWindow.pack(side=tk.TOP,pady=10)\n\n tk.Button(buttonWindow, text=\"OK\",command=lambda: collectInputs()).grid(row=5,column=0)\n tk.Button(buttonWindow, text=\"Back\",command=lambda: master.switch_frame(backpage,folderName,secondaryhomepage)).grid(row=5,column=1)\n tk.Button(buttonWindow, text=\"Quit\",command=quit).grid(row=5,column=2)\n\nclass InteractiveDimensionReductionPage(tk.Frame):\n def __init__(self, master,scaledData,dataSubsetTitle,dimRedType,plottingReduction):\n tk.Frame.__init__(self, master)\n \n #Initialize 1x1 canvas for interactive plots\n plotFrame = tk.Frame(self)\n plotFrame.grid(row=0,column=0,columnspan=3)\n fig = plt.figure(figsize=(10, 7))\n gs = fig.add_gridspec(1, 1)\n fig.subplots_adjust(left=0.25)\n offset = -0.1\n levelPlotAxis = fig.add_subplot(gs[0])\n self.canvas = FigureCanvasTkAgg(fig,master=plotFrame)\n self.canvas.draw()\n self.canvas.get_tk_widget().pack()\n \n #Dimensional reduction plot (can be colored/resized/restyled by different level values in dropdowns)\n #Level parameter controls\n levelPlotWindow = tk.Frame(self)\n levelPlotWindow.grid(row=1,column=0,sticky=tk.N)\n levelParameterList = ['hue','style','size']\n levelParameterValueDict = {}\n #Dimensional reduction parameter controls\n dimRedNumericParameterWindow = tk.Frame(self)\n dimRedNumericParameterWindow.grid(row=1,column=1,sticky=tk.N)\n sliderList = ipe.createParameterAdjustmentSliders(dimRedNumericParameterWindow,ods.dimReductionNumericParameterDict[dimRedType],ods.dimReductionNumericParameterBounds)\n dimRedQualitativeParameterWindow = tk.Frame(self)\n dimRedQualitativeParameterWindow.grid(row=1,column=2,sticky=tk.N)\n radiobuttonList,radiobuttonVars = ipe.createParameterSelectionRadiobuttons(dimRedQualitativeParameterWindow,ods.dimReductionQualitativeParameterDict[dimRedType],ods.dimReductionQualitativeParameterValues)\n #Plot\n numericParametersForDimensionReduction = ipe.getSliderValues(sliderList,ods.dimReductionNumericParameterDict[dimRedType])\n qualitativeParametersForDimensionReduction = ipe.getRadiobuttonValues(radiobuttonVars)\n self.currentReductionParameters = numericParametersForDimensionReduction.copy()\n self.currentReductionParameters.update(qualitativeParametersForDimensionReduction.copy())\n if len(plottingReduction) == 0:\n self.reducedData = ods.reduceDimensions(scaledData,dimRedType,[],self.currentReductionParameters) \n else:\n self.reducedData = plottingReduction.copy()\n self.reducedDataWithFeatures = pd.concat([self.reducedData,scaledData],axis=1)\n levelList = list(self.reducedData.index.names)+['None']\n featureList = list(scaledData.columns)\n for level in levelParameterList:\n if level == 'hue' or level == 'size':\n levelParameterValueDict[level] = levelList.copy()+featureList.copy()\n else:\n levelParameterValueDict[level] = levelList.copy()\n plottingDfReduced = self.reducedData.reset_index()\n plottingDfReducedWithFeatures = self.reducedDataWithFeatures.reset_index()\n kwargs,defaultDict = ipe.getDefaultKwargs(plottingDfReduced)\n dropdownList,dropdownVarsDict = 
ipe.createParameterSelectionDropdowns(levelPlotWindow,levelParameterList,levelParameterValueDict,defaultDict)\n ipe.updateDropdownControlledPlot(self.canvas,levelPlotAxis,plottingDfReducedWithFeatures,dropdownVarsDict,'Dimension 1','Dimension 2',legendoffset=offset)\n \n self.originalxlims = levelPlotAxis.get_xlim()\n self.originalylims = levelPlotAxis.get_ylim()\n self.currentxlims = levelPlotAxis.get_xlim()\n self.currentylims = levelPlotAxis.get_ylim()\n\n def updateDimRedPlot(sliderList,radiobuttonVars):\n levelPlotAxis.clear()\n numericParametersForDimensionReduction = ipe.getSliderValues(sliderList,ods.dimReductionNumericParameterDict[dimRedType])\n qualitativeParametersForDimensionReduction = ipe.getRadiobuttonValues(radiobuttonVars)\n self.currentReductionParameters = numericParametersForDimensionReduction.copy()\n self.currentReductionParameters.update(qualitativeParametersForDimensionReduction.copy())\n self.reducedData = ods.reduceDimensions(scaledData,dimRedType,[],self.currentReductionParameters)\n plottingDfReduced = self.reducedData.reset_index()\n self.reducedDataWithFeatures = pd.concat([self.reducedData,scaledData],axis=1)\n plottingDfReducedWithFeatures = self.reducedDataWithFeatures.reset_index()\n ipe.updateDropdownControlledPlot(self.canvas,levelPlotAxis,plottingDfReducedWithFeatures,dropdownVarsDict,'Dimension 1','Dimension 2',legendoffset=offset)\n levelPlotAxis.set_xlim(self.currentxlims)\n levelPlotAxis.set_ylim(self.currentylims)\n self.canvas.draw()\n \n #Click and drag widget\n def line_select_callback(eclick, erelease):\n 'eclick and erelease are the press and release events'\n x1, y1 = eclick.xdata, eclick.ydata\n x2, y2 = erelease.xdata, erelease.ydata\n\n def toggle_selector(event):\n print(' Key pressed.')\n if event.key in ['Q', 'q'] and toggle_selector.RS.active:\n print(' RectangleSelector deactivated.')\n toggle_selector.RS.set_active(False)\n if event.key in ['A', 'a'] and not toggle_selector.RS.active:\n print(' RectangleSelector activated.')\n toggle_selector.RS.set_active(True)\n rectpropsdict = {'facecolor':'grey','alpha':0.2,'edgecolor':'grey'}\n toggle_selector.RS = RectangleSelector(levelPlotAxis, line_select_callback,drawtype='box', useblit=True,button=[1, 3], minspanx=5, minspany=5,spancoords='pixels',interactive=True,rectprops=rectpropsdict)\n self.ts = toggle_selector.RS\n self.canvas.mpl_connect('key_press_event', toggle_selector)\n \n def zoomIn():\n clusterSelectionBox = toggle_selector.RS.corners\n ll = np.array([clusterSelectionBox[0][0], clusterSelectionBox[1][0]]) # lower-left\n ur = np.array([clusterSelectionBox[0][2], clusterSelectionBox[1][2]]) # upper-right\n \n inidx = np.all(np.logical_and(ll <= self.reducedData.values, self.reducedData.values <= ur), axis=1)\n inbox = self.reducedData.loc[inidx]\n bufferval = 0.1\n xlims = [min(inbox['Dimension 1'])-bufferval,max(inbox['Dimension 1'])+bufferval]\n ylims = [min(inbox['Dimension 2'])-bufferval,max(inbox['Dimension 2'])+bufferval]\n self.currentxlims = xlims\n self.currentylims = ylims\n levelPlotAxis.set_xlim(xlims)\n levelPlotAxis.set_ylim(ylims)\n self.canvas.draw()\n \n def zoomOut():\n levelPlotAxis.set_xlim(self.originalxlims)\n levelPlotAxis.set_ylim(self.originalylims)\n self.currentxlims = self.originalxlims \n self.currentylims = self.originalylims\n self.canvas.draw()\n \n def update():\n ipe.updateDropdownControlledPlot(self.canvas,levelPlotAxis,pd.concat([self.reducedData,scaledData],axis=1).reset_index(),dropdownVarsDict,'Dimension 1','Dimension 2') \n 
levelPlotAxis.set_xlim(self.currentxlims)\n levelPlotAxis.set_ylim(self.currentylims)\n self.canvas.draw()\n\n def exportDimRed():\n ods.savePostProcessedFile(self.reducedData,dataSubsetTitle,'reduce',dimRedType,self.currentReductionParameters)\n print('Dimensional Reduction Saved!')\n if plotAllDimReds.get():\n if 'Event' in plottingDfReducedWithFeatures.columns or 'event' in plottingDfReducedWithFeatures.columns:\n sizeParam = {'s':5}\n else:\n sizeParam = {}\n for feature in pd.unique(scaledData.columns):\n g = sns.relplot(data=plottingDfReducedWithFeatures,x='Dimension 1',y='Dimension 2',hue=feature,palette='coolwarm',alpha=0.7,**sizeParam)\n leg = g._legend\n if max(plottingDfReducedWithFeatures[feature]) > 100 and len(sizeParam) > 0:\n a,b = returnTicks([-1000,100,10000,100000])\n for t, l in zip(leg.texts[1:],(b)):\n t.set_text(l)\n else:\n for t in leg.texts:\n # truncate label text to 4 characters\n if t.get_text() == '1.2000000000000002':\n t.set_text('1.0')\n else:\n if '.' in t.get_text():\n if t.get_text().replace('.','',1).isdigit():\n decimalIndex = t.get_text().find('.')\n t.set_text(round(float(t.get_text()),2))\n reducedName = ods.getFileName(dataSubsetTitle,'reduce',dimRedType,self.currentReductionParameters,fileExtension = '.png')[0]\n subprocess.run(['mkdir','plots/'+reducedName[:-4]+'-featureColoredPlots'])\n plt.savefig('plots/'+reducedName[:-4]+'-featureColoredPlots/'+str(feature)+'.png',bbox_inches='tight')\n plt.clf()\n print(feature+' plot saved')\n\n tk.Button(self, text=\"Update plot styling\",command=lambda: update()).grid(row=2,column=0)\n tk.Button(self, text=\"Zoom in\",command=lambda: zoomIn()).grid(row=4,column=0)\n tk.Button(self, text=\"Zoom out\",command=lambda: zoomOut()).grid(row=5,column=0)\n tk.Button(self, text=\"Update hyperparameters\",command=lambda: updateDimRedPlot(sliderList,radiobuttonVars)).grid(row=2,column=1,columnspan=2)\n\n tk.Button(self,text='Save plot',command=lambda: fig.savefig('plots/'+ods.getFileName(dataSubsetTitle,'reduce',dimRedType,self.currentReductionParameters,plotParameterDict=ipe.getDropdownValues(dropdownVarsDict),fileExtension='.png')[0],bbox_inches='tight')).grid(row=3,column=0)\n tk.Button(self,text='Save dimensional reduction',command=lambda: exportDimRed(),font='Helvetica 14 bold').grid(row=3,column=1,columnspan=2)\n plotAllDimReds = tk.BooleanVar()\n plotAllDimRedsButton = tk.Checkbutton(self,text='Save all feature-colored dimension reduction plots?',variable=plotAllDimReds)\n plotAllDimRedsButton.select()\n plotAllDimRedsButton.grid(row=4,column=1,columnspan=2)\n \n def okCommand():\n exportDimRed()\n master.switch_frame(backpage,folderName,secondaryhomepage)\n\n #Default save and quit buttons\n buttonWindow = tk.Frame(self)\n buttonWindow.grid(row=6,column=0,columnspan=2)\n try:\n k=backpage\n except NameError: \n backpage,folderName,secondaryhomepage = pickle.load(open('misc/dimRedPlottingParamList.pkl','rb'))\n \n tk.Button(buttonWindow, text=\"OK\",command=lambda: master.switch_frame(backpage,folderName,secondaryhomepage)).grid(row=0,column=0)\n tk.Button(buttonWindow, text=\"Back\",command=lambda: master.switch_frame(DimensionReductionHomePage,folderName,backpage,secondaryhomepage)).grid(row=0,column=1)\n tk.Button(buttonWindow, text=\"Quit\",command=lambda: 
quit()).grid(row=0,column=2)\n\n","sub_path":"programs/analysis/dimensionReductionGUI.py","file_name":"dimensionReductionGUI.py","file_ext":"py","file_size_in_byte":17292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"59037725","text":"from flask import Flask\nfrom flask import request\nimport json\nimport time\nimport requests\nfrom collections import defaultdict\n\nclients = defaultdict(dict)\nurls = ['http://google.com']\napp = Flask(__name__)\napp.debug = True\n\n\n@app.route('/')\ndef root():\n return app.send_static_file('index.html')\n\n\n@app.route('/<path:path>')\ndef static_proxy(path):\n return app.send_static_file(path)\n\n\n@app.route('/clients')\ndef client():\n return json.dumps(clients)\n\n\n@app.route('/urls', methods=['POST', 'GET'])\ndef url():\n global urls\n if request.method == 'POST':\n form = json.loads(request.data)\n if form['url'].startswith('http'):\n urls.append(form['url'])\n else:\n return json.dumps({'error': 'not url'})\n urls = list(set(urls))\n return json.dumps(urls), 200\n\n\n@app.route('/listen', methods=['POST', 'GET'])\ndef listen():\n global clients\n if request.method == 'POST':\n clients[request.remote_addr]['heartbeat'] = time.time()\n form = request.form\n clients[request.remote_addr]['cpuload'] = form['cpuload']\n clients[request.remote_addr]['hostname'] = form['hostname']\n return 'OK', 200\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5555)\n","sub_path":"router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"518715135","text":"import os\nimport sys\n\nmax_col = 39\nmax_row = 22\nselect = 1\n\nclass File:\n\tdef __init__(self, path):\n\t\tself.path = path\n\n\thead = 0\n\tpath = \"\"\n\tstream = []\n\t\n\tdef read_file(self):\n\t\ttry:\n\t\t\twith open(self.path) as fp:\n\t\t\t\tself.stream = fp.readlines()\n\t\texcept:\n\t\t\tprint(\"Wrong file!\")\n\t\t\tsys.exit(0)\n\n\t\tfor i in range(0, len(self.stream)):\n\t\t\tself.stream[i] = self.stream[i][:-1]\n\ndef get_col():\n\tsz = os.get_terminal_size()\n\treturn sz.columns\n\ndef get_row():\n\tsz = os.get_terminal_size()\n\treturn sz.lines\n\ndef print_half(File, i):\n\tline = \"\"\n\tif len(File.stream) > File.head + i:\n\t\tline = File.stream[File.head + i]\n\t\t\t\n\t\tif len(line) < max_col:\n\t\t\tj = len(line)\n\t\t\twhile j < max_col:\n\t\t\t\tline += \" \"\n\t\t\t\tj +=1\n\telse:\n\t\tj = 0\n\t\twhile j < max_col:\n\t\t\tline += \" \"\n\t\t\tj +=1\n\treturn line[:max_col]\n\ndef print_file(File1, File2):\n\tstr1 = \"\"\n\tstr2 = \"\"\n\n\tfor i in range(0, max_row):\n\t\tstr1 = print_half(File1, i)\n\t\tstr2 = print_half(File2, i)\n\n\t\tprint(str1, \"|\", str2)\n\t\n\ti = 0\n\thr = \"\"\n\twhile i < max_col * 2:\n\t\thr += \"-\"\n\t\ti +=1\n\tprint(hr)\n\nif __name__ == '__main__':\n\tmax_col = int(get_col() / 2 -2)\n\tmax_row = int(get_row() -2)\n\n\tpath1 = input(\"Input file1 path : \")\n\tFile1 = File(path1)\n\tFile1.read_file()\n\n\tpath2 = input(\"Input file2 path : \")\n\tFile2 = File(path2)\n\tFile2.read_file()\n\n\tcmd = \"\"\n\twhile cmd != \"q\":\n\t\tprint_file(File1, File2)\n\n\t\tif select == 1:\n\t\t\tfile_name = path1 + \" ] \"\n\t\telse:\n\t\t\tfile_name = path2 + \" ] \"\n\t\tcmd = input(file_name)\n\n\t\tif select == 1:\n\t\t\tif cmd == \"w\" or cmd == \"\\x1b[A\":\n\t\t\t\tif File1.head != 0:\n\t\t\t\t\tFile1.head -=1\n\n\t\t\tif cmd == \"s\" or cmd == 
\"\\x1b[B\":\n\t\t\t\tFile1.head +=1\n\n\t\telse:\n\t\t\tif cmd == \"w\" or cmd == \"\\x1b[A\":\n\t\t\t\tif File2.head != 0:\n\t\t\t\t\tFile2.head -=1\n\n\t\t\tif cmd == \"s\" or cmd == \"\\x1b[B\":\n\t\t\t\tFile2.head +=1\n\n\t\tif cmd == \"a\" or cmd == \"\\x1b[D\":\n\t\t\tselect = 1\n\n\t\tif cmd == \"d\" or cmd == \"\\x1b[C\":\n\t\t\tselect = 2\n\t\n\tos.system(\"clear\")\n\t\t\n\t\n\n","sub_path":"doubleViewer/doubleViewer.py","file_name":"doubleViewer.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"191401108","text":"import sys\r\nfrom socket import timeout\r\ntry:\r\n import urllib.request as urllib_request\r\n from urllib.parse import urlencode\r\n from urllib.error import URLError\r\nexcept ImportError: # Python 2\r\n import urllib2 as urllib_request\r\n from urllib2 import URLError\r\n from urllib import urlencode\r\n\r\nclass SendGridClient(object):\r\n\r\n \"\"\"SendGrid API.\"\"\"\r\n\r\n def __init__(self, username, password, **opts):\r\n \"\"\"\r\n Construct SendGrid API object.\r\n\r\n Args:\r\n username: SendGrid username\r\n password: SendGrid password\r\n user: Send mail on behalf of this user (web only)\r\n\r\n \"\"\"\r\n self.username = username\r\n self.password = password\r\n self.host = opts.get('host', 'https://api.sendgrid.com')\r\n self.port = opts.get('port', '443')\r\n self.endpoint = '/api/mail.send.json'\r\n self.mail_url = self.host + ':' + self.port + self.endpoint\r\n # urllib cannot connect to SSL servers using proxies\r\n self.proxies = opts.get('proxies', None)\r\n\r\n def _build_body(self, message):\r\n if sys.version_info < (3,0):\r\n message.from_email = message.from_email.encode('utf-8')\r\n message.from_name = message.from_name.encode('utf-8')\r\n message.subject = message.subject.encode('utf-8')\r\n message.text = message.text.encode('utf-8')\r\n message.html = message.html.encode('utf-8')\r\n message.reply_to = message.reply_to.encode('utf-8')\r\n\r\n\r\n values = {\r\n 'api_user': self.username,\r\n 'api_key': self.password,\r\n 'to[]': message.to,\r\n 'toname[]': message.to_name,\r\n 'bcc[]': message.bcc,\r\n 'from': message.from_email,\r\n 'fromname': message.from_name,\r\n 'subject': message.subject,\r\n 'text': message.text,\r\n 'html': message.html,\r\n 'replyto': message.reply_to,\r\n 'headers': message.headers,\r\n 'date': message.date,\r\n 'x-smtpapi': message.json_string()\r\n }\r\n for k in list(values.keys()):\r\n if not values[k]:\r\n del values[k]\r\n for filename in message.files:\r\n if message.files[filename]:\r\n values['files[' + filename + ']'] = message.files[filename]\r\n return values\r\n\r\n def send(self, message):\r\n try:\r\n if self.proxies:\r\n proxy_support = urllib_request.ProxyHandler(self.proxies)\r\n opener = urllib_request.build_opener(proxy_support)\r\n urllib_request.install_opener(opener)\r\n data = urlencode(self._build_body(message), True).encode('utf-8')\r\n req = urllib_request.Request(self.mail_url, data)\r\n response = urllib_request.urlopen(req, timeout = 10)\r\n body = response.read()\r\n return response.getcode(), body\r\n except URLError as e:\r\n return e.code, e.read()\r\n except timeout as e:\r\n return 408, e\r\n","sub_path":"flask/lib/python2.7/site-packages/sendgrid/sendgrid.py","file_name":"sendgrid.py","file_ext":"py","file_size_in_byte":3072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"390401082","text":"import os\nimport 
subprocess\nimport configparser\nimport re\nimport vim  # supplied by Vim's embedded Python interpreter; this module runs as a Vim plugin helper\n\ndef find_ansible_config_file():\n    cfg_files = []\n    for root, _, files in os.walk(\".\", topdown=False):\n        for f in files:\n            if f == \"ansible.cfg\":\n                full_path = os.path.join(root, f)\n                distance = len(full_path.split('/'))\n                cfg_files.append({'path': full_path, 'distance': distance})\n    if cfg_files:\n        cfg_files.sort(key=lambda v: v[\"distance\"])\n        return cfg_files[0]['path']\n    return \"./\"\n\n\n\ndef list_vault_identities():\n    config_file = find_ansible_config_file()\n    if not config_file:\n        return None\n    config = configparser.ConfigParser()\n    config.read(config_file)\n    identity_list_line = config[\"defaults\"][\"vault_identity_list\"]\n    # Extract possible options\n    # vault_identity_list = dev@./.dev_vault , test@./.test_vault\n    vault_ids = re.findall(r\"(\\w+)@\", identity_list_line)\n    return vault_ids\n\n\ndef encrypt():\n    if is_encrypted():\n        print(\"File is already encrypted\")\n        return\n\n    current_buffer = vim.current.buffer.name\n\n    ansible_dir = os.path.dirname(find_ansible_config_file())\n    if ansible_dir != '.':\n        vault_ids = list_vault_identities()\n        vault_ids_str = \", \".join(vault_ids)\n        vault_id = vim.eval(f'input(\"Enter the vault-id ({vault_ids_str})> \")')\n        if vault_id not in vault_ids:\n            print(f\"{vault_id} is not in {vault_ids}\")\n            return\n        cmd = f\"ansible-vault encrypt --encrypt-vault-id {vault_id} {current_buffer}\"\n    else:\n        current_buffer = '\\n'.join(vim.current.buffer[:])\n        cmd = f\"echo '{current_buffer}' | ansible-vault encrypt --output=-\"\n    result = subprocess.run(\n        cmd, shell=True, cwd=ansible_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n    )\n\n    if result.returncode != 0:\n        print(result.stderr)\n        return\n    vim.current.buffer[:] = result.stdout.splitlines()\n\n\ndef decrypt():\n    if not is_encrypted():\n        print(\"File is already decrypted\")\n        return\n\n    ansible_dir = os.path.dirname(find_ansible_config_file())\n    current_buffer = '\\n'.join(vim.current.buffer[:])\n    #current_buffer = vim.current.buffer.name\n    cmd = f\"echo '{current_buffer}' | ansible-vault decrypt --output=-\"\n    result = subprocess.run(\n        cmd, shell=True, cwd=ansible_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n    )\n\n    if result.returncode != 0:\n        print(f'echoerr \"{result.stderr}\"')\n        return\n    vim.current.buffer[:] = result.stdout.splitlines()\n\n\ndef is_encrypted():\n    for row in vim.current.buffer[:]:\n        if \"$ANSIBLE_VAULT\" in row:\n            return True\n    return False\n","sub_path":"lib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"268213629","text":"# -*- coding: utf-8 -*-\n# Create your views here.\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import auth\nfrom django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.template.context import RequestContext\nfrom django.http import Http404, HttpResponseRedirect\n\nfrom books.models import *\nfrom books.forms import *\n\ndef home(request):\n    books = Book.objects.all()\n\n    return render_to_response('books.html',\n                              {'books':books},\n                              context_instance=RequestContext(request))\n\ndef login(request):\n    if 'next' in request.GET and request.GET['next']:\n        next = request.GET['next']\n    else:\n        next = None\n\n    form = LoginForm(request.POST or None)\n\n    if form.is_valid():\n        username = form.cleaned_data['username']\n        password = form.cleaned_data['password']\n\n        user = 
auth.authenticate(username=username, password=password)\n\n if user and user.is_active:\n auth.login(request, user)\n\n if next:\n return HttpResponseRedirect(next)\n else:\n return HttpResponseRedirect('/')\n\n return render_to_response('login.html', {\n 'title' : u'Авторизация',\n 'form' : form,\n }, context_instance=RequestContext(request))\n\ndef logout(request):\n auth.logout(request)\n\n return HttpResponseRedirect(reverse('home'))\n\n@login_required\ndef book(request, book_id):\n book = get_object_or_404(Book, id = book_id)\n\n return render_to_response('book.html',\n {'book':book},\n context_instance=RequestContext(request))\n\n","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"351372224","text":"\"\"\"Un objet est issu d'une classe.\nLa classe est une forme de type de donnée, sauf qu'elle permet de définir des fonctions et variables propres au type.\nLes fonctions définies dans une classe sont appelées des méthodes.\nOn appelle une méthode d'un objet grâce à : objet.methode()\"\"\"\n\n# Exemple\nchaine = str() # Crée une chaîne vide\n# On aurait obtenu le même résultat en tapant chaine = \"\"\n\nwhile chaine.lower() != \"q\":\n\tprint(\"Tapez 'Q' pour quitter...\")\n\tchaine = input()\nprint(\"Merci.\")\n\n# Format. Première syntaxe.\nprenom = \"Philippe\"\nnom = \"Bihin\"\nage = 47\nprint(\"Je m'appelle {0} {1} ({3} {0} pour l'administration) et j'ai {2} ans.\".format(prenom, nom, age, nom.upper()))\n\n# Autre exemple sans chiffre dans les accolades\ndate = \"Dimanche 24 juillet 2011\"\nheure = \"17:00\"\nprint(\"Cela s'est produit le {}, à {}.\".format(date, heure))\n\n# Format. Deuxième syntaxe\n# exemple de formatage d'une adresse\nadresse = \"\"\"\n{no_rue} {nom_rue}\n {code_postal} {nom_ville} ({pays})\n\"\"\".format(no_rue=5, nom_rue=\"rue des Postes\", code_postal=75003, nom_ville=\"Paris\", pays=\"France\")\nprint(adresse)\n\n","sub_path":"cours/objets.py","file_name":"objets.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"621404738","text":"from scrapy.crawler import Crawler\nfrom scrapy import signals\n\nfrom .http import SeleniumRequest\nfrom .drivers import DriverPool\n\nclass RequestTypeMiddleware:\n def __init__(self, drivers_number: int, gfw, mode, ua):\n self.driver_pool = DriverPool(drivers_number, gfw, mode, ua)\n\n @classmethod\n def from_crawler(cls, crawler: Crawler):\n drivers_number = crawler.settings.getint(\"SELENIUM_POOL_SIZE\", 1)\n gfw_proxy = crawler.settings.get('GFW_PROXY')\n REQUEST_TYPE = crawler.settings.get('REQUEST_TYPE')\n user_agent = crawler.settings.get('USER_AGENT')\n middleware = cls(drivers_number,gfw_proxy,REQUEST_TYPE,user_agent)\n crawler.signals.connect(middleware.spider_closed, signals.spider_closed)\n return middleware\n\n def process_request(self, request: SeleniumRequest, spider):\n try:\n request.is_selenium\n except AttributeError:\n return None\n return self.driver_pool.get_response(request)\n\n def spider_closed(self, spider):\n self.driver_pool.close()\n","sub_path":"scrapy_async_selenium/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"613439036","text":"#!/usr/bin/env python\n\nfrom __future__ import (absolute_import, division, 
{"seq_id":"613439036","text":"#!/usr/bin/env python\n\nfrom __future__ import (absolute_import, division, print_function, unicode_literals)\n\nimport os\nimport pytest\nimport numpy as np\nimport netCDF4 as nc\nfrom gridded.grids import Grid, Grid_U, Grid_S, Grid_R\n\n\n@pytest.fixture()\ndef sg_data():\n base_dir = os.path.dirname(__file__)\n s_data = os.path.join(base_dir, 'data')\n filename = os.path.join(s_data, 'staggered_sine_channel.nc')\n return filename, nc.Dataset(filename)\n\n@pytest.fixture()\ndef sg_topology():\n return None\n\n@pytest.fixture()\ndef sg(sg_data, sg_topology):\n # fixtures are injected as arguments rather than called directly\n return Grid.from_netCDF(sg_data[0], sg_data[1], grid_topology=sg_topology)\n\n@pytest.fixture()\ndef ug_data():\n base_dir = os.path.dirname(__file__)\n s_data = os.path.join(base_dir, 'data')\n filename = os.path.join(s_data, 'tri_ring.nc')\n return filename, nc.Dataset(filename)\n\n@pytest.fixture()\ndef ug_topology():\n return None\n\n@pytest.fixture()\ndef ug(ug_data, ug_topology):\n return Grid.from_netCDF(ug_data[0], ug_data[1], grid_topology=ug_topology)\n\n\nclass TestPyGrid_S:\n def test_construction(self, sg_data, sg_topology):\n filename = sg_data[0]\n dataset = sg_data[1]\n grid_topology = sg_topology\n sg = Grid_S.from_netCDF(filename, dataset, grid_topology=grid_topology)\n assert sg.filename == filename\n\n sg2 = Grid_S.from_netCDF(filename)\n assert sg2.filename == filename\n\n sg3 = Grid.from_netCDF(filename, dataset, grid_topology=grid_topology)\n sg4 = Grid.from_netCDF(filename)\n print(sg3.shape)\n print(sg4.shape)\n assert sg == sg3\n assert sg2 == sg4\n\n\nclass TestPyGrid_U:\n def test_construction(self, ug_data, ug_topology):\n filename = ug_data[0]\n dataset = ug_data[1]\n grid_topology = ug_topology\n ug = Grid_U.from_netCDF(filename, dataset, grid_topology=grid_topology)\n# assert ug.filename == filename\n# assert isinstance(ug.node_lon, nc.Variable)\n# assert ug.node_lon.name == 'lonc'\n\n ug2 = Grid_U.from_netCDF(filename)\n assert ug2.filename == filename\n# assert isinstance(ug2.node_lon, nc.Variable)\n# assert ug2.node_lon.name == 'lon'\n\n ug3 = Grid.from_netCDF(filename, dataset, grid_topology=grid_topology)\n ug4 = Grid.from_netCDF(filename)\n print(ug3.shape)\n print(ug4.shape)\n assert ug == ug3\n assert ug2 == ug4\n\n\n@pytest.fixture()\ndef rg_data():\n lons = np.array((0,10,20,30,40,55))\n lats = np.array((0,2,3,4,5,7,9))\n return lons, lats\n\n@pytest.fixture()\ndef example_rg():\n lons = np.array((0,10,20,30,40,55))\n lats = np.array((0,2,3,4,5,7,9))\n rg = Grid_R(node_lon=lons,\n node_lat=lats)\n return rg\n\nclass TestGrid_R:\n def test_construction(self, rg_data):\n node_lon = rg_data[0]\n node_lat = rg_data[1]\n rg = Grid_R(node_lon=node_lon,\n node_lat=node_lat)\n\n def test_locate_faces(self, example_rg):\n points = np.array(([5,1],[6,1],[7,1],[-1,0],[42,0]))\n idxs = example_rg.locate_faces(points)\n answer = np.array(([0,0],[0,0],[0,0],[-1,-1],[4,0]))\n assert np.all(idxs == answer)\n\n points = np.array([5,1])\n idxs = example_rg.locate_faces(points)\n answer = np.array([0,0])\n assert np.all(idxs == answer)\n\n def test_interpolation(self, example_rg):\n example_rg.node_lon = np.array([0,1,2,5])\n example_rg.node_lat = np.array([0,1,2,12])\n points = np.array(([0.5,0.5],[3.5,2],[-1,0],[0,-1]))\n v1 = np.mgrid[0:4,0:4][1]\n val = example_rg.interpolate_var_to_points(points, v1, method='linear')\n assert np.all(np.isclose(val,np.array([0.5,2,0,0])))\n\n points = np.array([3.5,2])\n val = example_rg.interpolate_var_to_points(points, v1, method='linear')\n assert 
np.all(np.isclose(val,np.array([2])))\n","sub_path":"gridded/tests/test_grid.py","file_name":"test_grid.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"118518061","text":"import json\n\nfrom nose.tools import eq_\n\nfrom infoeducatie.pages.tests.base_pages_test import PageSetupTest\nfrom infoeducatie.extensions import db\n\nfrom utils.tests import fixtures\n\nload_fixture = fixtures('pages', 'page.retrieve')\n\nclass PagesRetrieveTest(PageSetupTest):\n db = db\n\n def test_page_retrieve_with_valid_data(self):\n '''\n Basic page retrieval with valid data\n '''\n response = self.client.get(\"/api/1/pages/1\", content_type = 'application/json')\n assert_response = load_fixture('response_valid_data')\n\n eq_(json.loads(response.data), assert_response)\n eq_(response.status_code, 200)\n\n def test_page_retrieve_missing_page(self):\n '''\n Retrieve a page that does not exist\n '''\n response = self.client.get(\"/api/1/pages/123\", content_type = 'application/json')\n assert_response = load_fixture('response_missing_page')\n\n eq_(json.loads(response.data), assert_response)\n eq_(response.status_code, 404)","sub_path":"infoeducatie/pages/tests/test_retrieve_page.py","file_name":"test_retrieve_page.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"503083980","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom util import *\n\n# do not really send anything - only print to the screen\nTEST = False\nONE = False # send a single SMS\nalready_sent = False\none_phone = '79276064175' #ilnur\n#one_phone = '79379903835' #deyarov\n\n# if services is empty = (), the mailing goes out for all 3 services (internet, TV, phone)\n# if the tuple is non-empty, only the listed services are processed\n#services = (u'интернет', u'телевидение', u'телефон')\nservices = (u'телевидение',)\n\n#smtp\ndelay = 5\nsmtprelay = '10.0.0.2'\nfromaddr = \"deyarov@egs63.ru\"\n#toaddrs = \"sm00th1980@mail.ru\"\ntoaddrs = \"send@send.smsc.ru\"\nctype = \"plain\"\ncharset = 'utf-8'\n\n#sms-center\nprefix = u'egs63:Holidex100:::0,0,EGS,0'\n\ntext_remember = u'%s:%s:Уважаемый абонент, напоминаем что на вашем договоре \\\nкабельного телевидения #%s образовалась задолженность в размере %.2f руб. 
\\\nВ случае неуплаты до 11.05.12 данная услуга будет отключена.'\n\n#database\nhost = '10.0.0.2'\nuser = 'bill'\npasswd = 'bgbilling'\ndb = 'bgbilling'\n\n#request\nabon_id = 22\nphone_id = 1\n\ncursor = connect(host=host, user=user, passwd=passwd, db=db)\nrecords = getRecords(cursor=cursor, abon_id=abon_id, phone_id=phone_id, year=2012, month=5)\n\nsendMgr = SendMgr(fromaddr = fromaddr, \\\n toaddrs = toaddrs, \\\n ctype = ctype, \\\n charset = charset, \\\n smtprelay = smtprelay, \\\n delay = delay, \\\n TEST = TEST, \\\n excluded_contracts = ())\n\nfor record in records:\n text = None\n phone = getMobilePhone8(record)\n\n if phone is not None:\n contract = record[0]\n fio = record[1]\n service = getTypeService(record)\n month = getMonth()\n balance = float(record[4])\n ap_charge = float(record[5])\n raz_charge = float(record[6])\n date_start = record[7]\n address = record[8]\n ap = getAP(cursor=cursor, contract=contract, service=service)\n\n # only process the services listed above\n if service in services:\n if ap is not None and isDebitor(record):\n if abs(ap) <= abs(balance):\n text = text_remember % ( prefix, phone, contract, -1*balance )\n\n if text is not None:\n if ONE:\n if not already_sent:\n text = text_remember % ( prefix, one_phone, contract, -1*balance )\n\n sendMgr.setText(text)\n\n # send for every contract in this service\n sendMgr.send(contract=contract)\n already_sent = True\n else:\n pass \n else:\n sendMgr.setText(text)\n\n # send for every contract in this service\n sendMgr.send(contract=contract)\n","sub_path":"sms_tv.py","file_name":"sms_tv.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"646757823","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 8 23:51:39 2021\r\n\r\n@author: Raza_Jutt\r\n\"\"\"\r\n\r\nimport random \r\n\r\ncards = [x for x in range(1,53)]\r\n\r\nlabels = {1: \"Ace\",\r\n 2: \"2\",\r\n 3: \"3\",\r\n 4: \"4\",\r\n 5: \"5\",\r\n 6: \"6\",\r\n 7: \"7\",\r\n 8: \"8\",\r\n 9: \"9\",\r\n 10: \"10\",\r\n 11: \"Jack\",\r\n 12: \"Queen\",\r\n 13: \"King\"}\r\n\r\ncard_type = { 1 : \"hearts \", \r\n 2 : \"clubs \", \r\n 3 : \"diamonds\", \r\n 4 : \"spades \"}\r\n\r\ndef display_Cards(cards):\r\n for i in cards:\r\n if i<=13 :\r\n print(card_type[1]+\" ----- \"+labels[i])\r\n elif i>13 and i<=26:\r\n print(card_type[2]+\" ----- \"+labels[i-13])\r\n elif i>26 and i<=39:\r\n print(card_type[3]+\" ----- \"+labels[i-26])\r\n else:\r\n print(card_type[4]+\" ----- \"+labels[i-39])\r\n \r\n \r\ndef shuffle_deck(display=0): # display 0=don't show and 1=show cards \r\n random.shuffle(cards)\r\n if display:\r\n print(\"_____Shuffled Deck_____\")\r\n display_Cards(cards)\r\n return cards\r\n \r\ndef consecutive_Queen(cards):\r\n for i in range(0,51):\r\n if cards[i]==12 or cards[i] ==25 or cards[i] ==38 or cards[i] ==51:\r\n if cards[i+1] == 12 or cards[i+1] == 25 or cards[i+1] == 38 or cards[i+1] == 51:\r\n return 1,i\r\n return 0,0\r\n\r\n
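# Illustrative sanity check (added sketch; not part of the original file).\r\n# The Monte Carlo estimate printed by main() below can be compared with the\r\n# exact value: the four queens avoid all adjacencies in C(49,4) of the\r\n# C(52,4) equally likely position sets, so\r\n# P(at least one adjacent queen pair) = 1 - C(49,4)/C(52,4) ~= 0.2174\r\nfrom math import comb # Python 3.8+\r\nEXPECTED_PROBABILITY = 1 - comb(49, 4) / comb(52, 4)\r\n\r\n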
: \"+ str(count))\r\n print(\" Totall Experiment : \"+ str(numberOFexperiments))\r\n print(\" Probability of two Consecutive Queens : \",end=(\"\"))\r\n print(count/numberOFexperiments)\r\n print(\" \")\r\n\r\n\r\n\r\n#### Attention Plese ###\r\n### for Q2 Part: A call by default once experiment happend\r\n\r\nmain() # part A\r\n","sub_path":"Q2_A.py","file_name":"Q2_A.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"386533128","text":"# -*- coding: utf-8 -*-\r\n\r\n# Adenine (A), Thymine (T), Cytosine (C), Guanine (G), Uracil (U)\r\n# \r\n# DNA complementary base pairing rule\r\n# A -> T\r\n# C -> G\r\n# G -> C\r\n# T -> A\r\n\r\n# RNA complementary base pairing rule\r\n# A -> U\r\n# C -> G\r\n# G -> C\r\n# T -> A\r\n\r\n\r\ntemp_DNA = \"ATGCCGCTAAACTGACATXXTCAGATC\" # Template DNA Sequence\r\ntemp_RNA = \"AUGCCGCUAAACUGACAUXXUCAGAUC\" # use this sequence for the Revers \r\nreve_Sequence = '' # Reverse RNA Sequence\r\ncomp_RNA = '' # Complementary RNA Sequence\r\nreve_comp_RNA = '' # Reverse-Complement RNA Sequence\r\n\r\n#===== a- Reverse RNA Sequence ===============================================\r\n# You need to write code here to reverse the RNA sequenc\r\nreve_Sequence = temp_RNA[::-1]\r\nprint(temp_RNA)\r\nprint(reve_Sequence)\r\n#=============================================================================\r\n\r\n#===== b- Complementary RNA Sequence =========================================\r\ncomp_RNA = \"\" # Make sure the comp_RNA is empty\r\n# You need 'for' loob to read the template DNA sequenc and 'if'\r\n# statments to add the complement base to the 'comp_RNA'\r\nfor base in temp_DNA:\r\n if base == \"A\":\r\n comp_RNA = comp_RNA + 'U'\r\n elif base == \"C\":\r\n comp_RNA = comp_RNA + 'G'\r\n elif base == \"G\":\r\n comp_RNA = comp_RNA + 'C'\r\n elif base == \"T\":\r\n comp_RNA = comp_RNA + 'A'\r\n else:\r\n comp_RNA = comp_RNA + '?'\r\nprint(temp_DNA)\r\nprint(comp_RNA)\r\n# ============================================================================\r\n\r\n# c- Reverse-Complement RNA Sequence =========================================\r\nreve_comp_RNA = \"\" # Make sure reve_comp_RNA is empty\r\n# Like the previous code but in reverse order.\r\nfor base in temp_DNA:\r\n if base == \"A\":\r\n reve_comp_RNA = reve_comp_RNA + 'T'\r\n elif base == \"C\":\r\n reve_comp_RNA = reve_comp_RNA + 'G'\r\n elif base == \"G\":\r\n reve_comp_RNA = reve_comp_RNA + 'C'\r\n elif base == \"T\":\r\n reve_comp_RNA = reve_comp_RNA + 'A'\r\n else:\r\n reve_comp_RNA = reve_comp_RNA + '?'\r\nprint(temp_DNA)\r\nprint(reve_comp_RNA)\r\n# ============================================================================","sub_path":"task_7_solution.py","file_name":"task_7_solution.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"230313790","text":"from flask import *\nfrom stockmarketpredictingapp import *\n#from flask_sqlalchemy import SQLAlchemy\napp = Flask(__name__)\n#app.config['SQLALCHEMY_DATA']\n\n#@app.route('/', methods = ['POST'])\n#def homepage():\n# if request.method == 'POST':\n# form = request.form #json format? 
ish\n# stock = form['stock'] #id in html\n# return render_template('index.html')\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/', methods = ['POST'])\ndef getStock():\n stock = request.form['stock'] #id in html\n startdate = request.form['startdate']\n enddate = request.form['enddate']\n prediction = predict(stock, startdate, enddate)\n \n return render_template('result.html', stock = stock, prediction = prediction, enddate = enddate)\n\n@app.route('/<name>', methods = ['POST', 'GET'])\ndef getResult(name):\n task.content = request.form['submit']\n try:\n return redirect('/')\n except:\n return 'There was an issue returning to the home page'\n\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"191073133","text":"# _________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright (c) 2014 Sandia Corporation.\n# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,\n# the U.S. Government retains certain rights in this software.\n# This software is distributed under the BSD License.\n# _________________________________________________________________________\n\nimport sys\nimport gc\nimport random\nimport weakref\nimport posixpath\n\nfrom math import fabs, ceil\nimport copy\n\nfrom pyomo.core import *\nfrom pyomo.repn import GeneralCanonicalRepn\nfrom pyomo.pysp.phutils import *\nfrom pyomo.core.base import BasicSymbolMap, CounterLabeler\n\nfrom six import iterkeys, iteritems, itervalues, advance_iterator, PY3\nfrom six.moves import xrange\nusing_py3 = PY3\n\nclass ScenarioTreeInstanceFactory(object):\n\n def __init__(self, model, data, verbose=False):\n\n self._model_spec = model\n self._data_spec = data\n self._verbose = verbose\n\n self._model_directory = None\n self._model_filename = None\n self._model_archive = None\n self._data_directory = None\n self._data_filename = None\n self._data_archive = None\n self._tmpdirs = []\n # Define the above by inspecting model_spec and data_spec\n try:\n self._extract_model_and_data_locations()\n except:\n self.close()\n raise\n\n self._model_object = None\n self._model_callback = None\n self._scenario_tree_instance = None\n self._scenario_tree = None\n # Define the above by inspecting self._model_filename\n try:\n self._import_model_and_data()\n except:\n self.close()\n raise\n\n def __getstate__(self):\n self.close()\n raise NotImplementedError(\"Do not deepcopy or serialize this class\")\n def __setstate__(self,d):\n self.close()\n raise NotImplementedError(\"Do not deepcopy or serialize this class\")\n\n def close(self):\n for dirname in self._tmpdirs:\n if os.path.exists(dirname):\n shutil.rmtree(dirname, True)\n if self._model_archive is not None:\n self._model_archive.close()\n if self._data_archive is not None:\n self._data_archive.close()\n\n def model_directory(self):\n return self._model_directory\n\n def data_directory(self):\n return self._data_directory\n\n #\n # construct a scenario instance - just like it sounds!\n #\n def _construct_scenario_instance(self,\n scenario_name,\n scenario_tree,\n preprocess=False,\n flatten_expressions=False,\n report_timing=False):\n\n if not scenario_tree.contains_scenario(scenario_name):\n raise ValueError(\"ScenarioTree does not contain scenario \"\n \"with name %s.\" % (scenario_name))\n\n 
scenario = scenario_tree.get_scenario(scenario_name)\n node_name_list = [n._name for n in scenario._node_list]\n\n if self._verbose:\n print(\"Creating instance for scenario=%s\" % (scenario_name))\n\n scenario_instance = None\n\n try:\n\n if self._model_callback is not None:\n\n assert self._model_object is None\n scenario_instance = self._model_callback(scenario_name, node_name_list)\n\n elif self._model_object is not None:\n\n if scenario_tree._scenario_based_data:\n\n scenario_data_filename = \\\n os.path.join(self._data_directory,\n str(scenario_name))\n # JPW: The following is a hack to support initialization\n # of block instances, which don't work with .dat\n # files at the moment. Actually, it's not that bad of\n # a hack - it just needs to be extended a bit, and\n # expanded into the node-based data read logic (where\n # yaml is completely ignored at the moment.\n if os.path.exists(scenario_data_filename+'.dat'):\n scenario_data_filename = scenario_data_filename + \".dat\"\n data = None\n elif os.path.exists(scenario_data_filename+'.yaml'):\n import yaml\n scenario_data_filename = scenario_data_filename + \".yaml\"\n yaml_input_file=open(scenario_data_filename,\"r\")\n data = yaml.load(yaml_input_file)\n yaml_input_file.close()\n else:\n raise RuntimeError(\"Cannot find the scenario data for \"\n + scenario_data_filename)\n if self._verbose:\n print(\"Data for scenario=%s loads from file=%s\"\n % (scenario_name, scenario_data_filename))\n if data is None:\n scenario_instance = \\\n self._model_object.create(filename=scenario_data_filename,\n preprocess=False,\n report_timing=report_timing)\n else:\n scenario_instance = \\\n self._model_object.create(data,\n preprocess=False,\n report_timing=report_timing)\n else:\n\n data_files = []\n for node_name in node_name_list:\n node_data_filename = \\\n os.path.join(self._data_directory,\n str(node_name)+\".dat\")\n if not os.path.exists(node_data_filename):\n raise RuntimeError(\"Node data file=\"\n +node_data_filename+\n \" does not exist or cannot be accessed\")\n data_files.append(node_data_filename)\n\n scenario_data = DataPortal(model=self._model_object)\n for data_file in data_files:\n if self._verbose:\n print(\"Node data for scenario=%s partially \"\n \"loading from file=%s\"\n % (scenario_name, data_file))\n scenario_data.load(filename=data_file)\n\n scenario_instance = self._model_object.create(scenario_data,\n preprocess=False,\n report_timing=report_timing)\n else:\n raise RuntimeError(\"Unable to construct scenario instance. \"\n \"Neither a reference model or callback \"\n \"is defined.\")\n\n if preprocess or flatten_expressions:\n scenario_instance.preprocess()\n\n if flatten_expressions:\n # IMPT: The model *must* be preprocessed in order for linearization to work. This is because\n # linearization relies on the canonical expression representation extraction routine,\n # which in turn relies on variables being identified/categorized (e.g., into \"Used\").\n scenario_instance.preprocess()\n linearize_model_expressions(scenario_instance)\n\n # apply each of the post-instance creation plugins. this\n # really shouldn't be associated (in terms of naming) with the\n # pyomo script - this should be rectified with a workflow\n # re-work. 
it is unclear how this interacts, or doesn't, with\n # the preprocessors.\n for ep in ExtensionPoint(IPyomoScriptModifyInstance):\n ep.apply(options=None,\n model=self._model_object,\n instance=scenario_instance)\n\n except:\n exception = sys.exc_info()[1]\n raise RuntimeError(\"Failed to create model instance \"\n \"for scenario=%s; Source error=%s\"\n % (scenario_name, exception))\n\n return scenario_instance\n\n def construct_instances_for_scenario_tree(self,\n scenario_tree,\n preprocess=False,\n flatten_expressions=False,\n report_timing=False):\n\n if scenario_tree._scenario_instance_factory is not self:\n raise RuntimeError(\"Cannot construct scenario tree instances. \"\n \"The scenario tree was not generated by this \"\n \"instance factory.\")\n\n # the construction of instances takes little overhead in terms\n # of memory potentially lost in the garbage-collection sense\n # (mainly only that due to parsing and instance\n # simplification/prep-processing). to speed things along,\n # disable garbage collection if it is enabled in the first place\n # through the instance construction process.\n # IDEA: If this becomes too much for truly large numbers of\n # scenarios, we could manually collect every time X\n # instances have been created.\n re_enable_gc = False\n if gc.isenabled() is True:\n re_enable_gc = True\n gc.disable()\n\n if scenario_tree._scenario_based_data:\n if self._verbose is True:\n print(\"Scenario-based instance initialization enabled\")\n else:\n if self._verbose is True:\n print(\"Node-based instance initialization enabled\")\n\n scenario_instances = {}\n for scenario in scenario_tree._scenarios:\n\n scenario_instance = \\\n self._construct_scenario_instance(\n scenario._name,\n scenario_tree,\n preprocess=preprocess,\n flatten_expressions=flatten_expressions)\n\n scenario_instances[scenario._name] = scenario_instance\n # name each instance with the scenario name\n scenario_instance.name = scenario._name\n\n if re_enable_gc is True:\n gc.enable()\n\n return scenario_instances\n\n def _extract_model_and_data_locations(self):\n\n model_filename = None\n model_archive = None\n model_archive_inputs = (None,None)\n model_unarchived_dir = None\n try:\n # un-archive the model directory if necessary\n normalized_model_spec = None\n model_archive_subdir = None\n modelname = \"ReferenceModel.py\"\n if not os.path.exists(self._model_spec):\n normalized_model_spec, _, model_archive_subdir = \\\n ArchiveReader.normalize_name(self._model_spec).rpartition(',')\n if model_archive_subdir.endswith('.py') or \\\n model_archive_subdir.endswith('.pyc'):\n modelname = os.path.basename(model_archive_subdir)\n model_archive_subdir = os.path.dirname(model_archive_subdir)\n model_archive_subdir = model_archive_subdir.strip()\n if model_archive_subdir == '':\n model_archive_subdir = None\n else:\n normalized_model_spec = ArchiveReader.normalize_name(self._model_spec)\n\n if ArchiveReader.isArchivedFile(normalized_model_spec):\n model_archive = ArchiveReaderFactory(normalized_model_spec,\n subdir=model_archive_subdir)\n self._model_archive = model_archive\n model_archive_inputs = (normalized_model_spec,model_archive_subdir)\n model_unarchived_dir = model_archive.normalize_name(\n tempfile.mkdtemp(prefix='pysp_unarchived',\n dir=os.path.dirname(normalized_model_spec)))\n self._tmpdirs.append(model_unarchived_dir)\n print(\"Model directory unarchiving to: %s\" % model_unarchived_dir)\n model_archive.extractall(path=model_unarchived_dir)\n
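 # Added note (sketch, not from the original source): after extractall()\n # the reference model is expected at <model_unarchived_dir>/<modelname>,\n # where modelname defaults to \"ReferenceModel.py\" unless a .py/.pyc\n # member was named in the archive specification handled above.\n 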
model_filename = \\\n posixpath.join(model_unarchived_dir, modelname)\n else:\n if model_archive_subdir is not None:\n model_unarchived_dir = posixpath.join(normalized_model_spec,\n model_archive_subdir)\n else:\n model_unarchived_dir = normalized_model_spec\n\n if not os.path.isfile(model_unarchived_dir):\n model_filename = \\\n posixpath.join(model_unarchived_dir, modelname)\n else:\n model_filename = model_unarchived_dir\n if not os.path.exists(model_filename):\n raise RuntimeError(\"Model input does not exist: \"\n +str(model_filename))\n except:\n print(\"***ERROR: Failed to locate reference \"\n \"model file with specification string: \"\n +self._model_spec)\n raise\n\n self._model_filename = model_filename\n self._model_directory = os.path.dirname(model_filename)\n\n data_filename = None\n data_archive = None\n try:\n # un-archive the data directory if necessary\n normalized_data_spec = None\n data_archive_subdir = None\n data_unarchived_dir = None\n dataname = \"ScenarioStructure.dat\"\n if not os.path.exists(self._data_spec):\n normalized_data_spec, _, data_archive_subdir = \\\n ArchiveReader.normalize_name(self._data_spec).rpartition(',')\n if data_archive_subdir.endswith('.dat'):\n dataname = os.path.basename(data_archive_subdir)\n data_archive_subdir = os.path.dirname(data_archive_subdir)\n data_archive_subdir = data_archive_subdir.strip()\n if data_archive_subdir == '':\n data_archive_subdir = None\n else:\n normalized_data_spec = ArchiveReader.normalize_name(self._data_spec)\n\n if ArchiveReader.isArchivedFile(normalized_data_spec):\n if (normalized_data_spec == model_archive_inputs[0]) and \\\n ((model_archive_inputs[1] is None) or \\\n ((data_archive_subdir is not None) and \\\n (data_archive_subdir.startswith(model_archive_inputs[1]+'/')))):\n # The scenario tree data has already been extracted with the\n # model archive, no need to extract again\n print(\"Data directory found in unarchived model directory\")\n data_unarchived_dir = model_unarchived_dir\n if data_archive_subdir is not None:\n if model_archive_inputs[1] is not None:\n data_unarchived_dir = \\\n posixpath.join(data_unarchived_dir,\n os.path.relpath(data_archive_subdir,\n start=model_archive_inputs[1]))\n else:\n data_unarchived_dir = posixpath.join(data_unarchived_dir,\n data_archive_subdir)\n else:\n data_archive = ArchiveReaderFactory(normalized_data_spec,\n subdir=data_archive_subdir)\n data_unarchived_dir = \\\n data_archive.normalize_name(\n tempfile.mkdtemp(prefix='pysp_unarchived',\n dir=os.path.dirname(normalized_data_spec)))\n self._tmpdirs.append(data_unarchived_dir)\n print(\"Data directory unarchiving to: %s\" % data_unarchived_dir)\n data_archive.extractall(path=data_unarchived_dir)\n\n data_filename = \\\n posixpath.join(data_unarchived_dir, dataname)\n else:\n if data_archive_subdir is not None:\n data_unarchived_dir = posixpath.join(normalized_data_spec,\n data_archive_subdir)\n else:\n data_unarchived_dir = normalized_data_spec\n\n if not os.path.isfile(data_unarchived_dir):\n data_filename = \\\n posixpath.join(data_unarchived_dir, dataname)\n else:\n data_filename = data_unarchived_dir\n if not os.path.exists(data_filename):\n raise RuntimeError(\"Scenario data input does not exist: \"\n +str(data_filename))\n except:\n print(\"***ERROR: Failed to locate scenario tree structure \"\n \"file with specification string: \"\n +self._data_spec)\n raise\n\n self._data_filename = data_filename\n self._data_directory = os.path.dirname(data_filename)\n\n def _import_model_and_data(self):\n from 
pyomo.pysp.ph import _OLD_OUTPUT\n if not _OLD_OUTPUT:\n module_name, model_import = load_external_module(self._model_filename)\n else:\n model_import = import_file(self._model_filename)\n\n self._model_object = None\n self._model_callback = None\n if \"pysp_instance_creation_callback\" in dir(model_import):\n callback = model_import.pysp_instance_creation_callback\n if not hasattr(callback,\"__call__\"):\n raise TypeError(\"'pysp_instance_creation_callback' object is not callable \"\n \"in model file: \"+self._model_filename)\n self._model_callback = callback\n elif \"model\" in dir(model_import):\n model = model_import.model\n if not isinstance(model,Block):\n raise TypeError(\"'model' object has incorrect type \"\n \"in model file: \"+self._model_filename)\n self._model_object = model\n else:\n raise AttributeError(\"No 'model' or 'pysp_instance_creation_callback' \"\n \"object found in model file: \"+self._model_filename)\n\n self._scenario_tree_instance = \\\n scenario_tree_model.create(filename=self._data_filename)\n\n def generate_scenario_tree(self,\n downsample_fraction=1.0,\n include_scenarios=None,\n bundles_file=None,\n random_bundles=None,\n random_seed=None):\n\n scenario_tree_instance = self._scenario_tree_instance\n bundles_file_path = None\n if bundles_file is not None:\n # we interpret the scenario bundle specification in one of\n # two ways. if the supplied name is a file, it is used\n # directly. otherwise, it is interpreted as the root of a\n # file with a .dat suffix to be found in the instance\n # directory.\n if os.path.exists(os.path.expanduser(bundles_file)):\n bundles_file_path = \\\n os.path.expanduser(bundles_file)\n else:\n bundles_file_path = \\\n os.path.join(self._data_directory,\n bundles_file+\".dat\")\n\n if self._verbose:\n if bundles_file_path is not None:\n print(\"Scenario tree bundle specification filename=\"\n +bundles_file_path)\n\n scenario_tree_instance = scenario_tree_instance.clone()\n scenario_tree_instance.Bundling._constructed = False\n scenario_tree_instance.Bundles._constructed = False\n scenario_tree_instance.BundleScenarios._constructed = False\n scenario_tree_instance.load(bundles_file_path)\n\n #\n # construct the scenario tree\n #\n scenario_tree = ScenarioTree(scenariotreeinstance=scenario_tree_instance,\n scenariobundlelist=include_scenarios)\n\n # compress/down-sample the scenario tree, if operation is\n # required. 
and the\\ option exists!\n if (downsample_fraction is not None) and \\\n (downsample_fraction < 1.0):\n scenario_tree.downsample(downsample_fraction,\n random_seed,\n self._verbose)\n\n #\n # create random bundles, if the user has specified such.\n #\n if (random_bundles is not None) and \\\n (random_bundles > 0):\n if bundles_file is not None:\n raise ValueError(\"Cannot specify both random \"\n \"bundles and a bundles filename\")\n\n num_scenarios = len(scenario_tree._scenarios)\n if random_bundles > num_scenarios:\n raise ValueError(\"Cannot create more random bundles \"\n \"than there are scenarios!\")\n\n print(\"Creating \"+str(random_bundles)+\n \" random bundles using seed=\"\n +str(random_seed))\n\n scenario_tree.create_random_bundles(self._scenario_tree_instance,\n random_bundles,\n random_seed)\n\n scenario_tree._scenario_instance_factory = self\n\n return scenario_tree\n\nclass ScenarioTreeNode(object):\n\n \"\"\" Constructor\n \"\"\"\n\n VARIABLE_FIXED = 0\n VARIABLE_FREED = 1\n\n def __init__(self, name, conditional_probability, stage):\n\n # self-explanatory!\n self._name = name\n\n # the stage to which this tree node belongs.\n self._stage = stage\n\n # defines the tree structure\n self._parent = None\n\n # a collection of ScenarioTreeNodes\n self._children = []\n\n # conditional on parent\n self._conditional_probability = conditional_probability\n\n # a collection of all Scenario objects passing through this node in the tree\n self._scenarios = []\n\n # the cumulative probability of scenarios at this node.\n # cached for efficiency.\n self._probability = 0.0\n\n # information relating to all variables blended at this node, whether\n # of the standard or derived varieties.\n self._variable_ids = {} # maps id -> (name, index)\n self._name_index_to_id = {} # maps (name,index) -> id\n self._variable_datas = {} # maps id -> list of (vardata,probability) across all scenarios\n\n # keep track of the variable indices at this node, independent of type.\n # this is useful for iterating. maps variable name to a list of indices.\n self._variable_indices = {}\n\n # variables are either standard or derived - but not both.\n # partition the ids into two sets, as we deal with these\n # differently in algorithmic and reporting contexts.\n self._standard_variable_ids = set()\n self._derived_variable_ids = set()\n # A temporary solution to help wwphextension and other code\n # for when pyomo instances no longer live on the master node\n # when using PHPyro\n self._discrete = set()\n\n # a list of _VarData objects, representing the cost variables\n # for each scenario passing through this tree node.\n # NOTE: This list actually contains tuples of\n # (_VarData, scenario-probability) pairs.\n self._cost_variable_datas = []\n\n # general use statistics for the variables at each node.\n # each attribute is a map between the variable name and a\n # parameter (over the same index set) encoding the corresponding\n # statistic computed over all scenarios for that node. 
the\n # parameters are named as the source variable name suffixed\n # by one of: \"NODEMIN\", \"NODEAVG\", and \"NODEMAX\".\n # NOTE: the averages are probability_weighted - the min/max\n # values are not.\n # NOTE: the parameter names are basically irrelevant, and the\n # convention is assumed to be enforced by whoever populates\n # these parameters.\n self._minimums = {}\n self._averages = {}\n self._maximums = {}\n # This gets pushed into PHXBAR on the instances\n self._xbars = {}\n # This gets pushed into PHBLEND on the instances\n self._blend = {}\n self._wbars = {}\n # node variables ids that are fixed (along with the value to fix)\n self._fixed = {}\n # variable ids currently out of sync with instance data\n # variable_id -> VARIABLE_FIXED | VARIABLE_FREED\n self._fix_queue = {}\n\n # solution (variable) values for this node. assumed to be distinct\n # from self._averages, as the latter are not necessarily feasible.\n # keys are variable ids.\n self._solution = {}\n\n #\n # given a set of scenario instances, compute the set of indices\n # for non-anticipative variables at this node, as defined by the\n # input match templates.\n #\n\n def updateVariableIndicesAndValues(self, variable_name, match_templates,\n derived=False, id_labeler=None, name_index_to_id_map=None):\n\n # ensure that the variable exists on each scenario instance,\n # and that there is at least one index match per template.\n\n # To avoid calling extractVariableIndices more than necessary\n # we take the last scenario in the next loop as our\n # \"representative\" scenario from which we use the\n # new_match_indices list\n new_match_indices = None\n var_component = {}\n symbolmap = {}\n scenario = None\n isVar = False\n for scenario in self._scenarios:\n\n scenario_instance = scenario._instance\n\n if scenario_instance is None:\n continue\n\n instance_variable = scenario_instance.find_component(variable_name)\n if instance_variable is None:\n raise RuntimeError(\"The component=%s associated with stage=%s \"\n \"is not present in instance=%s\"\n % (variable_name,\n self._stage._name,\n scenario_instance.name))\n isVar = (instance_variable.type() is Var)\n\n if derived is False:\n if not isVar:\n raise RuntimeError(\"The component=%s \"\n \"associated with stage=%s \"\n \"is present in instance=%s \"\n \"but is not a variable - type=%s\"\n % (variable_name,\n self._stage._name,\n scenario_instance.name,\n type(instance_variable)))\n else:\n if not (isVar or (instance_variable.type() is Expression)):\n raise RuntimeError(\"The derived component=%s \"\n \"associated with stage=%s \"\n \"is present in instance=%s \"\n \"but is not a Var or Expression \"\n \"- type=%s\"\n % (variable_name,\n self._stage._name,\n scenario_instance.name,\n type(instance_variable)))\n\n new_match_indices = []\n\n for match_template in match_templates:\n\n indices = extractVariableIndices(instance_variable, match_template)\n\n # validate that at least one of the indices in the\n # variable matches to the template - otherwise, the\n # template is bogus. 
with one exception: if the\n # variable is empty (the index set is empty), then\n # don't warn - the instance was designed this way.\n if (len(indices) == 0) and (len(instance_variable) > 0):\n raise ValueError(\"No indices match template=%s \"\n \"for variable=%s in scenario=%s\"\n % (match_template,\n variable_name,\n scenario._name))\n\n new_match_indices.extend(indices)\n\n var_component[scenario._name] = scenario_instance.find_component(variable_name)\n\n if (id_labeler is not None) or (name_index_to_id_map is not None):\n # Tag each instance with a ScenarioTreeSymbolMap. This\n # will allow us to identify common blended variables\n # within a node across scenario instances without\n # having to do an expensive name lookup each time.\n this_symbolmap = getattr(scenario_instance,\"_ScenarioTreeSymbolMap\", None)\n if this_symbolmap is None:\n this_symbolmap = scenario_instance._ScenarioTreeSymbolMap = BasicSymbolMap()\n symbolmap[scenario._name] = this_symbolmap\n\n # find a representative scenario instance belonging to (or\n # passing through) this node in the tree. the first scenario\n # is as good as any.\n # NOTE: At some point we should check that the index sets\n # across all scenarios at a node actually match for each\n # variable.\n\n self._variable_indices.setdefault(variable_name, []).extend(new_match_indices)\n\n # cache some stuff up-front - we're accessing these attributes a lot in the loops below.\n if derived == False:\n variable_ids_to_update = self._standard_variable_ids\n else:\n variable_ids_to_update = self._derived_variable_ids\n\n self_variable_ids = self._variable_ids\n self_variable_datas = self._variable_datas\n\n if (id_labeler is not None) or (name_index_to_id_map is not None):\n\n for index in sorted(new_match_indices):\n\n # create the ScenarioTree integer id for this variable\n # across all scenario instances, or look it up if a\n # map has been provided.\n scenario_tree_id = None\n if id_labeler != None:\n scenario_tree_id = id_labeler()\n elif name_index_to_id_map != None:\n scenario_tree_id = name_index_to_id_map[variable_name, index]\n\n variable_ids_to_update.add(scenario_tree_id)\n\n self_variable_ids[scenario_tree_id] = (variable_name,index)\n self._name_index_to_id[(variable_name,index)] = scenario_tree_id\n self_variable_datas[scenario_tree_id] = []\n for scenario in self._scenarios:\n vardata = var_component[scenario._name][index]\n symbolmap[scenario._name].updateSymbol(vardata,scenario_tree_id)\n self_variable_datas[scenario_tree_id].append((vardata, scenario._probability))\n # We are trusting that each instance variable has the same\n # domain (as we always do)\n if isVar:\n rep_domain = self_variable_datas[scenario_tree_id][0][0].domain\n if isinstance(rep_domain, IntegerSet) or \\\n isinstance(rep_domain, BooleanSet):\n self._discrete.add(scenario_tree_id)\n\n #\n # same as the above, but specialized to cost variables.\n #\n\n def updateCostVariableIndexAndValue(self, cost_variable_name, cost_variable_index):\n\n # ensure that the cost variable exists on each scenario\n # instance, and that the index is valid. 
if so, add it to the\n # list of _VarDatas for scenarios at this tree node.\n for scenario in self._scenarios:\n scenario_instance = scenario._instance\n cost_variable = scenario_instance.find_component(cost_variable_name)\n\n if cost_variable is None:\n raise ValueError(\"Cost variable=%s associated with \"\n \"stage=%s is not present in model=%s; \"\n \"scenario tree construction failed\"\n % (cost_variable_name,\n self._stage._name,\n scenario_instance.name))\n if not cost_variable.type() in [Var,Expression]:\n raise RuntimeError(\"The component=%s associated with stage=%s \"\n \"is present in model=%s but is not a \"\n \"variable or expression - type=%s\"\n % (cost_variable_name,\n self._stage._name,\n scenario_instance.name,\n cost_variable.type()))\n if cost_variable_index not in cost_variable:\n raise RuntimeError(\"The index %s is not defined for cost \"\n \"variable=%s on model=%s\"\n % (cost_variable_index,\n cost_variable_name,\n scenario_instance.name))\n self._cost_variable_datas.append((cost_variable[cost_variable_index],scenario._probability))\n\n #\n # given a set of scenario instances, compute the set of indices being\n # blended for each variable at this node. populates the _variable_indices\n # and _variable_values attributes of a tree node.\n #\n\n def populateVariableIndicesAndValues(self,\n id_labeler=None,\n name_index_to_id_map=None,\n initialize_solution_data=True):\n\n self._variable_indices = {}\n self._variable_datas = {}\n self._standard_variable_ids = set()\n self._derived_variable_ids = set()\n\n stage_variables = self._stage._variables\n for variable_name in sorted(iterkeys(stage_variables)):\n self.updateVariableIndicesAndValues(variable_name,\n stage_variables[variable_name],\n derived=False,\n id_labeler=id_labeler,\n name_index_to_id_map=name_index_to_id_map)\n\n stage_derived_variables = self._stage._derived_variables\n for variable_name in sorted(iterkeys(stage_derived_variables)):\n self.updateVariableIndicesAndValues(variable_name,\n stage_derived_variables[variable_name],\n derived=True,\n id_labeler=id_labeler,\n name_index_to_id_map=name_index_to_id_map)\n\n self.updateCostVariableIndexAndValue(self._stage._cost_variable[0],\n self._stage._cost_variable[1])\n\n if not initialize_solution_data:\n return\n\n # Create a fully populated scenario tree node.\n if not self.is_leaf_node():\n self._minimums = dict.fromkeys(self._variable_ids,0)\n self._maximums = dict.fromkeys(self._variable_ids,0)\n # this is the true variable average at the node (unmodified)\n self._averages = dict.fromkeys(self._variable_ids,0)\n # this is the xbar used in the PH objective.\n self._xbars = dict.fromkeys(self._standard_variable_ids,None)\n # this is the blend used in the PH objective\n self._blend = dict.fromkeys(self._standard_variable_ids,None)\n # For the dual ph algorithm\n self._wbars = dict.fromkeys(self._standard_variable_ids,None)\n\n for scenario in self._scenarios:\n\n scenario._w[self._name] = \\\n dict.fromkeys(self._standard_variable_ids,None)\n scenario._rho[self._name] = \\\n dict.fromkeys(self._standard_variable_ids,None)\n\n for scenario in self._scenarios:\n scenario._x[self._name] = \\\n dict.fromkeys(self._variable_ids,None)\n\n #\n # copies the parameter values values from the _averages attribute\n # into the _solution attribute - only for active variable values.\n # for leaf nodes, simply copies the values from the _VarValue objects\n # at that node - because there are no statistics maintained.\n #\n\n def snapshotSolutionFromAverages(self):\n\n 
self._solution = {}\n\n if self.is_leaf_node():\n\n self._solution.update(self._scenarios[0]._x[self._name])\n\n else:\n\n self._solution.update(self._averages)\n\n #\n # computes the solution values from the composite scenario\n # solutions at this tree node.\n #\n\n # Note: Trying to work this function out of the code. The only solution\n # we should get used to working with is that stored on the scenario\n # objects\n def XsnapshotSolutionFromInstances(self):\n\n self._solution = {}\n\n for variable_id in self._standard_variable_ids:\n\n var_datas = self._variable_datas[variable_id]\n # the following loop is just a sanity check.\n for var_data, scenario_probability in var_datas:\n # a variable that is fixed will be flagged as unused.\n if (var_data.stale) and (not var_data.fixed):\n # Note: At this point the only way to get the name of the scenario\n # for this specific vardata in general is to print its full cname\n # This will either be \"MASTER\", the bundle name, or the scenario name\n # The important thing is that we always have the scenario name somewhere\n # in the variable name we print\n model_name = var_data.model().cname(True)\n full_name = model_name+\".\"+var_data.cname(True)\n if not self.is_leaf_node():\n print(\"CAUTION: Encountered variable=%s \"\n \"on node %s that is not in use within its \"\n \"respective scenario instance but the scenario tree \"\n \"specification indicates that non-anticipativity is to \"\n \"be enforced; the variable should either be eliminated \"\n \"from the model or from the scenario tree specification.\"\n % (full_name, self._name))\n else:\n print(\"CAUTION: Encountered variable=%s \"\n \"on leaf node %s that is not in use within \"\n \"its respective scenario instance. This can be indicative \"\n \"of a modeling error; the variable should either be \"\n \"eliminated from the model or from the scenario tree \"\n \"specification.\" % (full_name, self._name))\n\n # if a variable is stale, it could be because it is fixed - in which case, we want to snapshot the average value\n\n avg = sum(scenario_probability * value(var_data) for var_data, scenario_probability in var_datas if (var_data.stale is False) or (var_data.fixed is True))\n\n # the node probability is allowed to be zero in the scenario tree specification.\n # this is useful in cases where one wants to temporarily ignore certain scenarios.\n # in this case, just skip reporting of variables for that node.\n if self._probability > 0.0:\n avg /= self._probability\n\n self._solution[variable_id] = avg\n\n for variable_id in self._derived_variable_ids:\n\n var_datas = self._variable_datas[variable_id]\n\n avg = sum(scenario_probability * value(var_data) for var_data, scenario_probability in var_datas)\n\n # the node probability is allowed to be zero in the scenario tree specification.\n # this is useful in cases where one wants to temporarily ignore certain scenarios.\n # in this case, just skip reporting of variables for that node.\n if self._probability > 0.0:\n avg /= self._probability\n\n self._solution[variable_id] = avg\n\n def snapshotSolutionFromScenarios(self):\n\n self._solution = {}\n\n for variable_id in self._standard_variable_ids:\n\n var_values = [(scenario._x[self._name][variable_id],scenario._probability) \\\n for scenario in self._scenarios]\n\n avg = 0.0\n # the following loop is just a sanity check.\n for scenario in self._scenarios:\n scenario_probability = scenario._probability\n var_value = scenario._x[self._name][variable_id]\n is_fixed = 
scenario.is_variable_fixed(self, variable_id)\n is_stale = scenario.is_variable_stale(self, variable_id)\n # a variable that is fixed will be flagged as unused.\n if is_stale and (not is_fixed):\n variable_name, index = self._variable_ids[variable_id]\n full_name = variable_name+indexToString(index)\n if not self.is_leaf_node():\n print(\"CAUTION: Encountered variable=%s \"\n \"on node %s that is not in use within its \"\n \"respective scenario %s but the scenario tree \"\n \"specification indicates that non-anticipativity is to \"\n \"be enforced; the variable should either be eliminated \"\n \"from the model or from the scenario tree specification.\"\n % (full_name, self._name, scenario._name))\n else:\n print(\"CAUTION: Encountered variable=%s \"\n \"on leaf node %s that is not in use within \"\n \"its respective scenario %s. This can be indicative \"\n \"of a modeling error; the variable should either be \"\n \"eliminated from the model or from the scenario tree \"\n \"specification.\" % (full_name, self._name, scenario._name))\n else:\n avg += scenario_probability*var_value\n\n # the node probability is allowed to be zero in the scenario tree specification.\n # this is useful in cases where one wants to temporarily ignore certain scenarios.\n # in this case, just skip reporting of variables for that node.\n if self._probability > 0.0:\n avg /= self._probability\n\n self._solution[variable_id] = avg\n\n for variable_id in self._derived_variable_ids:\n\n # if any of the variable values are None (not reported), it will\n # trigger an exception. if this happens, trap it and simply remove\n # the solution from the tree node for this specific variable.\n # NOTE: This handling is a bit inconsistent relative to the above\n # logic for handling non-derived variables, in terms of\n # monitoring stale/fixed flags - for no good reason.\n try:\n avg = sum(scenario._probability * scenario._x[self._name][variable_id] \\\n for scenario in self._scenarios)\n\n # the node probability is allowed to be zero in the scenario tree specification.\n # this is useful in cases where one wants to temporarily ignore certain scenarios.\n # in this case, just skip reporting of variables for that node.\n if self._probability > 0.0:\n avg /= self._probability\n\n self._solution[variable_id] = avg\n except:\n if variable_id in self._solution:\n del self._solution[variable_id]\n\n #\n # a utility to compute the cost of the current node plus the expected costs of child nodes.\n #\n\n def computeExpectedNodeCost(self):\n\n stage_name = self._stage._name\n if any(scenario._stage_costs[stage_name] is None \\\n for scenario in self._scenarios):\n return None\n\n my_cost = self._scenarios[0]._stage_costs[stage_name]\n # Don't assume the node has converged, this can\n # result in misleading output\n # UPDATE: It turns out this entire function is misleading\n # it will be removed\n \"\"\"\n my_cost = sum(scenario._stage_costs[stage_name] * scenario._probability \\\n for scenario in self._scenarios)\n my_cost /= sum(scenario._probability for scenario in self._scenarios)\n \"\"\"\n # This version implicitely assumes convergence (which can be garbage for ph)\n\n children_cost = 0.0\n for child in self._children:\n child_cost = child.computeExpectedNodeCost()\n if child_cost is None:\n return None\n else:\n children_cost += (child._conditional_probability * child_cost)\n return my_cost + children_cost\n\n #\n # a simple predicate to check if this tree node belongs to the\n # last stage in the scenario tree.\n #\n def 
is_leaf_node(self):\n\n return self._stage.is_last_stage()\n\n #\n # a utility to determine if the input variable name/index pair is\n # a derived variable.\n #\n def is_derived_variable(self, variable_name, variable_index):\n return (variable_name, variable_index) in self._name_index_to_id\n\n #\n # a utility to extract the value for the input name/index pair.\n #\n def get_variable_value(self, name, index):\n\n try:\n variable_id = self._name_index_to_id[(name,index)]\n except KeyError:\n raise ValueError(\"No ID for variable=%s, index=%s \"\n \"is defined for scenario tree \"\n \"node=%s\" % (name, index, self._name))\n\n try:\n return self._solution[variable_id]\n except KeyError:\n raise ValueError(\"No value for variable=%s, index=%s \"\n \"is defined for scenario tree \"\n \"node=%s\" % (name, index, self._name))\n\n #\n # fix the indicated input variable / index pair to the input value.\n #\n def fix_variable(self, variable_id, fix_value):\n\n self._fix_queue[variable_id] = (self.VARIABLE_FIXED, fix_value)\n\n #\n # free the indicated input variable / index pair to the input value.\n #\n def free_variable(self, variable_id):\n\n self._fix_queue[variable_id] = (self.VARIABLE_FREED, None)\n\n def is_variable_discrete(self, variable_id):\n\n return variable_id in self._discrete\n\n def is_variable_fixed(self, variable_id):\n\n return variable_id in self._fixed\n\n def push_xbar_to_instances(self):\n arbitrary_instance = self._scenarios[0]._instance\n assert arbitrary_instance != None\n\n # Note: the PHXBAR Param is shared amongst the\n # scenarios in a tree node, so it's only\n # necessary to grab the Param from an arbitrary\n # scenario for each node and update once\n xbar_parameter_name = \"PHXBAR_\"+str(self._name)\n xbar_parameter = arbitrary_instance.find_component(xbar_parameter_name)\n xbar_parameter.store_values(self._xbars)\n\n def push_fix_queue_to_instances(self):\n have_instances = (self._scenarios[0]._instance != None)\n\n for variable_id, (fixed_status, new_value) in iteritems(self._fix_queue):\n if fixed_status == self.VARIABLE_FREED:\n assert new_value is None\n if have_instances:\n for var_data, scenario_probability in \\\n self._variable_datas[variable_id]:\n var_data.free()\n del self._fixed[variable_id]\n elif fixed_status == self.VARIABLE_FIXED:\n if have_instances:\n for var_data, scenario_probability in \\\n self._variable_datas[variable_id]:\n var_data.fix(new_value)\n self._fixed[variable_id] = new_value\n else:\n raise ValueError(\"Unexpected fixed status %s for variable with \"\n \"scenario tree id %s\" % (fixed_status,\n variable_id))\n\n self.clear_fix_queue()\n\n def push_all_fixed_to_instances(self):\n have_instances = (self._scenarios[0]._instance != None)\n\n for variable_id, fix_value in iteritems(self._fixed):\n if have_instances:\n for var_data, scenario_probability in \\\n self._variable_datas[variable_id]:\n var_data.fix(fix_value)\n self._fixed[variable_id] = fix_value\n\n self.push_fix_queue_to_instances()\n\n def has_fixed_in_queue(self):\n return any((v[0] == self.VARIABLE_FIXED) \\\n for v in itervalues(self._fix_queue))\n\n def has_freed_in_queue(self):\n return any((v[0] == self.VARIABLE_FREED) \\\n for v in itervalues(self._fix_queue))\n\n def clear_fix_queue(self):\n\n self._fix_queue.clear()\n\nclass ScenarioTreeStage(object):\n\n \"\"\" Constructor\n \"\"\"\n def __init__(self, *args, **kwds):\n\n self._name = \"\"\n\n # a collection of ScenarioTreeNode objects associated with this stage.\n self._tree_nodes = []\n\n # the parent 
scenario tree for this stage.\n self._scenario_tree = None\n\n # a map between a variable name and a list of original index\n # match templates, specified as strings. we want to maintain\n # these for a variety of reasons, perhaps the most important\n # being that for output purposes. specific indices that match\n # belong to the tree node, as that may be specific to a tree\n # node.\n self._variables = {}\n\n # same as above, but for derived stage variables.\n self._derived_variables = {}\n\n # a tuple consisting of (1) the name of the variable that\n # stores the stage-specific cost in all scenarios and (2) the\n # corresponding index *string* - this is converted in the tree\n # node to a real index.\n self._cost_variable = (None, None)\n\n #\n # add a new variable to the stage, which will include updating the\n # solution maps for each associated ScenarioTreeNode.\n #\n def add_variable(self, variable_name, new_match_template, create_variable_ids=True):\n\n labeler = None\n if create_variable_ids is True:\n labeler = self._scenario_tree._id_labeler\n\n existing_match_templates = self._variables.setdefault(variable_name, [])\n existing_match_templates.append(new_match_template)\n\n for tree_node in self._tree_nodes:\n tree_node.updateVariableIndicesAndValues(variable_name, new_match_template,\n derived=False,\n id_labeler=labeler)\n\n #\n # a simple predicate to check if this stage is the last stage in\n # the scenario tree.\n #\n def is_last_stage(self):\n\n return self == self._scenario_tree._stages[-1]\n\nclass Scenario(object):\n\n \"\"\" Constructor\n \"\"\"\n def __init__(self, *args, **kwds):\n\n self._name = None\n # allows for construction of node list\n self._leaf_node = None\n # sequence from parent to leaf of ScenarioTreeNodes\n self._node_list = []\n # the unconditional probability for this scenario, computed from the node list\n self._probability = 0.0\n # the Pyomo instance corresponding to this scenario.\n self._instance = None\n self._instance_cost_expression = None\n self._instance_objective = None\n self._objective_sense = None\n self._objective_name = None\n\n # The value of the (possibly augmented) objective function\n self._objective = None\n # The value of the original objective expression\n # (which should be the sum of the stage costs)\n self._cost = None\n # The individual stage cost values\n self._stage_costs = {}\n # The value of the ph weight term piece of the objective (if it exists)\n self._weight_term_cost = None\n # The value of the ph proximal term piece of the objective (if it exists)\n self._proximal_term_cost = None\n # The value of the scenariotree variables belonging to this scenario\n # (dictionary nested by node name)\n self._x = {}\n # The value of the weight terms belonging to this scenario\n # (dictionary nested by node name)\n self._w = {}\n # The value of the rho terms belonging to this scenario\n # (dictionary nested by node name)\n self._rho = {}\n\n # This set of fixed or reported stale variables\n # in each tree node\n self._fixed = {}\n self._stale = {}\n\n #\n # a utility to compute the stage index for the input tree node.\n # the returned index is 0-based.\n #\n\n def node_stage_index(self, tree_node):\n return self._node_list.index(tree_node)\n\n def is_variable_fixed(self, tree_node, variable_id):\n\n return variable_id in self._fixed[tree_node._name]\n\n def is_variable_stale(self, tree_node, variable_id):\n\n return variable_id in self._stale[tree_node._name]\n\n def update_solution_from_instance(self):\n\n results = {}\n 
scenario_instance = self._instance\n scenariotree_sm_bySymbol = \\\n scenario_instance._ScenarioTreeSymbolMap.bySymbol\n self._objective = self._instance_objective(exception=False)\n self._cost = self._instance_cost_expression(exception=False)\n for tree_node in self._node_list:\n stage_name = tree_node._stage._name\n cost_variable_name, cost_variable_index = \\\n tree_node._stage._cost_variable\n stage_cost_component = self._instance.find_component(cost_variable_name)\n self._stage_costs[stage_name] = \\\n stage_cost_component[cost_variable_index](exception=False)\n\n self._weight_term_cost = \\\n scenario_instance.PHWEIGHT_EXPRESSION(exception=False) \\\n if (hasattr(scenario_instance,\"PHWEIGHT_EXPRESSION\") and \\\n (scenario_instance.PHWEIGHT_EXPRESSION is not None)) \\\n else None\n self._proximal_term_cost = \\\n scenario_instance.PHPROXIMAL_EXPRESSION(exception=False) \\\n if (hasattr(scenario_instance,\"PHPROXIMAL_EXPRESSION\") and \\\n (scenario_instance.PHPROXIMAL_EXPRESSION is not None)) \\\n else None\n\n for tree_node in self._node_list:\n # Some of these might be Expression objects so we use the\n # __call__ method rather than directly accessing .value\n # (since we want a number)\n self._x[tree_node._name].update(\n (variable_id,\n scenariotree_sm_bySymbol[variable_id](exception=False)) \\\n for variable_id in tree_node._variable_ids)\n scenario_fixed = self._fixed[tree_node._name] = set()\n scenario_stale = self._stale[tree_node._name] = set()\n for variable_id in tree_node._variable_ids:\n vardata = scenariotree_sm_bySymbol[variable_id]\n if vardata.is_expression():\n continue\n if vardata.fixed:\n scenario_fixed.add(variable_id)\n if vardata.stale:\n scenario_stale.add(variable_id)\n\n def push_solution_to_instance(self):\n\n scenario_instance = self._instance\n scenariotree_sm_bySymbol = \\\n scenario_instance._ScenarioTreeSymbolMap.bySymbol\n for tree_node in self._node_list:\n stage_name = tree_node._stage._name\n cost_variable_name, cost_variable_index = \\\n tree_node._stage._cost_variable\n stage_cost_component = \\\n self._instance.find_component(cost_variable_name)[cost_variable_index]\n # Some of these might be Expression objects so we check\n # for is_expression before changing.value\n if not stage_cost_component.is_expression():\n stage_cost_component.value = self._stage_costs[stage_name]\n\n for tree_node in self._node_list:\n # Some of these might be Expression objects so we check\n # for is_expression before changing.value\n for variable_id, var_value in iteritems(self._x[tree_node._name]):\n compdata = scenariotree_sm_bySymbol[variable_id]\n if not compdata.is_expression():\n compdata.value = var_value\n\n for variable_id in self._fixed[tree_node._name]:\n vardata = scenariotree_sm_bySymbol[variable_id]\n vardata.fix()\n\n for variable_id in self._stale[tree_node._name]:\n vardata = scenariotree_sm_bySymbol[variable_id]\n vardata.stale = True\n\n def package_current_solution(self, translate_ids=None, node_names=None):\n\n if node_names is None:\n node_names = [n._name for n in self._node_list]\n\n results = {}\n results['objective'] = self._objective\n results['cost'] = self._cost\n results['stage costs'] = copy.deepcopy(self._stage_costs)\n results['weight term cost'] = self._weight_term_cost\n results['proximal term cost'] = self._proximal_term_cost\n if translate_ids is None:\n results['x'] = copy.deepcopy(self._x)\n results['fixed'] = copy.deepcopy(self._fixed)\n results['stale'] = copy.deepcopy(self._stale)\n else:\n resx = results['x'] = {}\n for 
tree_node_name, tree_node_x in iteritems(self._x):\n if tree_node_name not in node_names:\n continue\n tree_node_translate_ids = translate_ids[tree_node_name]\n resx[tree_node_name] = \\\n dict((tree_node_translate_ids[scenario_tree_id],val) \\\n for scenario_tree_id, val in \\\n iteritems(tree_node_x))\n resfixed = results['fixed'] = {}\n for tree_node_name, tree_node_fixed in iteritems(self._fixed):\n if tree_node_name not in node_names:\n continue\n tree_node_translate_ids = translate_ids[tree_node_name]\n resfixed[tree_node_name] = \\\n set(tree_node_translate_ids[scenario_tree_id] \\\n for scenario_tree_id in tree_node_fixed)\n resstale = results['stale'] = {}\n for tree_node_name, tree_node_stale in iteritems(self._stale):\n if tree_node_name not in node_names:\n continue\n tree_node_translate_ids = translate_ids[tree_node_name]\n resstale[tree_node_name] = \\\n set(tree_node_translate_ids[scenario_tree_id] \\\n for scenario_tree_id in tree_node_stale)\n return results\n\n def update_current_solution(self, results):\n\n self._objective = results['objective']\n self._cost = results['cost']\n assert len(results['stage costs']) == len(self._stage_costs)\n self._stage_costs.update(results['stage costs'])\n self._weight_term_cost = results['weight term cost']\n self._proximal_term_cost = results['proximal term cost']\n for node in self._node_list:\n if node._name in results['x']:\n node_x = results['x'][node._name]\n self._x[node._name].update(node_x)\n else:\n self._x[node._name].update((i,None) for i in self._x[node._name])\n\n self._fixed[node._name].clear()\n if node._name in results['fixed']:\n self._fixed[node._name].update(results['fixed'][node._name])\n\n self._stale[node._name].clear()\n if node._name in results['stale']:\n self._stale[node._name].update(results['stale'][node._name])\n\n def push_w_to_instance(self):\n assert self._instance is not None\n for tree_node in self._node_list[:-1]:\n weight_parameter_name = \"PHWEIGHT_\"+str(tree_node._name)\n weight_parameter = self._instance.find_component(weight_parameter_name)\n weight_parameter.store_values(self._w[tree_node._name])\n\n def push_rho_to_instance(self):\n assert self._instance is not None\n\n for tree_node in self._node_list[:-1]:\n rho_parameter_name = \"PHRHO_\"+str(tree_node._name)\n rho_parameter = self._instance.find_component(rho_parameter_name)\n rho_parameter.store_values(self._rho[tree_node._name])\n\n #\n # a utility to determine the stage to which the input variable belongs.\n #\n\n def variableNode(self, variable, index):\n\n tuple_to_check = (variable.cname(),index)\n\n for this_node in self._node_list:\n\n if tuple_to_check in this_node._name_index_to_id:\n return this_node\n\n raise RuntimeError(\"The variable=\"+str(variable.cname())+\", index=\"+indexToString(index)+\" does not belong to any stage in the scenario tree\")\n\n #\n # a utility to determine the stage to which the input constraint \"belongs\".\n # a constraint belongs to the latest stage in which any of the variables\n # referenced in the constraint appears.\n # input is a constraint of type \"Constraint\", and an index of that\n # constraint - which might be None in the case of non-indexed constraints.\n # SOS constraints are handled via their member variables.\n # returns an instance of a ScenarioTreeStage object.\n # IMPT: this method works on the canonical representation (\"repn\" attribute)\n # of a constraint. 
this implies that pre-processing of the instance\n # has been performed.\n # NOTE: there is still the issue of whether the contained variables really\n # belong to the same model, but that is a different issue we won't\n # address right now (e.g., what does it mean for a constraint in an\n # extensive form binding instance to belong to a stage?).\n #\n\n def constraintNode(self, constraint, index, repn=None):\n\n deepest_node_index = -1\n deepest_node = None\n\n vardata_list = None\n if isinstance(constraint, SOSConstraint):\n vardata_list = constraint[index].get_members()\n\n else:\n if repn is None:\n parent_instance = constraint.parent()\n repn = getattr(parent_instance,\"canonical_repn\",None)\n if (repn is None):\n raise ValueError(\"Unable to find canonical_repn ComponentMap \"\n \"on constraint parent block %s for constraint %s\"\n % (parent_instance.cname(True), constraint.cname(True)))\n\n canonical_repn = repn.get(constraint[index])\n if canonical_repn is None:\n raise RuntimeError(\"Method constraintNode in class \"\n \"Scenario encountered a constraint \"\n \"with no canonical representation \"\n \"- was preprocessing performed?\")\n\n if isinstance(canonical_repn, GeneralCanonicalRepn):\n raise RuntimeError(\"Method constraintNode in class \"\n \"Scenario encountered a constraint \"\n \"with a general canonical encoding - \"\n \"only linear canonical encodings are expected!\")\n\n vardata_list = canonical_repn.variables\n\n for var_data in vardata_list:\n\n var_node = self.variableNode(var_data.parent_component(), var_data.index())\n var_node_index = self._node_list.index(var_node)\n\n if var_node_index > deepest_node_index:\n deepest_node_index = var_node_index\n deepest_node = var_node\n\n return deepest_node\n\nclass ScenarioTreeBundle(object):\n\n def __init__(self, *args, **kwds):\n\n self._name = None\n self._scenario_names = []\n self._scenario_tree = None # This is a compressed scenario tree, just for the bundle.\n self._probability = 0.0 # the absolute probability of scenarios associated with this node in the scenario tree.\n\nclass ScenarioTree(object):\n\n # a utility to construct scenario bundles.\n def _construct_scenario_bundles(self, scenario_tree_instance):\n\n for bundle_name in scenario_tree_instance.Bundles:\n scenario_list = []\n bundle_probability = 0.0\n for scenario_name in scenario_tree_instance.BundleScenarios[bundle_name]:\n scenario_list.append(scenario_name)\n bundle_probability += self._scenario_map[scenario_name]._probability\n\n scenario_tree_instance.Bundling[None] = False # to stop recursion!\n\n scenario_tree_for_bundle = ScenarioTree(scenariotreeinstance=scenario_tree_instance,\n scenariobundlelist=scenario_list)\n\n scenario_tree_instance.Bundling[None] = True\n\n if scenario_tree_for_bundle.validate() is False:\n raise RuntimeError(\"***ERROR: Bundled scenario tree is invalid!!!\")\n\n new_bundle = ScenarioTreeBundle()\n new_bundle._name = bundle_name\n new_bundle._scenario_names = scenario_list\n new_bundle._scenario_tree = scenario_tree_for_bundle\n new_bundle._probability = bundle_probability\n\n self._scenario_bundles.append(new_bundle)\n self._scenario_bundle_map[new_bundle._name] = new_bundle\n\n #\n # a utility to construct the stage objects for this scenario tree.\n # operates strictly by side effects, initializing the self\n # _stages and _stage_map attributes.\n #\n\n def _construct_stages(self, stage_names, stage_variable_names, stage_cost_variable_names, stage_derived_variable_names):\n\n # construct the stage objects, 
which will leave them\n # largely uninitialized - no variable information, in particular.\n for stage_name in stage_names:\n\n new_stage = ScenarioTreeStage()\n new_stage._name = stage_name\n new_stage._scenario_tree = self\n\n for variable_string in stage_variable_names[stage_name]:\n if isVariableNameIndexed(variable_string) is True:\n variable_name, match_template = extractVariableNameAndIndex(variable_string)\n else:\n variable_name = variable_string\n match_template = \"\"\n if variable_name not in new_stage._variables:\n new_stage._variables[variable_name] = []\n new_stage._variables[variable_name].append(match_template)\n\n if stage_name in stage_derived_variable_names: # not all stages have derived variables defined\n for variable_string in stage_derived_variable_names[stage_name]:\n if isVariableNameIndexed(variable_string) is True:\n variable_name, match_template = extractVariableNameAndIndex(variable_string)\n else:\n variable_name = variable_string\n match_template = \"\"\n if variable_name not in new_stage._derived_variables:\n new_stage._derived_variables[variable_name] = []\n new_stage._derived_variables[variable_name].append(match_template)\n\n # de-reference is required to access the parameter value\n cost_variable_string = value(stage_cost_variable_names[stage_name])\n if isVariableNameIndexed(cost_variable_string) is True:\n cost_variable_name, cost_variable_index = extractVariableNameAndIndex(cost_variable_string)\n else:\n cost_variable_name = cost_variable_string\n cost_variable_index = None\n new_stage._cost_variable = (cost_variable_name, cost_variable_index)\n\n self._stages.append(new_stage)\n self._stage_map[stage_name] = new_stage\n\n\n \"\"\" Constructor\n Arguments:\n scenarioinstance - the reference (deterministic) scenario instance.\n scenariotreeinstance - the pyomo model specifying all scenario tree (text) data.\n scenariobundlelist - a list of scenario names to retain, i.e., cull the rest to create a reduced tree!\n \"\"\"\n def __init__(self, *args, **kwds):\n\n self._name = None # some arbitrary identifier\n\n # should be called once for each variable blended across a node\n self._id_labeler = CounterLabeler()\n\n # the core objects defining the scenario tree.\n self._tree_nodes = [] # collection of ScenarioTreeNodes\n self._stages = [] # collection of ScenarioTreeStages - assumed to be in time-order. 
the set (provided by the user) itself *must* be ordered.\n self._scenarios = [] # collection of Scenarios\n self._scenario_bundles = [] # collection of ScenarioTreeBundles\n\n # dictionaries for the above.\n self._tree_node_map = {}\n self._stage_map = {}\n self._scenario_map = {}\n self._scenario_bundle_map = {}\n\n # a boolean indicating how data for scenario instances is specified.\n # possibly belongs elsewhere, e.g., in the PH algorithm.\n self._scenario_based_data = None\n\n scenario_tree_instance = kwds.pop( 'scenariotreeinstance', None )\n scenario_bundle_list = kwds.pop( 'scenariobundlelist', None )\n\n # process the keyword options\n for key in kwds:\n sys.stderr.write(\"Unknown option '%s' specified in call to ScenarioTree constructor\\n\" % key)\n\n if scenario_tree_instance is None:\n raise ValueError(\"A scenario tree instance must be supplied in the ScenarioTree constructor\")\n\n node_ids = scenario_tree_instance.Nodes\n node_child_ids = scenario_tree_instance.Children\n node_stage_ids = scenario_tree_instance.NodeStage\n node_probability_map = scenario_tree_instance.ConditionalProbability\n stage_ids = scenario_tree_instance.Stages\n stage_variable_ids = scenario_tree_instance.StageVariables\n stage_cost_variable_ids = scenario_tree_instance.StageCostVariable\n stage_derived_variable_ids = scenario_tree_instance.StageDerivedVariables\n scenario_ids = scenario_tree_instance.Scenarios\n scenario_leaf_ids = scenario_tree_instance.ScenarioLeafNode\n scenario_based_data = scenario_tree_instance.ScenarioBasedData\n\n # save the method for instance data storage.\n self._scenario_based_data = scenario_based_data()\n\n # the input stages must be ordered, for both output purposes and knowledge of the final stage.\n if stage_ids.ordered is False:\n raise ValueError(\"An ordered set of stage IDs must be supplied in the ScenarioTree constructor\")\n\n empty_nonleaf_stages = [stage for stage in stage_ids \\\n if len(stage_variable_ids[stage])==0 \\\n and stage != stage_ids.last()]\n if len(empty_nonleaf_stages) > 0:\n raise ValueError(\"A ScenarioTree has been declared with one\"\n \" or more empty (non-leaf) stages. 
This must\"\n \" be corrected by defining non-empty sets \"\n \"for the following entries in \"\n \"ScenarioStructure.dat: \n- %s\" % \\\n ('\n- '.join('StageVariables[%s]'%(stage) \\\n for stage in empty_nonleaf_stages)))\n\n #\n # construct the actual tree objects\n #\n\n # construct the stage objects w/o any linkages first; link them up\n # with tree nodes after these have been fully constructed.\n self._construct_stages(stage_ids, stage_variable_ids, stage_cost_variable_ids, stage_derived_variable_ids)\n\n # construct the tree node objects themselves in a first pass,\n # and then link them up in a second pass to form the tree.\n # can't do a single pass because the objects may not exist.\n for tree_node_name in node_ids:\n\n if tree_node_name not in node_stage_ids:\n raise ValueError(\"No stage is assigned to tree node=%s\" % (tree_node_name))\n\n stage_name = value(node_stage_ids[tree_node_name])\n if stage_name not in self._stage_map:\n raise ValueError(\"Unknown stage=%s assigned to tree node=%s\"\n % (stage_name, tree_node_name))\n\n new_tree_node = ScenarioTreeNode(tree_node_name,\n value(node_probability_map[tree_node_name]),\n self._stage_map[stage_name])\n\n self._tree_nodes.append(new_tree_node)\n self._tree_node_map[tree_node_name] = new_tree_node\n self._stage_map[stage_name]._tree_nodes.append(new_tree_node)\n\n # link up the tree node objects based on the child id sets.\n for this_node in self._tree_nodes:\n this_node._children = []\n # otherwise, you're at a leaf and all is well.\n if this_node._name in node_child_ids:\n child_ids = node_child_ids[this_node._name]\n for child_id in child_ids:\n if child_id in self._tree_node_map:\n child_node = self._tree_node_map[child_id]\n this_node._children.append(child_node)\n if child_node._parent is None:\n child_node._parent = this_node\n else:\n raise ValueError(\"Multiple parents specified for tree node=%s; \"\n \"existing parent node=%s; conflicting parent \"\n \"node=%s\"\n % (child_id,\n child_node._parent._name,\n this_node._name))\n else:\n raise ValueError(\"Unknown child tree node=%s specified \"\n \"for tree node=%s\"\n % (child_id, this_node._name))\n\n # at this point, the scenario tree nodes and the stages are set - no\n # two-pass logic necessary when constructing scenarios.\n for scenario_name in scenario_ids:\n\n new_scenario = Scenario()\n new_scenario._name = scenario_name\n\n if scenario_name not in scenario_leaf_ids:\n raise ValueError(\"No leaf tree node specified for scenario=%s\"\n % (scenario_name))\n else:\n scenario_leaf_node_name = value(scenario_leaf_ids[scenario_name])\n if scenario_leaf_node_name not in self._tree_node_map:\n raise ValueError(\"Unknown tree node=%s specified as leaf \"\n \"of scenario=%s\"\n % (scenario_leaf_node_name, scenario_name))\n else:\n new_scenario._leaf_node = self._tree_node_map[scenario_leaf_node_name]\n\n current_node = new_scenario._leaf_node\n while current_node is not None:\n new_scenario._node_list.append(current_node)\n current_node._scenarios.append(new_scenario) # links the scenarios to the nodes to enforce necessary non-anticipativity\n current_node = current_node._parent\n new_scenario._node_list.reverse()\n # This now loops root -> leaf\n probability = 1.0\n for current_node in new_scenario._node_list:\n probability *= current_node._conditional_probability\n # NOTE: The line placement below is a little weird, in that\n # it is embedded in a scenario loop - so the probabilities\n # for some nodes will be redundantly computed. But this works.\n
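 # (illustrative note, not in the original source) e.g. for a scenario
 # whose node list carries conditional probabilities [1.0, 0.5, 0.25],
 # the running product below assigns node probabilities 1.0, 0.5, and
 # 0.125, and the scenario's unconditional probability is 0.125.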
current_node._probability = probability\n\n new_scenario._stage_costs[current_node._stage._name] = None\n new_scenario._x[current_node._name] = {}\n new_scenario._w[current_node._name] = {}\n new_scenario._rho[current_node._name] = {}\n new_scenario._fixed[current_node._name] = set()\n new_scenario._stale[current_node._name] = set()\n\n new_scenario._probability = probability\n\n self._scenarios.append(new_scenario)\n self._scenario_map[scenario_name] = new_scenario\n\n # for output purposes, it is useful to know the maximal\n # length of identifiers in the scenario tree for any\n # particular category. I'm building these up incrementally, as\n # they are needed. 0 indicates unassigned.\n self._max_scenario_id_length = 0\n\n # does the actual traversal to populate the members.\n self.computeIdentifierMaxLengths()\n\n # if a sub-bundle of scenarios has been specified, mark the\n # active scenario tree components and compress the tree.\n if scenario_bundle_list is not None:\n self.compress(scenario_bundle_list)\n\n # NEW SCENARIO BUNDLING STARTS HERE\n if value(scenario_tree_instance.Bundling[None]) is True:\n self._construct_scenario_bundles(scenario_tree_instance)\n\n #\n # populate those portions of the scenario tree and associated\n # stages and tree nodes that reference the scenario instances\n # associated with the tree.\n #\n\n def linkInInstances(self,\n scenario_instance_map,\n objective_sense=None,\n create_variable_ids=True,\n master_scenario_tree=None,\n initialize_solution_data=True):\n\n if objective_sense not in (minimize, maximize, None):\n raise ValueError(\"Invalid value (%r) for objective sense given to the linkInInstances method. \"\n \"Choices are: [minimize, maximize, None]\" % (objective_sense))\n\n if (create_variable_ids == True) and (master_scenario_tree is not None):\n raise RuntimeError(\"The linkInInstances method of ScenarioTree objects cannot be invoked with both create_variable_ids=True and master_scenario_tree!=None\")\n\n # propagate the scenario instances to the scenario tree object\n # structure.\n # NOTE: The input scenario instances may be a super-set of the\n # set of Scenario objects for this ScenarioTree.\n master_has_instance = {}\n for scenario_name, scenario_instance in iteritems(scenario_instance_map):\n if self.contains_scenario(scenario_name):\n master_has_instance[scenario_name] = False\n if master_scenario_tree is not None:\n master_scenario = master_scenario_tree.get_scenario(scenario_name)\n if master_scenario._instance is not None:\n master_has_instance[scenario_name] = True\n _scenario = self.get_scenario(scenario_name)\n _scenario._instance = scenario_instance\n\n # link the scenario tree object structures to the instance components.\n self.populateVariableIndicesAndValues(create_variable_ids=create_variable_ids,\n master_scenario_tree=master_scenario_tree,\n initialize_solution_data=initialize_solution_data)\n\n # create the scenario cost expression to be used for the objective\n for scenario_name, scenario_instance in iteritems(scenario_instance_map):\n if self.contains_scenario(scenario_name):\n scenario = self.get_scenario(scenario_name)\n\n if master_has_instance[scenario_name]:\n master_scenario = master_scenario_tree.get_scenario(scenario_name)\n scenario._instance_cost_expression = master_scenario._instance_cost_expression\n scenario._instance_objective = master_scenario._instance_objective\n scenario._objective_sense = master_scenario._objective_sense\n scenario._objective_name = master_scenario._objective_name\n continue\n\n 
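 # (descriptive note added for clarity) the remainder of this loop builds
 # the scenario's cost expression and objective: with objective_sense=None
 # the user's active Objective is wrapped; otherwise a new objective is
 # synthesized as the sum of the per-stage cost variables.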
user_objective = find_active_objective(scenario_instance, safety_checks=True)\n if objective_sense is None:\n if user_objective is None:\n raise RuntimeError(\"An active Objective could not \"\n \"be found on instance for \"\n \"scenario %s.\" % (scenario_name))\n cost_expr_name = \"_USER_COST_EXPRESSION_\"+str(scenario_name)\n cost_expr = Expression(name=cost_expr_name,initialize=user_objective.expr)\n scenario_instance.add_component(cost_expr_name,cost_expr)\n scenario._instance_cost_expression = cost_expr\n\n user_objective_sense = minimize if (user_objective.is_minimizing()) else maximize\n cost_obj_name = \"_USER_COST_OBJECTIVE_\"+str(scenario_name)\n cost_obj = Objective(name=cost_obj_name,expr=cost_expr, sense=user_objective_sense)\n scenario_instance.add_component(cost_obj_name,cost_obj)\n scenario._instance_objective = cost_obj\n scenario._objective_sense = user_objective_sense\n scenario._objective_name = scenario._instance_objective.cname()\n user_objective.deactivate()\n else:\n if user_objective is not None:\n #print(\"* Active Objective \\\"%s\\\" on scenario instance \\\"%s\\\" will not be used. \"\n # % (user_objective.cname(True),scenario_name))\n user_objective.deactivate()\n\n cost = 0.0\n for stage in self._stages:\n stage_cost_var = scenario_instance.find_component(stage._cost_variable[0])[stage._cost_variable[1]]\n cost += stage_cost_var\n cost_expr_name = \"_PYSP_COST_EXPRESSION_\"+str(scenario_name)\n cost_expr = Expression(name=cost_expr_name,initialize=cost)\n scenario_instance.add_component(cost_expr_name,cost_expr)\n scenario._instance_cost_expression = cost_expr\n\n cost_obj_name = \"_PYSP_COST_OBJECTIVE_\"+str(scenario_name)\n cost_obj = Objective(name=cost_obj_name,expr=cost_expr, sense=objective_sense)\n scenario_instance.add_component(cost_obj_name,cost_obj)\n scenario._instance_objective = cost_obj\n scenario._objective_sense = objective_sense\n scenario._objective_name = scenario._instance_objective.cname()\n\n #\n # compute the set of variable indices being blended at each node. 
this can't be done\n # until all of the scenario instances are available, as different scenarios can have\n # different index sets.\n #\n\n def populateVariableIndicesAndValues(self,\n create_variable_ids=True,\n master_scenario_tree=None,\n initialize_solution_data=True):\n\n if (create_variable_ids == True) and (master_scenario_tree != None):\n raise RuntimeError(\"The populateVariableIndicesAndValues method of ScenarioTree objects cannot be invoked with both create_variable_ids=True and master_scenario_tree!=None\")\n\n labeler = None\n if create_variable_ids is True:\n labeler = self._id_labeler\n\n for stage in self._stages:\n tree_node_list = sorted(stage._tree_nodes, key=lambda x: x._name)\n for tree_node in tree_node_list:\n name_index_to_id_map = None\n if master_scenario_tree is not None:\n name_index_to_id_map = master_scenario_tree.get_node(tree_node._name)._name_index_to_id\n tree_node.populateVariableIndicesAndValues(id_labeler=labeler,\n name_index_to_id_map=name_index_to_id_map,\n initialize_solution_data=initialize_solution_data)\n\n #\n # is the indicated scenario / bundle in the tree?\n #\n\n def contains_scenario(self, name):\n return name in self._scenario_map\n\n def contains_bundles(self):\n return len(self._scenario_bundle_map) > 0\n\n def contains_bundle(self, name):\n return name in self._scenario_bundle_map\n\n #\n # get the scenario / bundle object from the tree.\n #\n\n def get_scenario(self, name):\n return self._scenario_map[name]\n\n def get_bundle(self, name):\n return self._scenario_bundle_map[name]\n\n # there are many contexts where manipulators of a scenario\n # tree simply need an arbitrary scenario to proceed...\n def get_arbitrary_scenario(self):\n return self._scenarios[0]\n\n def contains_node(self, name):\n return name in self._tree_node_map\n\n #\n # get the scenario tree node object from the tree\n #\n def get_node(self, name):\n return self._tree_node_map[name]\n\n #\n # utility for compressing or culling a scenario tree based on\n # a provided list of scenarios (specified by name) to retain -\n # all non-referenced components are eliminated. this particular\n # method compresses *in-place*, i.e., via direct modification\n # of the scenario tree structure.\n #\n\n def compress(self, scenario_bundle_list):\n\n # scan for and mark all referenced scenarios and\n # tree nodes in the bundle list - all stages will\n # obviously remain.\n for scenario_name in scenario_bundle_list:\n if scenario_name not in self._scenario_map:\n raise ValueError(\"Scenario=%s selected for \"\n \"bundling not present in \"\n \"scenario tree\"\n % (scenario_name))\n scenario = self._scenario_map[scenario_name]\n scenario.retain = True\n\n # chase all nodes comprising this scenario,\n # marking them for retention.\n for node in scenario._node_list:\n node.retain = True\n\n # scan for any non-retained scenarios and tree nodes.\n scenarios_to_delete = []\n tree_nodes_to_delete = []\n for scenario in self._scenarios:\n if hasattr(scenario, \"retain\") is True:\n delattr(scenario, \"retain\")\n else:\n scenarios_to_delete.append(scenario)\n del self._scenario_map[scenario._name]\n\n for tree_node in self._tree_nodes:\n if hasattr(tree_node, \"retain\") is True:\n delattr(tree_node, \"retain\")\n else:\n tree_nodes_to_delete.append(tree_node)\n del self._tree_node_map[tree_node._name]\n\n # JPW does not claim the following routines are\n # the most efficient. 
rather, they get the job\n # done while avoiding serious issues with\n # attempting to remove elements from a list that\n # you are iterating over.\n\n # delete all references to unmarked scenarios\n # and child tree nodes in the scenario tree node\n # structures.\n for tree_node in self._tree_nodes:\n for scenario in scenarios_to_delete:\n if scenario in tree_node._scenarios:\n tree_node._scenarios.remove(scenario)\n for node_to_delete in tree_nodes_to_delete:\n if node_to_delete in tree_node._children:\n tree_node._children.remove(node_to_delete)\n\n # delete all references to unmarked tree nodes\n # in the scenario tree stage structures.\n for stage in self._stages:\n for tree_node in tree_nodes_to_delete:\n if tree_node in stage._tree_nodes:\n stage._tree_nodes.remove(tree_node)\n\n # delete all unreferenced entries from the core scenario\n # tree data structures.\n for scenario in scenarios_to_delete:\n self._scenarios.remove(scenario)\n for tree_node in tree_nodes_to_delete:\n self._tree_nodes.remove(tree_node)\n\n # re-normalize the conditional probabilities of the\n # children at each tree node.\n for tree_node in self._tree_nodes:\n sum_child_probabilities = 0.0\n for child_node in tree_node._children:\n sum_child_probabilities += child_node._conditional_probability\n\n for child_node in tree_node._children:\n # the user may specify that the probability of a scenario is 0.0,\n # and while odd, we should allow the edge case.\n if sum_child_probabilities == 0.0:\n child_node._conditional_probability = 0.0\n else:\n child_node._conditional_probability = child_node._conditional_probability / sum_child_probabilities\n\n # re-compute the absolute scenario probabilities based\n # on the re-normalized conditional node probabilities.\n for scenario in self._scenarios:\n probability = 1.0\n for tree_node in scenario._node_list:\n probability = probability * tree_node._conditional_probability\n scenario._probability = probability\n\n # now that we've culled the scenarios, cull the bundles. do\n # this in two passes. in the first pass, we identify the names\n # of bundles to delete, by looking for bundles with deleted\n # scenarios. 
in the second pass, we delete the bundles from\n # the scenario tree, and normalize the probabilities of the\n # remaining bundles.\n\n # indices of the objects in the scenario tree bundle list\n bundles_to_delete = []\n for i in xrange(0,len(self._scenario_bundles)):\n scenario_bundle = self._scenario_bundles[i]\n for scenario_name in scenario_bundle._scenario_names:\n if scenario_name not in self._scenario_map:\n bundles_to_delete.append(i)\n break\n bundles_to_delete.reverse()\n for i in bundles_to_delete:\n deleted_bundle = self._scenario_bundles.pop(i)\n del self._scenario_bundle_map[deleted_bundle._name]\n\n sum_bundle_probabilities = sum(bundle._probability for bundle in self._scenario_bundles)\n for bundle in self._scenario_bundles:\n bundle._probability /= sum_bundle_probabilities\n\n #\n # utility for automatically selecting a proportion of scenarios from the\n # tree to retain, eliminating the rest.\n #\n\n def downsample(self, fraction_to_retain, random_seed, verbose=False):\n\n random.seed(random_seed)\n\n random_sequence=range(len(self._scenarios))\n random.shuffle(random_sequence)\n\n number_to_retain = max(int(round(float(len(random_sequence)*fraction_to_retain))), 1)\n\n scenario_bundle_list = []\n for i in xrange(number_to_retain):\n scenario_bundle_list.append(self._scenarios[random_sequence[i]]._name)\n\n if verbose is True:\n print(\"Downsampling scenario tree - retained %s \"\n \"scenarios: %s\"\n % (len(scenario_bundle_list),\n str(scenario_bundle_list)))\n\n self.compress(scenario_bundle_list)\n\n\n #\n # returns the root node of the scenario tree\n #\n\n def findRootNode(self):\n\n for tree_node in self._tree_nodes:\n if tree_node._parent is None:\n return tree_node\n return None\n\n #\n # a utility function to compute, based on the current scenario tree content,\n # the maximal length of identifiers in various categories.\n #\n\n def computeIdentifierMaxLengths(self):\n\n self._max_scenario_id_length = 0\n for scenario in self._scenarios:\n if len(str(scenario._name)) > self._max_scenario_id_length:\n self._max_scenario_id_length = len(str(scenario._name))\n\n #\n # a utility function to (partially, at the moment) validate a scenario tree\n #\n\n def validate(self):\n\n # for any node, the sum of conditional probabilities of the children should sum to 1.\n for tree_node in self._tree_nodes:\n sum_probabilities = 0.0\n if len(tree_node._children) > 0:\n for child in tree_node._children:\n sum_probabilities += child._conditional_probability\n if abs(1.0 - sum_probabilities) > 0.000001:\n print(\"The child conditional probabilities for tree node=%s \"\n \" sum to %s\" % (tree_node._name, sum_probabilities))\n return False\n\n # ensure that there is only one root node in the tree\n num_roots = 0\n root_ids = []\n for tree_node in self._tree_nodes:\n if tree_node._parent is None:\n num_roots += 1\n root_ids.append(tree_node._name)\n\n if num_roots != 1:\n print(\"Illegal set of root nodes detected: \" + str(root_ids))\n return False\n\n # there must be at least one scenario passing through each tree node.\n for tree_node in self._tree_nodes:\n if len(tree_node._scenarios) == 0:\n print(\"There are no scenarios associated with tree node=%s\"\n % (tree_node._name))\n return False\n\n return True\n\n #\n # copies the parameter values stored in any tree node _averages attribute\n # into any tree node _solution attribute - only for active variable values.\n #\n\n def snapshotSolutionFromAverages(self):\n\n for tree_node in self._tree_nodes:\n\n 
tree_node.snapshotSolutionFromAverages()\n\n #\n # assigns the variable values at each tree node based on the input\n # instances.\n #\n\n # Note: Trying to work this function out of the code. The only\n # solution we should get used to working with is that stored\n # on the scenario objects\n def XsnapshotSolutionFromInstances(self):\n\n for tree_node in self._tree_nodes:\n tree_node.snapshotSolutionFromInstances()\n\n def pullScenarioSolutionsFromInstances(self):\n\n for scenario in self._scenarios:\n scenario.update_solution_from_instance()\n\n def snapshotSolutionFromScenarios(self):\n for tree_node in self._tree_nodes:\n tree_node.snapshotSolutionFromScenarios()\n\n def create_random_bundles(self, scenario_tree_instance, num_bundles, random_seed):\n\n random.seed(random_seed)\n\n num_scenarios = len(self._scenarios)\n\n sequence = list(xrange(num_scenarios))\n random.shuffle(sequence)\n\n scenario_tree_instance.Bundling[None] = True\n\n next_scenario_index = 0\n\n # this is a hack-ish way to re-initialize the Bundles set of a\n # scenario tree instance, which should already be there\n # (because it is defined in the abstract model). however, we\n # don't have a \"clear\" method on a set, so...\n scenario_tree_instance.del_component(\"Bundles\")\n scenario_tree_instance.add_component(\"Bundles\", Set())\n for i in xrange(1, num_bundles+1):\n bundle_name = \"Bundle\"+str(i)\n scenario_tree_instance.Bundles.add(bundle_name)\n\n # ditto above comment regarding del_component/add_component\n scenario_tree_instance.del_component(\"BundleScenarios\")\n scenario_tree_instance.add_component(\"BundleScenarios\",\n Set(scenario_tree_instance.Bundles))\n\n bundles = []\n for i in xrange(num_bundles):\n bundle_name = \"Bundle\"+str(i+1)\n scenario_tree_instance.BundleScenarios[bundle_name] = Set()\n bundles.append(scenario_tree_instance.BundleScenarios[bundle_name])\n\n scenario_index = 0\n while (scenario_index < num_scenarios):\n for bundle_index in xrange(num_bundles):\n if (scenario_index == num_scenarios):\n break\n bundles[bundle_index].add(\n self._scenarios[sequence[scenario_index]]._name)\n scenario_index += 1\n\n self._construct_scenario_bundles(scenario_tree_instance)\n\n #\n # a utility function to pretty-print the static/non-cost\n # information associated with a scenario tree\n #\n\n def pprint(self):\n\n print(\"Scenario Tree Detail\")\n\n print(\"----------------------------------------------------\")\n print(\"Tree Nodes:\")\n print(\"\")\n for tree_node_name in sorted(iterkeys(self._tree_node_map)):\n tree_node = self._tree_node_map[tree_node_name]\n print(\"\\tName=%s\" % (tree_node_name))\n if tree_node._stage is not None:\n print(\"\\tStage=%s\" % (tree_node._stage._name))\n else:\n print(\"\\t Stage=None\")\n if tree_node._parent is not None:\n print(\"\\tParent=%s\" % (tree_node._parent._name))\n else:\n print(\"\\tParent=\" + \"None\")\n if tree_node._conditional_probability is not None:\n print(\"\\tConditional probability=%4.4f\" % tree_node._conditional_probability)\n else:\n print(\"\\tConditional probability=\" + \"***Undefined***\")\n print(\"\\tChildren:\")\n if len(tree_node._children) > 0:\n for child_node in sorted(tree_node._children, key=lambda x: x._name):\n print(\"\\t\\t%s\" % (child_node._name))\n else:\n print(\"\\t\\tNone\")\n print(\"\\tScenarios:\")\n if len(tree_node._scenarios) == 0:\n print(\"\\t\\tNone\")\n else:\n for scenario in sorted(tree_node._scenarios, key=lambda x: x._name):\n print(\"\\t\\t%s\" % (scenario._name))\n print(\"\")\n 
print(\"----------------------------------------------------\")\n print(\"Stages:\")\n for stage_name in sorted(iterkeys(self._stage_map)):\n stage = self._stage_map[stage_name]\n print(\"\\tName=%s\" % (stage_name))\n print(\"\\tTree Nodes: \")\n for tree_node in sorted(stage._tree_nodes, key=lambda x: x._name):\n print(\"\\t\\t%s\" % (tree_node._name))\n if len(stage._variables) > 0:\n print(\"\\tVariables: \")\n for variable_name in sorted(iterkeys(stage._variables)):\n match_templates = stage._variables[variable_name]\n sys.stdout.write(\"\\t\\t \"+variable_name+\" : \")\n for match_template in match_templates:\n sys.stdout.write(indexToString(match_template)+' ')\n print(\"\")\n if len(stage._derived_variables) > 0:\n print(\"\\tDerived Variables: \")\n for variable_name in sorted(iterkeys(stage._derived_variables)):\n match_templates = stage._derived_variables[variable_name]\n sys.stdout.write(\"\\t\\t \"+variable_name+\" : \")\n for match_template in match_templates:\n sys.stdout.write(indexToString(match_template)+' ')\n print(\"\")\n print(\"\\tCost Variable: \")\n if stage._cost_variable[1] is None:\n print(\"\\t\\t\" + stage._cost_variable[0])\n else:\n print(\"\\t\\t\" + stage._cost_variable[0] + indexToString(stage._cost_variable[1]))\n print(\"\")\n print(\"----------------------------------------------------\")\n print(\"Scenarios:\")\n for scenario_name in sorted(iterkeys(self._scenario_map)):\n scenario = self._scenario_map[scenario_name]\n print(\"\\tName=%s\" % (scenario_name))\n print(\"\\tProbability=%4.4f\" % scenario._probability)\n if scenario._leaf_node is None:\n print(\"\\tLeaf node=None\")\n else:\n print(\"\\tLeaf node=%s\" % (scenario._leaf_node._name))\n print(\"\\tTree node sequence:\")\n for tree_node in scenario._node_list:\n print(\"\\t\\t%s\" % (tree_node._name))\n print(\"\")\n print(\"----------------------------------------------------\")\n if len(self._scenario_bundles) > 0:\n print(\"Scenario Bundles:\")\n for bundle_name in sorted(iterkeys(self._scenario_bundle_map)):\n scenario_bundle = self._scenario_bundle_map[bundle_name]\n print(\"\\tName=%s\" % (bundle_name))\n print(\"\\tProbability=%4.4f\" % scenario_bundle._probability )\n sys.stdout.write(\"\\tScenarios: \")\n for scenario_name in sorted(scenario_bundle._scenario_names):\n sys.stdout.write(str(scenario_name)+' ')\n sys.stdout.write(\"\\n\")\n print(\"\")\n print(\"----------------------------------------------------\")\n\n #\n # a utility function to pretty-print the solution associated with a scenario tree\n #\n\n def pprintSolution(self, epsilon=1.0e-5):\n\n print(\"----------------------------------------------------\")\n print(\"Tree Nodes:\")\n print(\"\")\n for tree_node_name in sorted(iterkeys(self._tree_node_map)):\n tree_node = self._tree_node_map[tree_node_name]\n print(\"\\tName=%s\" % (tree_node_name))\n if tree_node._stage is not None:\n print(\"\\tStage=%s\" % (tree_node._stage._name))\n else:\n print(\"\\t Stage=None\")\n if tree_node._parent is not None:\n print(\"\\tParent=%s\" % (tree_node._parent._name))\n else:\n print(\"\\tParent=\" + \"None\")\n if len(tree_node._stage._variables) > 0:\n print(\"\\tVariables: \")\n for variable_name in sorted(iterkeys(tree_node._stage._variables)):\n indices = sorted(tree_node._variable_indices[variable_name])\n for index in indices:\n id = tree_node._name_index_to_id[variable_name,index]\n if id in tree_node._standard_variable_ids:\n # if a solution has not yet been stored /\n # snapshotted, then the value won't be in the solution 
map\n try:\n value = tree_node._solution[id]\n except:\n value = None\n if (value is not None) and (fabs(value) > epsilon):\n print(\"\\t\\t\"+variable_name+indexToString(index)+\"=\"+str(value))\n if len(tree_node._stage._derived_variables) > 0:\n print(\"\\tDerived Variables: \")\n for variable_name in sorted(iterkeys(tree_node._stage._derived_variables)):\n indices = sorted(tree_node._variable_indices[variable_name])\n for index in indices:\n id = tree_node._name_index_to_id[variable_name,index]\n if id in tree_node._derived_variable_ids:\n # if a solution has not yet been stored /\n # snapshotted, then the value won't be in the solution map\n try:\n value = tree_node._solution[tree_node._name_index_to_id[variable_name,index]]\n except:\n value = None\n if (value is not None) and (fabs(value) > epsilon):\n print(\"\\t\\t\"+variable_name+indexToString(index)+\"=\"+str(value))\n print(\"\")\n\n #\n # a utility function to pretty-print the cost information associated with a scenario tree\n #\n\n def pprintCosts(self):\n\n print(\"Scenario Tree Costs\")\n print(\"***CAUTION***: Assumes full (or nearly so) convergence of scenario solutions at each node in the scenario tree - computed costs are invalid otherwise\")\n\n print(\"----------------------------------------------------\")\n print(\"Tree Nodes:\")\n print(\"\")\n for tree_node_name in sorted(iterkeys(self._tree_node_map)):\n tree_node = self._tree_node_map[tree_node_name]\n print(\"\\tName=%s\" % (tree_node_name))\n if tree_node._stage is not None:\n print(\"\\tStage=%s\" % (tree_node._stage._name))\n else:\n print(\"\\t Stage=None\")\n if tree_node._parent is not None:\n print(\"\\tParent=%s\" % (tree_node._parent._name))\n else:\n print(\"\\tParent=\" + \"None\")\n if tree_node._conditional_probability is not None:\n print(\"\\tConditional probability=%4.4f\" % tree_node._conditional_probability)\n else:\n print(\"\\tConditional probability=\" + \"***Undefined***\")\n print(\"\\tChildren:\")\n if len(tree_node._children) > 0:\n for child_node in sorted(tree_node._children, key=lambda x: x._name):\n print(\"\\t\\t%s\" % (child_node._name))\n else:\n print(\"\\t\\tNone\")\n print(\"\\tScenarios:\")\n if len(tree_node._scenarios) == 0:\n print(\"\\t\\tNone\")\n else:\n for scenario in sorted(tree_node._scenarios, key=lambda x: x._name):\n print(\"\\t\\t%s\" % (scenario._name))\n print(\"\\tExpected cost of (sub)tree rooted at node=%10.4f\" % tree_node.computeExpectedNodeCost())\n print(\"\")\n\n print(\"----------------------------------------------------\")\n print(\"Scenarios:\")\n print(\"\")\n for scenario_name in sorted(iterkeys(self._scenario_map)):\n scenario = self._scenario_map[scenario_name]\n\n print(\"\\tName=%s\" % (scenario_name))\n print(\"\\tProbability=%4.4f\" % scenario._probability)\n\n if scenario._leaf_node is None:\n print(\"\\tLeaf Node=None\")\n else:\n print(\"\\tLeaf Node=%s\" % (scenario._leaf_node._name))\n\n print(\"\\tTree node sequence:\")\n for tree_node in scenario._node_list:\n print(\"\\t\\t%s\" % (tree_node._name))\n\n aggregate_cost = 0.0\n for stage in self._stages:\n # find the tree node for this scenario, representing this stage.\n tree_node = None\n for node in scenario._node_list:\n if node._stage == stage:\n tree_node = node\n break\n\n cost_variable_value = scenario._stage_costs[stage._name]\n\n if cost_variable_value is not None:\n print(\"\\tStage=%20s Cost=%10.4f\"\n % (stage._name, cost_variable_value))\n cost = cost_variable_value\n else:\n print(\"\\tStage=%20s Cost=%10s\"\n % 
(stage._name, \"Not Reported.\"))\n cost = 0.0\n aggregate_cost += cost\n\n print(\"\tTotal scenario cost=%10.4f\" % aggregate_cost)\n print(\"\")\n print(\"----------------------------------------------------\")\n","sub_path":"pyomo/pysp/scenariotree.py","file_name":"scenariotree.py","file_ext":"py","file_size_in_byte":110295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"126950253","text":"from django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Course, CourseContent, CourseSignIn\nfrom django.utils import timezone\nfrom django.urls import reverse\nfrom django.contrib.auth.models import Group, Permission, User\nfrom comment.models import Comment\nfrom comment.forms import CommentForm\n\n\n# Render the home page\ndef index(request):\n context = {'courses': Course.objects.all()}\n return render(request, 'course/index.html', context)\n\n\n# Render the course list\ndef course_list(request):\n context = {'courses': Course.objects.all()}\n return render(request, 'course/course_list.html', context)\n\n\n# Render the course detail page\n# def course_detail(request, course_id):\n# context = {\"course\": get_object_or_404(Course, id=course_id),\n# \"chapters\": CourseContent.objects.filter(courseId=int(course_id))}\n# return render(request, 'course/course_detail.html', context)\n\n\n# Render a course chapter\ndef course_chapter(request, course_id, chapter_num):\n sign_info = CourseSignIn.objects.filter(courseId=int(course_id), signIn_time=timezone.now(), student_id=request.user.id)\n sign_bool = 0\n if sign_info.exists():\n sign_bool = 1\n context = {\"course\": get_object_or_404(Course, id=course_id),\n \"chapters\": CourseContent.objects.filter(courseId=int(course_id)),\n \"thisChapter\": get_object_or_404(CourseContent, courseId=int(course_id), chapter=chapter_num),\n \"signInBool\": sign_bool,\n \"comments\": Comment.objects.filter(course_id=int(course_id), chapter_num=int(chapter_num)),\n \"comment_form\": CommentForm(initial={\n \"course_id\": course_id,\n \"chapter_num\": chapter_num})\n }\n return render(request, 'course/course_detail.html', context)\n\n\n# Render the page for adding a course chapter\ndef course_chapter_add(request, course_id):\n next_chapter_num = CourseContent.objects.filter(courseId=int(course_id)).count() + 1\n context = {\"course\": get_object_or_404(Course, id=course_id),\n \"chapters\": CourseContent.objects.filter(courseId=int(course_id)),\n \"comment_form\": CommentForm(initial={\n \"course_id\": course_id,\n \"chapter_num\": next_chapter_num})\n }\n return render(request, 'course/course_chapter_add.html', context)\n\n\n# Student sign-in\ndef signIn(request, course_id):\n sig_in = CourseSignIn.objects.create(courseId=course_id, student_id=request.user.id, student_name=request.user.first_name)\n referer = request.META.get('HTTP_REFERER', reverse('course_chapter', kwargs={'course_id': 1, 'chapter_num': 1}))\n return redirect(referer)\n\n\n# Render the course management page\ndef course_manage(request):\n context = {'courses': Course.objects.all()}\n return render(request, 'course/course_manage.html', context)\n\n\n# Enroll in a course\ndef courseAdd(request, course_id):\n my_group = Group.objects.get(name=course_id)\n my_group.user_set.add(request.user)\n referer = request.META.get('HTTP_REFERER', reverse('course_manage'))\n return redirect(referer)\n\n\n# Drop a course\ndef courseRemove(request, course_id):\n my_group = Group.objects.get(name=course_id)\n my_group.user_set.remove(request.user)\n referer = request.META.get('HTTP_REFERER', reverse('course_manage'))\n return redirect(referer)\n\n\n
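# Editor's note: the enrollment/state-toggle views in this module repeat the
# same "redirect back to the referring page" pattern. A minimal helper sketch
# (hypothetical; not part of the original app) that could factor it out:
def redirect_back(request, fallback_view_name, **url_kwargs):
    # Fall back to a named route when no Referer header was sent
    referer = request.META.get('HTTP_REFERER',
                               reverse(fallback_view_name, kwargs=url_kwargs or None))
    return redirect(referer)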
# Suspend a course\ndef courseStop(request, course_id):\n my_course = Course.objects.get(id=course_id)\n my_course.course_state = 0\n my_course.save()\n referer = request.META.get('HTTP_REFERER', reverse('course_manage'))\n return redirect(referer)\n\n\n# Resume a course\ndef courseOpen(request, course_id):\n my_course = Course.objects.get(id=course_id)\n my_course.course_state = 1\n my_course.save()\n referer = request.META.get('HTTP_REFERER', reverse('course_manage'))\n return redirect(referer)\n\n\n# Student management page\ndef student_manage(request):\n context = {}\n course_id = 1\n context[\"courses\"] = Course.objects.all()\n context[\"signIn\"] = CourseSignIn.objects.filter(signIn_time=timezone.now())\n while course_id > 0:\n # Iterate over the courses\n group = Group.objects.filter(name=course_id)\n if group.exists():\n # The course exists\n context[\"course_sum\"] = Course.objects.all().count()\n context[\"users\"+str(course_id)] = User.objects.filter(groups__in=str(course_id))\n course_id += 1\n else:\n break\n return render(request, 'course/student_manage.html', context)\n","sub_path":"course/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"584037379","text":"\"\"\"Copyright 2019 Cisco Systems\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\nThe contents of this file are licensed under the Apache License, Version 2.0\n(the \"License\"); you may not use this file except in compliance with the\nLicense. You may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\nWARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\nLicense for the specific language governing permissions and limitations under\nthe License.\n\"\"\"\n\n\"\"\"Wrapper for IOS XE to simplify usage of gNMI implementation.\"\"\"\n\nimport json\nimport logging\n\nfrom six import string_types\nfrom .client import Client, proto, util\n\n\nLOGGER = logging.getLogger(__name__)\nlogger = LOGGER\n\n\nclass XEClient(Client):\n \"\"\"IOS XE-specific wrapper for gNMI functionality.\n Assumes IOS XE 16.12+\n\n Returns direct responses from base Client methods.\n\n Methods\n -------\n delete_xpaths(...)\n Convenience wrapper for set() which constructs Paths from XPaths for deletion.\n get_xpaths(...)\n Convenience wrapper for get() which helps construct get requests for specified xpaths.\n set_json(...)\n Convenience wrapper for set() which assumes model-based JSON payloads.\n subscribe_xpaths(...)\n Convenience wrapper for subscribe() which helps construct subscriptions for specified xpaths.\n\n Examples\n --------\n >>> from cisco_gnmi import ClientBuilder\n >>> client = ClientBuilder('127.0.0.1:9339').set_os(\n ... 'IOS XE'\n ... ).set_secure_from_file(\n ... 'rootCA.pem',\n ... 'client.key',\n ... 'client.crt'\n ... ).set_ssl_target_override().set_call_authentication(\n ... 'admin',\n ... 'its_a_secret'\n ... 
).construct()\n >>> capabilities = client.capabilities()\n >>> print(capabilities)\n ...\n >>> get_response = client.get_xpaths('/interfaces/interface')\n >>> print(get_response)\n ...\n >>> subscribe_response = client.subscribe_xpaths('/interfaces/interface')\n >>> for message in subscribe_response: print(message)\n ...\n >>> config = '{\"Cisco-IOS-XE-native:native\": {\"hostname\": \"gnmi_test\"}}'\n >>> set_response = client.set_json(config)\n >>> print(set_response)\n ...\n >>> delete_response = client.delete_xpaths('/Cisco-IOS-XE-native:native/hostname')\n \"\"\"\n\n def delete_xpaths(self, xpaths, prefix=None):\n \"\"\"A convenience wrapper for set() which constructs Paths from supplied xpaths\n to be passed to set() as the delete parameter.\n\n Parameters\n ----------\n xpaths : iterable of str\n XPaths to specify to be deleted.\n If prefix is specified these strings are assumed to be the suffixes.\n prefix : str\n The XPath prefix to apply to all XPaths for deletion.\n\n Returns\n -------\n set()\n \"\"\"\n if isinstance(xpaths, string_types):\n xpaths = [xpaths]\n paths = []\n for xpath in xpaths:\n if prefix:\n if prefix.endswith(\"/\") and xpath.startswith(\"/\"):\n xpath = \"{prefix}{xpath}\".format(\n prefix=prefix[:-1], xpath=xpath[1:]\n )\n elif prefix.endswith(\"/\") or xpath.startswith(\"/\"):\n xpath = \"{prefix}{xpath}\".format(prefix=prefix, xpath=xpath)\n else:\n xpath = \"{prefix}/{xpath}\".format(prefix=prefix, xpath=xpath)\n paths.append(self.parse_xpath_to_gnmi_path(xpath))\n return self.set(deletes=paths)\n\n def set_json(\n self,\n update_json_configs=None,\n replace_json_configs=None,\n ietf=True,\n prefix=None,\n ):\n \"\"\"A convenience wrapper for set() which assumes JSON payloads and constructs desired messages.\n All parameters are optional, but at least one must be present.\n\n This method expects JSON in the same format as what you might send via the native gRPC interface\n with a fully modeled configuration which is then parsed to meet the gNMI implementation.\n\n Parameters\n ----------\n update_json_configs : iterable of JSON configurations, optional\n JSON configs to apply as updates.\n replace_json_configs : iterable of JSON configurations, optional\n JSON configs to apply as replacements.\n ietf : bool, optional\n Use JSON_IETF vs JSON.\n\n Returns\n -------\n set()\n \"\"\"\n if not any([update_json_configs, replace_json_configs]):\n raise Exception(\"Must supply at least one set of configurations to method!\")\n\n def check_configs(name, configs):\n if isinstance(configs, string_types):\n LOGGER.debug(\"Handling %s as JSON string.\", name)\n try:\n configs = json.loads(configs)\n except:\n raise Exception(\"{name} is invalid JSON!\".format(name=name))\n configs = [configs]\n elif isinstance(configs, dict):\n LOGGER.debug(\"Handling %s as already serialized JSON object.\", name)\n configs = [configs]\n elif not isinstance(configs, (list, set)):\n raise Exception(\n \"{name} must be an iterable of configs!\".format(name=name)\n )\n return configs\n\n def create_updates(name, configs):\n if not configs:\n return None\n configs = check_configs(name, configs)\n updates = []\n for config in configs:\n if not isinstance(config, dict):\n raise Exception(\"config must be a JSON object!\")\n if len(config.keys()) > 1:\n raise Exception(\"config should only target one YANG module!\")\n top_element = next(iter(config.keys()))\n update = proto.gnmi_pb2.Update()\n update.path.CopyFrom(self.parse_xpath_to_gnmi_path(top_element))\n config = 
config.pop(top_element)\n if ietf:\n update.val.json_ietf_val = json.dumps(config).encode(\"utf-8\")\n else:\n update.val.json_val = json.dumps(config).encode(\"utf-8\")\n updates.append(update)\n return updates\n\n updates = create_updates(\"update_json_configs\", update_json_configs)\n replaces = create_updates(\"replace_json_configs\", replace_json_configs)\n return self.set(prefix=prefix, updates=updates, replaces=replaces)\n\n def get_xpaths(self, xpaths, data_type=\"ALL\", encoding=\"JSON_IETF\"):\n \"\"\"A convenience wrapper for get() which forms proto.gnmi_pb2.Path from supplied xpaths.\n\n Parameters\n ----------\n xpaths : iterable of str or str\n An iterable of XPath strings to request data of\n If simply a str, wraps as a list for convenience\n data_type : proto.gnmi_pb2.GetRequest.DataType, optional\n A direct value or key from the GetRequest.DataType enum\n [ALL, CONFIG, STATE, OPERATIONAL]\n encoding : proto.gnmi_pb2.GetRequest.Encoding, optional\n A direct value or key from the Encoding enum\n [JSON, JSON_IETF]\n\n Returns\n -------\n get()\n \"\"\"\n supported_encodings = [\"JSON\", \"JSON_IETF\"]\n encoding = util.validate_proto_enum(\n \"encoding\",\n encoding,\n \"Encoding\",\n proto.gnmi_pb2.Encoding,\n supported_encodings,\n )\n gnmi_path = None\n if isinstance(xpaths, (list, set)):\n gnmi_path = map(self.parse_xpath_to_gnmi_path, set(xpaths))\n elif isinstance(xpaths, string_types):\n gnmi_path = [self.parse_xpath_to_gnmi_path(xpaths)]\n else:\n raise Exception(\n \"xpaths must be a single xpath string or iterable of xpath strings!\"\n )\n return self.get(gnmi_path, data_type=data_type, encoding=encoding)\n\n def subscribe_xpaths(\n self,\n xpath_subscriptions,\n request_mode=\"STREAM\",\n sub_mode=\"SAMPLE\",\n encoding=\"JSON_IETF\",\n sample_interval=Client._NS_IN_S * 10,\n suppress_redundant=False,\n heartbeat_interval=None,\n prefix=None,\n ):\n \"\"\"A convenience wrapper of subscribe() which aids in building of SubscriptionRequest\n with request as subscribe SubscriptionList. This method accepts an iterable of simply xpath strings,\n dictionaries with Subscription attributes for more granularity, or already built Subscription\n objects and builds the SubscriptionList. Fields not supplied will be defaulted with the default arguments\n to the method.\n\n Generates a single SubscribeRequest.\n\n Parameters\n ----------\n xpath_subscriptions : str or iterable of str, dict, Subscription\n An iterable which is parsed to form the Subscriptions in the SubscriptionList to be passed\n to SubscriptionRequest. 
Strings are parsed as XPaths and defaulted with the default arguments,\n dictionaries are treated as dicts of args to pass to the Subscribe init, and Subscription is\n treated as simply a pre-made Subscription.\n request_mode : proto.gnmi_pb2.SubscriptionList.Mode, optional\n Indicates how the target should deliver updates; only streaming is supported.\n [STREAM]\n sub_mode : proto.gnmi_pb2.SubscriptionMode, optional\n The default SubscriptionMode on a per Subscription basis in the SubscriptionList.\n SAMPLE will stream the subscription at a regular cadence/interval.\n [SAMPLE]\n encoding : proto.gnmi_pb2.Encoding, optional\n A member of the proto.gnmi_pb2.Encoding enum specifying desired encoding of returned data\n [JSON_IETF]\n sample_interval : int, optional\n Default nanoseconds for sample to occur.\n Defaults to 10 seconds.\n suppress_redundant : bool, optional\n Indicates whether values that have not changed should be sent in a SAMPLE subscription.\n heartbeat_interval : int, optional\n Specifies the maximum allowable silent period in nanoseconds when\n suppress_redundant is in use. The target should send a value at least once\n in the period specified.\n prefix : proto.gnmi_pb2.Path, optional\n A common path prepended to all path elements in the message. This reduces message size by\n removing redundant path elements. Smaller message == improved throughput.\n\n Returns\n -------\n subscribe()\n \"\"\"\n supported_request_modes = [\"STREAM\"]\n request_mode = util.validate_proto_enum(\n \"mode\",\n request_mode,\n \"SubscriptionList.Mode\",\n proto.gnmi_pb2.SubscriptionList.Mode,\n subset=supported_request_modes,\n return_name=True,\n )\n supported_encodings = [\"JSON_IETF\"]\n encoding = util.validate_proto_enum(\n \"encoding\",\n encoding,\n \"Encoding\",\n proto.gnmi_pb2.Encoding,\n subset=supported_encodings,\n return_name=True,\n )\n supported_sub_modes = [\"SAMPLE\"]\n sub_mode = util.validate_proto_enum(\n \"sub_mode\",\n sub_mode,\n \"SubscriptionMode\",\n proto.gnmi_pb2.SubscriptionMode,\n subset=supported_sub_modes,\n return_name=True,\n )\n return super(XEClient, self).subscribe_xpaths(\n xpath_subscriptions,\n request_mode,\n sub_mode,\n encoding,\n sample_interval,\n suppress_redundant,\n heartbeat_interval,\n prefix,\n )\n\n @classmethod\n def parse_xpath_to_gnmi_path(cls, xpath, origin=None):\n \"\"\"Naively tries to intelligently (non-sequitur!) 
identify the origin\n Otherwise assume rfc7951\n legacy is not considered\n \"\"\"\n if origin is None:\n # naive but effective\n if \":\" not in xpath:\n origin = \"openconfig\"\n else:\n origin = \"rfc7951\"\n return super(XEClient, cls).parse_xpath_to_gnmi_path(xpath, origin)\n","sub_path":"src/cisco_gnmi/xe.py","file_name":"xe.py","file_ext":"py","file_size_in_byte":12767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"152722870","text":"fname = input(\"Enter the file name: \")\nif len(fname) < 1: fname = 'clown.txt'\nhand = open(fname)\n\ndic = dict()\nfor lin in hand:\n lin = lin.rstrip()\n #print(lin)\n wds = lin.split()\n #print(wds)\n for w in wds:\n dic[w] = dic.get(w,0) + 1\n\n\n\nlargest = -1\nword = None\nfor k,v in dic.items():\n# print(k,v)\n if v > largest:\n largest = v\n word = k\nprint('In summary: ', word, largest)\n\n\n# print(\"**ALREADY THERE**\")\n# else:\n# dic[w] = 1\n# print(\"**NEW!**\")\n#print(dic)\n# print(w, dic[w])\n","sub_path":"p25.py","file_name":"p25.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"17309178","text":"import time\nprint(\"Hello Human\")\ntime.sleep(1)\nAnws = input(\"What Is Your Age In Human Years ?\")\nAge = int(Anws)\ntime.sleep(0.5)\nprint(\"Processing...\")\ntime.sleep(1)\nif Age < 18:\n print(\"No Younglings Allowed Beyond This Point\")\nelse:\n print(\"Wonderful, You May Continue Human\")","sub_path":"1st.py","file_name":"1st.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"156170995","text":"import logging\nimport tempfile\nimport os\nfrom pathlib import Path\nfrom typing import Dict\n\nfrom .interfaces import DatabaseInterface, StorageInterface, IndexInterface, TextExtractorInterface\n\n\ndef get_gazette_file_key_used_in_storage(gazette) -> str:\n \"\"\"\n Get the file key used to store the gazette in the object storage\n \"\"\"\n return gazette[\"file_path\"]\n\n\ndef download_gazette_file(gazette, storage: StorageInterface) -> str:\n \"\"\"\n Download the file from the object storage and write it down in the local\n disk to allow the text extraction\n \"\"\"\n with tempfile.NamedTemporaryFile(delete=False) as tmpfile:\n gazette_file_key = get_gazette_file_key_used_in_storage(gazette)\n storage.get_file(gazette_file_key, tmpfile)\n return tmpfile.name\n\n\ndef delete_gazette_files(gazette_file: str) -> None:\n \"\"\"\n Removes the files used to process the gazette content.\n \"\"\"\n os.remove(gazette_file)\n\n\n
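# Editor's sketch (not part of the original module): how these helpers
# compose for a single gazette; `storage` and `extractor` stand for any
# StorageInterface / TextExtractorInterface implementations, and the
# gazette record shown is hypothetical:
#
# gazette = {"file_path": "2021/sp/gazette.pdf"}
# local_file = download_gazette_file(gazette, storage)
# text = try_to_extract_content(local_file, extractor)  # removes the file on failure
# delete_gazette_files(local_file)                      # cleanup on success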
If it fails,\n    remove the gazette file and raise an exception\n    \"\"\"\n    try:\n        return text_extractor.extract_text(gazette_file)\n    except Exception as e:\n        os.remove(gazette_file)\n        raise e\n\ndef get_file_endpoint() -> str:\n    \"\"\"\n    Get the endpoint where the gazette files can be downloaded.\n    \"\"\"\n    return os.environ[\"QUERIDO_DIARIO_FILES_ENDPOINT\"]\n\ndef get_gazette_text_and_define_url(\n    gazette: Dict, gazette_file: str, text_extractor: TextExtractorInterface\n):\n    \"\"\"\n    Extract file content and define the url to access the file in the storage\n    \"\"\"\n    gazette[\"source_text\"] = try_to_extract_content(gazette_file, text_extractor)\n    file_endpoint = get_file_endpoint()\n    gazette[\"url\"] = f\"{file_endpoint}/{gazette['file_path']}\"\n\ndef upload_gazette_raw_text(\n    gazette: Dict, storage\n):\n    \"\"\"\n    Define gazette raw text\n    \"\"\"\n    file_raw_txt = Path(gazette['file_path']).with_suffix(\".txt\").as_posix()\n    storage.upload_content(file_raw_txt, gazette[\"source_text\"])\n    logging.debug(f\"file_raw_txt uploaded {file_raw_txt}\")\n    file_endpoint = get_file_endpoint()\n    gazette[\"file_raw_txt\"] = f\"{file_endpoint}/{file_raw_txt}\"\n\n\ndef try_process_gazette_file(\n    gazette: Dict,\n    database: DatabaseInterface,\n    storage: StorageInterface,\n    index: IndexInterface,\n    text_extractor: TextExtractorInterface,\n) -> None:\n    \"\"\"\n    Do all the work to extract the content from the gazette files\n    \"\"\"\n    logging.debug(f\"Processing gazette {gazette['file_path']}\")\n    gazette_file = download_gazette_file(gazette, storage)\n    get_gazette_text_and_define_url(gazette, gazette_file, text_extractor)\n    upload_gazette_raw_text(gazette, storage)\n    index.index_document(gazette)\n    database.set_gazette_as_processed(gazette[\"id\"], gazette[\"file_checksum\"])\n    delete_gazette_files(gazette_file)\n\n\ndef process_gazette_file(\n    gazette: Dict,\n    database: DatabaseInterface,\n    storage: StorageInterface,\n    index: IndexInterface,\n    text_extractor: TextExtractorInterface,\n) -> None:\n    \"\"\"\n    Try to process the gazette file. If an exception happens, log a warning message\n    and return.\n    \"\"\"\n    try:\n        try_process_gazette_file(\n            gazette, database, storage, index, text_extractor\n        )\n    except Exception as e:\n        logging.warning(f\"Could not process gazette: {gazette['file_path']}. 
Cause: {e}\")\n\n\ndef extract_text_pending_gazettes(\n database: DatabaseInterface,\n storage: StorageInterface,\n index: IndexInterface,\n text_extractor: TextExtractorInterface,\n) -> None:\n \"\"\"\n Process the gazettes files waiting to extract the text\n\n This function access the database containing all the gazettes files found by\n the spider and extract the text from the gazettes marked as not processed yet.\n \"\"\"\n logging.info(\"Starting text extraction from pending gazettes\")\n for gazette in database.get_pending_gazettes():\n process_gazette_file(gazette, database, storage, index, text_extractor)\n","sub_path":"tasks/gazette_text_extraction.py","file_name":"gazette_text_extraction.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"327173927","text":"#!/usr/bin/python\n\nimport gensim\nimport unicodedata\nimport random\nimport cytoolz as toolz\nimport pickle\nimport re\n\n\ndef slw(n, seq):\n\tyield from toolz.sliding_window(n, ([None] * (n - 1)) + seq)\n\t#for i in toolz.sliding_window(n, ([None] * (n - 1)) + seq):\n\t#\tyield tuple(filter(None, i))\n\ndef slw2(n, seq):\n\tfor i in toolz.sliding_window(n, ([None] * (n - 1)) + seq):\n\t\tyield tuple(filter(None, i))\n\ndef norm(s):\n\treturn unicodedata.normalize('NFKD', s).encode('ASCII', 'ignore').decode()\n\n\ndef is_czech(s):\n\treturn re.fullmatch('[a-zA-ZáÁčČďĎéÉěĚíÍňŇóÓřŘšŠťŤúÚůŮýÝžŽ]+', s)\n\n\ndef simplify(d):\n\tret = {}\n\tfor k, v in d.items():\n\t\tif len(v) == 1:\n\t\t\tkk = list(v.keys())[0]\n\t\t\tret[k] = {kk: {'': v[kk]['']}}\n\t\telse:\n\t\t\tret[k] = v\n\treturn ret\n\n\ndef gen_endings(s):\n\twhile s:\n\t\tyield s\n\t\ts = s[1:]\n\n\nd = {}\nn_pred = 1\n\nfn = 'cswiki-latest-pages-articles.xml.bz2'\nwiki = gensim.corpora.WikiCorpus(fn, lemmatize=False, dictionary={})\nfor i, text in enumerate(wiki.get_texts()):\n\tif i % 1000 == 0:\n\t\tprint(i, len(d))\n\tfor words in slw(2, text):\n\t\tprev_word, word = words\n\t\tprev_word = '' if not prev_word else prev_word\n\t\tprev_word = '' if not is_czech(prev_word) else prev_word\n\t\tif not is_czech(word):\n\t\t\tcontinue\n\t\tw = norm(word)\n\t\tif not w:\n\t\t\tcontinue\n\t\tif len(w) != len(word):\n\t\t\tcontinue\n\t\tif not w in d:\n\t\t\t#d[w] = {'options': {}, 'pwes': {}}\n\t\t\td[w] = {}\n\t\tif word not in d[w]:\n\t\t\td[w][word] = {}\n\t\t#d[w]['options'][word] = d[w]['options'].get(word, 0) + 1\n\t\tpwe_ = prev_word[-3:]\n\t\twhile 1:\n\t\t\t#if pwe_ not in d[w]:\n\t\t\t#\td[w][pwe_] = {}\n\t\t\td[w][word][pwe_] = d[w][word].get(pwe_, 0) + 1\n\t\t\tif not pwe_:\n\t\t\t\tbreak\n\t\t\tpwe_ = pwe_[1:]\n\t\t#print('TTT', d[w])\n\n\tif i and i % 10000 == 0:\n\t\tprint('saving %s %s' % (i, len(d)))\n\t\t#with open('dict.msgpack', 'wb') as f:\n\t\t\t#msgpack.dump(d, f)\n\t\tdd = simplify(d)\n\t\tprint('SIMP', len(d), len(dd))\n\t\twith open('dict.pickle', 'wb') as f:\n\t\t\tpickle.dump(dd, f)\n\t\tprint('saved')\n","sub_path":"corp.py","file_name":"corp.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"163319","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/topics/item-pipeline.html\n\nfrom scrapy.exceptions import DropItem\n\nfrom cpi_scrapers.items import ProductItem\n\n\nclass ProductValidationPipeline(object):\n\n def process_item(self, item, spider):\n ''' ''' # 
{{{\n\n # Required Fields\n for field in ProductItem.VALIDATION_REQUIRED:\n if field not in item:\n raise DropItem('Missing %s in Item', field)\n\n # Length\n for field, length in ProductItem.VALIDATION_LEN.iteritems():\n tmp = item.get(field)\n if tmp and (not isinstance(tmp, basestring) or len(tmp) > length):\n raise DropItem('Wrong string format: %s | \"%s\"', field, tmp)\n \n # product condition\n tmp = item['product_condition'] \n if not (0 < tmp <= ProductItem.NUM_PC_OPTS):\n raise DropItem('Wrong Product Condition')\n \n tmp = item['availability'] \n if not (0 < tmp <= ProductItem.NUM_AVAIL_OPTS):\n raise DropItem('Wrong Availability')\n\n if item['on_sale'] not in (0, 1):\n raise DropItem('Wrong On Sale')\n \n return item\n \n # }}}\n \n","sub_path":"cpi_scrapers/pipelines/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"466495256","text":"# coding: utf-8\n\nimport os\nimport h5py\nimport pandas as pd\n\nfrom experiment import XP_BASE\n\n\nclass XPS_BASE(XP_BASE):\n def __init__(self, base_data_dir, save_dir, dataset_name, detector_name, detector_cls=None, base_data_type='cat'):\n super().__init__(base_data_dir, None, save_dir, dataset_name, detector_name,\n detector_cls=detector_cls, base_data_type=base_data_type)\n\n self.metadata = {\n 'dataset': dataset_name,\n 'base_path': os.path.join(base_data_dir, '%s_%s.csv' % (dataset_name, base_data_type)),\n }\n\n def set_default(self):\n self.data_cond = {\n 'type': 'stream'\n }\n\n # classifier conditions\n self.clf_cond = {\n 'n_estimators': 20,\n 'random_state': 42,\n 'update_policy': True,\n 'train_size': 1000,\n }\n\n self.detector_cond = {}\n\n def make_data(self):\n self.data = pd.read_csv(self.base_data_path).values\n\n\nclass XPS_LCODE(XPS_BASE):\n def save(self, path):\n idx, wd, drift_map = self.detector._get_results()\n self.drifts = list(sorted(set(drift_map['position'].values.tolist())))\n super().save(path)\n with h5py.File(path, 'a') as f:\n f.create_dataset('idx', data=idx)\n f.create_dataset('wd', data=wd)\n f.create_dataset('th', data=self.detector.l1_th)\n drift_map.to_hdf(path, key='drift_map', mode='a')\n pd.DataFrame(self.detector.l2_log).to_hdf(path, key='l2_log', mode='a')\n\n\nclass XPS_MD3(XPS_BASE):\n def save(self, path):\n super().save(path)\n with h5py.File(path, 'a') as f:\n f.create_dataset('label_requests', data=self.detector.label_requests)\n\n\nclass XPS_UPDATE(XPS_BASE):\n def run_detection(self, show_progress):\n update_policy = self.clf_cond['update_policy']\n train_size = self.calc_train_size()\n change_points = list(range(train_size, len(self.data), train_size))\n self.drifts, self.score = self.detector.run(self.data, train_size, update_policy=update_policy,\n change_points=change_points, show_progress=show_progress)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"src/experiment_stream.py","file_name":"experiment_stream.py","file_ext":"py","file_size_in_byte":2243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"282909364","text":"# works with Python 3.7.0b3\n\nimport asyncio\nimport os\nimport subprocess as sbp\nimport csv\n\nasync def res_proc(cmd):\n proc = await asyncio.create_subprocess_exec(\n cmd,\n stdout = asyncio.subprocess.PIPE,\n stderr = asyncio.subprocess.PIPE,\n stdin = asyncio.subprocess.PIPE)\n i = 0\n while True:\n # flush out stdout lines from the buffer\n line = await 
proc.stdout.readuntil(separator = b'\\n')\n lineb = line.decode()\n i=i+1\n print(\"Number:{}, line:{} \".format(i,lineb.replace(\"\\n\",\"\")))\n if (lineb.find(\"Enter rows depth cols\")!=-1):\n print(\"* Oops ----*\")\n proc.stdin.write(b'2 256 4\\n')\n proc.stdin.write(b'1 1\\n')\n proc.stdin.write(b'0 0\\n')\n print(\"* Oops ----*\")\n\n#asyncio.run(res_proc('./emu'))\n\nif __name__ == '__main__':\n\n dpm_dims = [2,4,8,16] # lhs\n dpn_dims = dpm_dims # rhs\n dpk_dims = [64,128,256,512] #common dim\n\n # bram dimensions\n brams_big = [256,512,1024,2048]\n brams_small = [8,16,32,64]\n\n # matrices that have to be tested\n mat_rowlist = [2,4,8,16,32]\n mat_collist = [64,128,256,512,1024,2048,4096]\n\n m_bram = 256\n n_bram = 256\n\n # first run the emu instatiattions\n # with the required dimensions\n # temporary bram variables\n tm_bram = 1\n tn_bram = 1\n\n dir='build'\n exec_cmd=''\n for dk in dpk_dims:\n for dm in dpm_dims:\n for dn in dpn_dims:\n #bram size setup\n if(dm>dn):\n tm_bram = int(dm/dn * m_bram)\n tn_bram = int(n_bram)\n if(dn>dm):\n tn_bram = int(dn/dm * n_bram)\n tm_bram = int(n_bram)\n if(dm==dn):\n tm_bram = int(m_bram)\n tn_bram = int(n_bram)\n\n exec_cmd = './'+dir+\"/{0}x{1}x{2}_{3}_{4}/emu\".format(dm,dk,dn,tn_bram,tm_bram)\n # make the environment variables\n os.environ[\"M\"] = str(dm)\n os.environ[\"N\"] = str(dn)\n os.environ[\"K\"] = str(dk)\n os.environ[\"LHPM\"] = str(tm_bram)\n os.environ[\"RHPM\"] = str(tn_bram)\n ts = sbp.Popen([\"make\",\"instemu\"])\n ts.wait()\n\n asyncio.run(res_proc(exec_cmd))\n\n","sub_path":"auto_rsh/sub.py","file_name":"sub.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"76631876","text":"#_*_ coding:utf-8 _*_\nimport sys, random, math, pygame\nfrom pygame.locals import *\n#import common\nfrom game.superman.gameobject import *\n\n\ndef start():\n\n pygame.init()\n screen = pygame.display.set_mode( (10240, 768) )\n pygame.display.set_caption( \"斯古大陆\" )\n\n while True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n keys = pygame.key.get_pressed()\n if keys[K_ESCAPE]:\n sys.exit()\n\n # fontobject = pygame.font.Font( None, 18 )\n\n pygame.draw.rect( screen, (0, 0, 0),\n ((screen.get_width() / 2) - 100,\n (screen.get_height() / 2) - 10,\n 200, 20), 0 )\n pygame.draw.rect( screen, (255, 255, 255),\n ((screen.get_width() / 2) - 102,\n (screen.get_height() / 2) - 12,\n 204, 24), 1 )\n pygame.display.update()","sub_path":"game/studygame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"613767928","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 25 11:02:32 2018\n\n@author: chuck\n\"\"\"\n\n\"\"\"\nThis script will take each sRNA/antisense RNA and randomly shuffle its sequence to preseve the base composition\nRandomly using a program that randomly shuffles the sequences but preserves dinucleotide frequency\nThese shuffled sequences will be used to calculate the folding energy as a baseline, random case\n\"\"\"\n\nimport subprocess\nfrom Bio import SeqIO\nimport os\nimport numpy as np\n\n#Open up the sequences to be shuffled\nsequences = list(SeqIO.parse('/home/chuck/Documents/RNAseq/master_sRNA_antisense_sequences.fasta', 'fasta'))\n#Open a file that the folding energy of the shuffled sequences will be written to\nRNALfold_stats = 
open('/home/chuck/Documents/RNAseq/RNAfolding/permutations_statistics_np.txt', 'w')\n\n#uShuffle the sequences -- preserves dinucleotide frequencies\nfor sequence in sequences:\n #The program generates a new file for each shuffled sequence, so write each file to a new directory\n new_file = open('/home/chuck/Documents/RNAseq/RNAfolding/permutations_1000/' + sequence.description + \"_permutations_1k.fasta\", \"w\")\n #Build the uShuffle command that will be executed. We will generate 1000 permutations per sequences\n uShuffle_command = ['/home/chuck/Documents/RNAseq/RNAfolding/uShuffle/main.exe', '-s', str(sequence.seq), '-n', '1000', '-k', '2', '-seed', '137']\n #Run uShuffle as a subprocess\n permutations = list(subprocess.check_output(uShuffle_command).decode(\"utf-8\").split(\"\\n\"))\n #Delete the last item in the permutations, which is just metadata we don't need\n del permutations[len(permutations) - 1]\n #Write the 1000 permutations to a new file\n for permutation in permutations:\n new_file.write(\">%s_%s\\n%s\\n\" % (sequence.description, str(permutations.index(permutation) + 1), permutation))\n\n \n#Determine the folding energy of all of the shuffled RNA sequences. These use the files we just generated in the previous loop\nfor RNA_permutations in os.listdir('/home/chuck/Documents/RNAseq/RNAfolding/permutations_numpy/'):\n #Open the folding energy file that the results will be written to\n RNALfold_out = open('/home/chuck/Documents/RNAseq/RNAfolding/permutations_numpy/%s' % (RNA_permutations[:-6] + \"_folding.txt\"), \"w\")\n #Build the command for the RNA folding energy program\n RNALfold_command = [\"RNALfold\", '--infile=/home/chuck/Documents/RNAseq/RNAfolding/permutations_numpy/%s' % (RNA_permutations)]\n #Run RNAfold\n RNALfold = list(subprocess.check_output(RNALfold_command).decode(\"utf-8\").split(\"\\n\"))\n \n #Loop through folding energies from each permutation\n #RNAfold outputs in a messy format, so process the format and just grab the folding energy\n #We don't care about the actual folding structure in the output\n free_energy_list = []\n for fold in RNALfold:\n if \">\" in fold:\n current_permutation = fold\n if fold[0:2] == \" (\":\n current_energy = fold.replace(\"(\", \"\").replace(\" \", \"\").replace(\")\", \"\")\n RNALfold_out.write(\"%s\\t%s\\n\" % (current_permutation, current_energy))\n free_energy_list.append(-1*float(current_energy.replace(\"-\", \"\")))\n \n #Write the mean folding energies for the permutations of each original shuffled sequence\n RNALfold_stats.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" % (RNA_permutations[:-6], np.mean(free_energy_list), np.std(free_energy_list), \n np.mean(free_energy_list) + np.std(free_energy_list), \n np.mean(free_energy_list) - np.std(free_energy_list)))\n \nRNALfold_out.close()\n","sub_path":"uShuffle_sequence_generator.py","file_name":"uShuffle_sequence_generator.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"178119554","text":"import random\n\nchances = 5\nguess = 5\n\n\n\n\nwhile chances > 0:\n chances = chances - 1\n print(chances)\n\n yourguess = int(input(\"Type in your guess\"))\n\n randomNo = random.randint(1,9)\n \n if yourguess == randomNo:\n print(\"Congratulations you won!!!\")\n break\n \n else:\n print (\"You lose !!! 
the number is\", randomNo)\n\n    ","sub_path":"GuessingGame/guessingGame.py","file_name":"guessingGame.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"261977652","text":"import torch\nimport torch.nn.functional as F\n\nimport pyro\nimport pyro.distributions as dist\n\nfrom dcgan import Generator\nfrom utils import ensure_batched\n\n\ndef contains_row(tensor, row):\n    return torch.any(torch.all(torch.eq(row, tensor), dim=-1))\n\n\nnum_iters = 0\n\n\nclass ObserveGrid():\n    def __init__(self, zooms=[2, 4, 8], sizes=[(8, 8), (8, 8), (8, 8)],\n                 cuda=True):\n        \"\"\"\n        provides method to get foveal views of images\n\n        zooms - how zoomed in each view is:\n            e.g. zoom=1: full image\n                 zoom=2: twice as \"close up\"\n        sizes - each view is `size[0] x size[1]` pixels\n        \"\"\"\n        self.cuda = cuda\n        self.zooms = zooms\n        self.sizes = sizes\n        self.xc = None\n        self.yc = None\n\n        grids = self._make_centred_grids()\n        # flatten grids since we don't need spatial information\n        grids = torch.cat([grid.view(-1, 2)\n                           for grid in grids],\n                          dim=0)\n        # remove duplicates for points that were in overlapping views\n        grids = torch.cat([row for i, row in enumerate(grids)\n                           if not contains_row(grids[i+1:], row)],\n                          dim=0)\n        grid = grids.view(1, 1, -1, 2)\n        if self.cuda:\n            self.grid = grid.cuda()\n        else:\n            self.grid = grid\n\n    def _make_centred_grids(self):\n        identity = torch.Tensor([[[1., 0., 0.],\n                                  [0., 1., 0.]]])\n        sizes = [(1, 1, size[0], size[1]) for size in self.sizes]\n        grids = [F.affine_grid(identity/zoom, size)\n                 for zoom, size in zip(self.zooms, sizes)]\n        if self.cuda:\n            grids = [g.cuda() for g in grids]\n        return grids\n\n    def peek(self, images):\n        \"\"\"\n        xc, yc \in [-1, 1] are coordinates of centre of\n        observed patch. 
currently must be same for every\n image.\n \"\"\"\n images, (N, C, H, W) = ensure_batched(images)\n batch_grid = self.moved_grid.repeat(N, 1, 1, 1)\n foveal_view = F.grid_sample(images, batch_grid,\n padding_mode='zeros')\n return foveal_view.view(N, C, -1)\n\n def set_pos(self, xc, yc):\n self.xc = xc\n self.yc = yc\n self.pos = torch.Tensor([xc, yc])\n if self.cuda:\n self.pos = self.pos.cuda()\n self.moved_grid = self.grid + self.pos\n\n def visualise_grid(self, images):\n images, (_, C, H, W) = ensure_batched(images)\n image = images[0].clone()\n grid = self.moved_grid.view(-1, 2)\n for x, y in grid:\n # scale coords to [0, 1]\n x = (x+1)/2\n y = (y+1)/2\n # scale coords to [0, H], [0, W]\n y = int(y*H)\n x = int(x*W)\n if y >= 0 and x >= 0 and y < H and x < W:\n image[:, y, x] = torch.Tensor([1., -1., -1.])\n return image\n\n def visualise_peeks(self, images):\n \"\"\"\n generator of images representing what each\n grid `sees`\n \"\"\"\n images, (_, C, H, W) = ensure_batched(images)\n image = images[0:1]\n grids = [grid + self.pos for grid in self._make_centred_grids()]\n for grid in grids:\n yield F.grid_sample(image, grid,\n padding_mode='zeros').squeeze(dim=0)\n\n def copy(self):\n other = ObserveGrid(self.zooms, self.sizes, self.cuda)\n if self.xc is not None:\n other.set_pos(self.xc, self.yc)\n return other\n\n\nclass FaceModel():\n def __init__(self, cuda=True):\n self.cuda = cuda\n self.latent_dim = 100\n self.latent_mean = torch.zeros(1, self.latent_dim)\n self.latent_std = torch.ones(1, self.latent_dim)\n self.obs_std = torch.tensor(1.)\n if cuda:\n self.latent_mean = self.latent_mean.cuda()\n self.latent_std = self.latent_std.cuda()\n self.obs_std = self.obs_std.cuda()\n self.generator = Generator().cuda()\n else:\n self.generator = Generator()\n self.generator.eval()\n\n self.generator.load_state_dict(\n torch.load('checkpoints/trained_wgan/wgan-gen.pt',\n map_location=(None if cuda else 'cpu'))\n )\n\n def __call__(self, observe_grids, observations):\n global num_iters\n num_iters += 1\n latents = pyro.sample(\n \"latents\",\n dist.Normal(self.latent_mean,\n self.latent_std)\n )\n image = self.generator(latents).squeeze(0)\n\n if observe_grids is not None:\n for i, (observe_grid, observation) in enumerate(\n zip(observe_grids,\n observations)):\n sim_foveal = observe_grid.peek(image)\n pyro.sample(\n f\"observed_patch_{i}\",\n dist.Normal(sim_foveal.contiguous().view(-1),\n self.obs_std),\n obs=observation.view(-1)\n )\n return image\n\n def latent_var(self, trace):\n \"\"\"\n Some arbitrary binary latent variable. 
Stand-in since\n we don't currently have disentangled representations.\n \"\"\"\n return trace.nodes['latents']['value'][0, 0] > 0\n\n\nif __name__ == '__main__':\n from utils import to_pil\n\n # make an array of samples from the model\n model = FaceModel(False)\n grid = [[model(None, None)\n for _ in range(10)]\n for _ in range(10)]\n grid = torch.cat([torch.cat(row, dim=2) for row in grid],\n dim=1)\n to_pil(grid).show()\n\n # vary a single dimension\n dim = 0\n model = FaceModel(False)\n latents = torch.distributions.Normal(\n torch.zeros(20, 100),\n torch.ones(20, 100)\n ).sample()\n hi_latents = latents.clone()\n hi_latents[:, dim] = 1\n lo_latents = latents.clone()\n lo_latents[:, dim] = -1\n hi_images = model.generator(hi_latents)\n lo_images = model.generator(lo_latents)\n hi_row = torch.cat([im for im in hi_images], dim=2)\n lo_row = torch.cat([im for im in lo_images], dim=2)\n grid = torch.cat([hi_row, lo_row], dim=1)\n to_pil(grid).show()\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"14508699","text":"from django.conf.urls import url\n\nfrom . import views\n\napp_name='myShoppingHistory'\n\nurlpatterns = [\n #url(r'^$', views.defaultBaseView, name='index' ),\n url(r'^entry/', views.myShoppingDbEntry, name='myShoppingEntry' ),\n url(r'^query/', views.myShoppingDbQuery, name='myShoppingQuery' ),\n]","sub_path":"myShoppingHistory/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"324893317","text":"import pandas as pd\r\nimport sys\r\n\r\nfrom io import StringIO\r\nfrom nltk.tokenize import RegexpTokenizer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn import metrics\r\n\r\n\r\ndef get_text_count(data):\r\n\t\"\"\"\r\n\tGenerate document term matrix by using scikit-learn's CountVectorizer.\r\n\tUsing default n-gram size of (1,1)\r\n\tStop words are removed, and sentences are converted to lower case .\r\n\t\"\"\"\r\n\t#tokenizer to remove unwanted elements from the data like symbols and numbers\r\n\ttoken = RegexpTokenizer(r'[a-zA-Z0-9]+')\r\n\tcv = CountVectorizer(lowercase=True,stop_words='english',ngram_range = (1,1),tokenizer = token.tokenize)\r\n\ttext_counts= cv.fit_transform(data['sentence'])\r\n\r\n\treturn text_counts\r\n\r\ndef train_model(X_train,y_train):\r\n\t\"\"\"\r\n\tcreate a Multinomial Naive Bayes classifier object using MultinomialNB() function.\r\n\t\"\"\"\r\n\tmodel = MultinomialNB().fit(X_train, y_train)\r\n\treturn model\r\n\r\n\r\ndef main():\r\n\t#Read in the data using pandas and label the columns\r\n\tinput_file = sys.argv[1]\r\n\tdata = pd.read_csv(input_file,names=['sentence','label'])\r\n\t#print (data.head())\r\n\t#print (data.info())\r\n\t\r\n\t# Use Bag of words method to get text count. 
\r\n\ttext_counts = get_text_count(data)\r\n\t#print (text_counts)\r\n\r\n\t#split data into train & test\r\n\tX_train, X_test, y_train, y_test = train_test_split(text_counts, data['label'], test_size=0.8, random_state=1)\r\n\r\n\t#Train the model using fit method\r\n\tmodel = train_model(X_train,y_train)\r\n\tprint (\"Training complete....\")\r\n\r\n\t#predict & verify the accuracy with the test data\r\n\tprint(\"Validating with test data...\")\r\n\tpredicted = model.predict(X_test)\r\n\tprint(\"Multinomial Naive Bayes Accuracy:\",metrics.accuracy_score(y_test, predicted))\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"classifier_naive_bayes.py","file_name":"classifier_naive_bayes.py","file_ext":"py","file_size_in_byte":1854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"367670982","text":"import os\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport networkx as nx\nfrom sklearn import metrics\n\n\ndef load_graph():\n g = nx.read_gexf(\"data/BlogCatalog.gexf\")\n return g\n\n\ndef loss_and_metric(logits, labels):\n labels = tf.cast(labels, tf.float32)\n logits = tf.cast(logits, tf.float32)\n\n predictions = tf.argmax(logits, -1)\n actuals = tf.argmax(labels, -1)\n\n cross_entropy_loss = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits_v2(\n labels=labels, logits=logits))\n\n hits = tf.equal(tf.argmax(logits, -1), tf.argmax(labels, -1))\n accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))\n\n return cross_entropy_loss, predictions, actuals\n\n\ndef label_to_list(label):\n c = []\n a = label[1:len(label) - 1]\n b = a.split(' ')\n for idx in b:\n c.append(int(idx.split(',')[0]))\n return c\n\n\ndef one_hot_embed(label):\n \"\"\"\n :param node: node_id\n :param graph: G\n :return: embedding [0 0 0 ... 1 0 0 ... 
0]\n \"\"\"\n emb_line = np.zeros(39)\n emb_line[label[0] - 1] = 1\n return emb_line\n\n\ndef load_model():\n with tf.Session() as sess:\n saver = tf.train.import_meta_graph(\"embedding/7000.ckpt-7000.meta\")\n saver.restore(sess, tf.train.latest_checkpoint(\"embedding\"))\n graph = tf.get_default_graph()\n embeddings = graph.get_tensor_by_name(\"embeddings:0\").eval()\n return embeddings\n\n\ndef get_batch(graph, batch_size, type):\n embeddings_total = load_model()\n if type == 'train':\n embeddings = embeddings_total[:int(len(embeddings_total) * 0.6)]\n num_batch = len(embeddings) // batch_size\n for num in range(num_batch):\n x, y = [], []\n for idx in range(batch_size):\n label = label_to_list(graph.node[str(batch_size * num + idx + 1)]['label'])\n if len(label) == 1:\n x.append(embeddings_total[batch_size * num + idx + 1])\n y_emb = one_hot_embed(label)\n y.append(y_emb)\n yield x, y\n elif type == 'test':\n embeddings = embeddings_total[int(len(embeddings_total) * 0.6):]\n x, y = [], []\n for idx in range(len(embeddings)):\n num = int(len(embeddings_total) * 0.6) + idx\n label = label_to_list(graph.node[str(num)]['label'])\n if len(label) == 1:\n x.append(embeddings_total[num])\n y_emb = one_hot_embed(label)\n y.append(y_emb)\n yield x, y\n else:\n raise ValueError(\"type must be test or train\")\n\n\ndef classification_model(embedding_size):\n graph = load_graph()\n node_classes = 39\n classification_graph = tf.Graph()\n with classification_graph.as_default():\n embeddings_c = tf.placeholder(tf.float32, [None, embedding_size], name='embeddings_c')\n labels_c = tf.placeholder(tf.int32, [None, node_classes], name='label_c')\n weight_c = tf.Variable(tf.truncated_normal([embedding_size, node_classes], stddev=0.1), dtype=tf.float32)\n bias_c = tf.Variable(tf.zeros(node_classes), dtype=tf.float32)\n logits_c = tf.nn.softmax(tf.add(tf.matmul(embeddings_c, weight_c), bias_c))\n # loss_c = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels_c, logits=logits_c))\n loss_c, predict_c, true_c = loss_and_metric(logits=logits_c, labels=labels_c)\n optimizer_c = tf.train.AdamOptimizer(learning_rate=0.005).minimize(loss_c)\n\n with tf.Session(graph=classification_graph) as sess:\n epochs = 100\n iteration = 1\n total_loss = 0\n sess.run(tf.global_variables_initializer())\n # train\n for e in range(1, epochs + 1):\n batches = get_batch(graph=graph, batch_size=128, type='train')\n start = time.time()\n for x, y in batches:\n feed = {embeddings_c: x,\n labels_c: y}\n train_loss, _ = sess.run([loss_c, optimizer_c], feed_dict=feed)\n total_loss += train_loss\n if iteration % 10 == 0:\n end = time.time()\n print(\"Epoch {}/{}\".format(e, epochs),\n \"Iteration: {}\".format(iteration),\n \"Avg. 
Training loss: {:.4f}\".format(total_loss / 10),\n \"{:.4f} sec/batch\".format((end - start) / 10))\n total_loss = 0\n start = time.time()\n iteration += 1\n\n # test\n batches = get_batch(graph=graph, batch_size=128, type='test')\n for x, y in batches:\n print(np.shape(x))\n print(np.shape(y))\n feed = {embeddings_c: x,\n labels_c: y}\n y_predict, y_true = sess.run([predict_c, true_c], feed_dict=feed)\n print(\"predict:\", y_predict)\n print(\"true:\", y_true)\n print('macro-f1-score:', metrics.f1_score(y_true, y_predict,\n labels=list(range(1, node_classes + 1)), average='macro'))\n print('micro-f1-score:', metrics.f1_score(y_true, y_predict,\n labels=list(range(1, node_classes + 1)), average='micro'))\n\n\nif __name__ == '__main__':\n embeddings = load_model()\n print(np.shape(embeddings)[0])\n print(np.shape(embeddings)[1])\n embedding_size = np.shape(embeddings)[1]\n classification_model(embedding_size)\n\n","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":5494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"469464700","text":"import matplotlib.colors as co\nimport numpy as np\n\n\ndef name2color(name):\n \"\"\"Return the 3-element RGB array of a given color name.\"\"\"\n return co.hex2color(co.cnames[name])\n\ndef one2another(bottom='white', top='red', alphatop=1.0, alphabottom=1.0, N=256):\n rgb_bottom, rgb_top = name2color(bottom), name2color(top)\n r = np.linspace(rgb_bottom[0],rgb_top[0],N)\n g = np.linspace(rgb_bottom[1],rgb_top[1],N)\n b = np.linspace(rgb_bottom[2],rgb_top[2],N)\n a = np.linspace(alphabottom, alphatop,N)\n colors = np.transpose(np.vstack([r,g,b,a]))\n cmap = co.ListedColormap(colors, name='{bottom}2{top}'.format(**locals()))\n return cmap\n\n#anchors = ['red', 'orange','yellow','green','blue','purple', 'red']\n#cycle = []\n#for i in range(len(anchors)-1):\n# cycle.append(new(anchors[i], anchors[i+1]))\n","sub_path":"zachopy/cmaps.py","file_name":"cmaps.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"626298702","text":"import numpy as np\r\nfrom hypothesis import strategies, given, example, settings\r\nimport wfsim\r\n\r\n\r\n@settings(max_examples=100, deadline=None)\r\n@given(strategies.integers(min_value=0, max_value=1_000),\r\n strategies.integers(min_value=0, max_value=4),\r\n strategies.integers(min_value=0, max_value=1_000))\r\n@example(data_length=101, n_channels=4, noise_data_length=1000)\r\ndef test_noise(data_length, n_channels, noise_data_length):\r\n \"\"\"Testing wfsim.RawData.add_noise\"\"\"\r\n if data_length <= 0 or noise_data_length <= 0:\r\n # Double check the input, cannot make np.arrays with negative\r\n # dimensions\r\n return\r\n if n_channels > 4 or n_channels < 0:\r\n # Double check input\r\n return\r\n\r\n # Data are random integers. 
NB: we are at sim rr so pulse is negative\r\n max_pulse_size = 100 # ADC counts\r\n data = np.random.randint(-max_pulse_size, 0, size=(n_channels, data_length))\r\n channel_mask = np.array([(False, 9223372036399775857, -454999850),\r\n (False, 9223372036399775857, -454999850),\r\n (False, 9223372036399775857, -454999850),\r\n (True, n_channels - 1, noise_data_length - n_channels),\r\n ],\r\n dtype=[('mask', '?'), ('left', '<i8'), ('right', '<i8')])\r\n # Take a copy of the channel mask\r\n channel_mask = channel_mask[:n_channels]\r\n # Noise is a string with random floats\r\n noise_data = np.random.randint(-10, 10, size=noise_data_length).astype(np.float)\r\n\r\n RawData = wfsim.RawData\r\n noise_function = RawData.add_noise\r\n \r\n # Actually test that we can run the function\r\n noise_function(data, channel_mask, noise_data, noise_data_length)","sub_path":"tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"104619171","text":"#\n# Copyright (C) 2015 Red Hat Inc.\n#\n# Author: Frederic Lepied <frederic.lepied@redhat.com>\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport unittest\n\nimport score\n\n\ndef sum_first(li):\n return sum(item for item, _ in li)\n\n\nclass TestScore(unittest.TestCase):\n\n def test_score_review_none(self):\n self.assertEqual(\n sum_first(\n score.score_review('{\"type\": \"stats\", \"status\": \"NONE\"}',\n 'patch', ())), 200)\n\n def test_score_review_exception(self):\n self.assertEqual(\n sum_first(\n score.score_review('{\"type\": \"stats\", \"status\": \"NONE\"}',\n 'patch', ('patch'))), 50)\n\n def test_score_review_new(self):\n self.assertEqual(\n sum_first(\n score.score_review(\n '{\"status\":\"NEW\",\"id\":\"id\",'\n '\"patchSets\":[{\"approvals\":[{\"type\": \"verified\"}]}]}',\n 'patch', ())), 20)\n\n def test_score_review_merged(self):\n self.assertEqual(\n sum_first(\n score.score_review(\n '{\"status\":\"MERGED\",\"id\":\"id\"}',\n 'patch', ())), 10)\n\n def test_score_review_abandoned(self):\n self.assertEqual(\n sum_first(\n score.score_review(\n '{\"status\":\"ABANDONED\",\"id\":\"id\",'\n '\"patchSets\":[{\"approvals\":[{\"type\": \"verified\"}]}]}',\n 'patch', ())), 150)\n\n def test_score_review_cherry(self):\n self.assertEqual(\n sum_first(\n score.score_review(\n '{\"status\":\"CHERRY\",\"id\":\"id\"}',\n 'patch', ())), 10)\n\n def test_score_review_unknown(self):\n self.assertEqual(\n sum_first(\n score.score_review(\n '{\"status\":\"UNKNOWN\",\"id\":\"id\"}',\n 'patch', ())), 200)\n\n def test_score_review_no_jenkins(self):\n self.assertEqual(\n sum_first(\n score.score_review(\n '{\"status\":\"NEW\",\"id\":\"id\",'\n '\"patchSets\":[{\"approvals\":'\n '[{\"type\": \"Verified\",\"by\": {\"username\":\"jenkins\"},'\n '\"value\": \"-1\"}]}]}',\n 'patch', ())), 70)\n\n def test_score_review_negative1_vote(self):\n self.assertEqual(\n sum_first(\n score.score_review(\n 
'{\"status\":\"NEW\",\"id\":\"id\",'\n '\"patchSets\":[{\"approvals\":'\n '[{\"type\": \"Code-Review\",'\n '\"value\": \"-1\"}]}]}',\n 'patch', ())), 70)\n\n def test_score_review_negative2_vote(self):\n self.assertEqual(\n sum_first(\n score.score_review(\n '{\"status\":\"NEW\",\"id\":\"id\",'\n '\"patchSets\":[{\"approvals\":'\n '[{\"type\": \"Code-Review\",'\n '\"value\": \"-2\"}]}]}',\n 'patch', ())), 120)\n\n def test_score_review_positive_vote(self):\n self.assertEqual(\n sum_first(\n score.score_review(\n '{\"status\":\"NEW\",\"id\":\"id\",'\n '\"patchSets\":[{\"approvals\":'\n '[{\"type\": \"Code-Review\",'\n '\"value\": \"+1\"}]}]}',\n 'patch', ())), 15)\n\n def test_score_review_positive2_vote(self):\n self.assertEqual(\n sum_first(\n score.score_review(\n '{\"status\":\"NEW\",\"id\":\"id\",'\n '\"patchSets\":[{\"approvals\":'\n '[{\"type\": \"Code-Review\",'\n '\"value\": \"+2\"}]}]}',\n 'patch', ())), 10)\n\n def test_score_interdiff(self):\n self.assertEqual(\n score.score_interdiff(['', '0,0,0'], 'patch')[0],\n 0)\n\n def test_score_interdiff_small(self):\n self.assertEqual(\n score.score_interdiff(['', '1,0,0'], 'patch')[0],\n 10)\n\n def test_score_interdiff_big(self):\n self.assertEqual(\n score.score_interdiff(['', '26,0,0'], 'patch')[0],\n 100)\n\nif __name__ == \"__main__\":\n unittest.main()\n\n# test_score.py ends here\n","sub_path":"test_score.py","file_name":"test_score.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"498690901","text":"import time\nfrom os.path import join\n\nimport tensorflow as tf\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\n\nimport utils\nimport libs.cylib as cylib\nimport model\nfrom data import Dataset\n\n# hyperparameters\n# num_epochs = 50\n# num_epochs = 30\nnum_epochs = 30\nbatch_size = 10\n# batch_size = 20\n# batch_size = 50\nnum_classes = Dataset.num_classes\nsave_dir = 'local/output/'\n# learning_rate = 1e-4\n# learning_rate = 1e-4\n# learning_rate = 1e-2\n# learning_rate = 1e-3\nlearning_rate = 5e-4\ndecay_power = 1.0\n# decay_power = 1.4\n\n#sgd\n# learning_rate = 1e-3\n# decay_power = 0.9\n\n# learning_rate = 1e-4\n\n\n\ndef add_regularization(loss):\n regularization_losses = tf.losses.get_regularization_losses()\n print(regularization_losses)\n #total_loss = tf.add_n(losses + regularization_losses, name='total_loss')\n return tf.add_n([loss] + regularization_losses, name='total_loss')\n\n\ndef build_loss(logits, y):\n with tf.name_scope('loss'):\n y = tf.reshape(y, shape=[-1])\n logits = tf.reshape(logits, [-1, num_classes])\n\n mask = y < num_classes\n\n # idx = tf.where(mask)\n # y = tf.to_float(y)\n # y = tf.gather_nd(y, idx)\n # y = tf.to_int32(y)\n # logits = tf.gather_nd(logits, idx)\n \n # slower\n y = tf.boolean_mask(y, mask)\n logits = tf.boolean_mask(logits, mask)\n\n y_one_hot = tf.one_hot(y, num_classes)\n xent = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_one_hot)\n\n # class_weights = [1] * (num_classes+1)\n # class_weights[-1] = 0\n # class_weights = tf.constant(class_weights, dtype=tf.float32)\n # pixel_weights = tf.gather(class_weights, y)\n # xent = tf.multiply(pixel_weights, xent)\n\n loss = tf.reduce_mean(xent)\n tf.summary.scalar('cross_entropy_loss', loss)\n # loss = add_regularization(loss)\n return loss\n\n\ndef validate(data, x, y, y_pred, loss):\n print('\\nValidation phase:')\n conf_mat = np.zeros((num_classes, num_classes), dtype=np.uint64) \n for i, (x_np, y_np, 
names) in enumerate(data):\n start_time = time.time()\n loss_np, y_pred_np = sess.run([loss, y_pred],\n feed_dict={x: x_np, y: y_np, is_training: False})\n\n duration = time.time() - start_time\n batch_conf_mat = confusion_matrix(y_np.reshape(-1), y_pred_np.reshape(-1))\n batch_conf_mat = batch_conf_mat[:-1,:-1].astype(np.uint64)\n conf_mat += batch_conf_mat\n\t\t# save_path = join(save_dir, '%03d'%i + '.png')\n # utils.draw_output(y_pred_np[0]fix, Dataset.class_info, save_path=save_path)\n\n # utils.draw_labels(y_pred_np, names, Dataset.class_info,\n # 'local/output/predictions')\n utils.draw_labels(y_np, names, Dataset.class_info,\n 'local/output/labels')\n\n # net_labels = logits.argmax(3).astype(np.int32)\n #gt_labels = gt_labels.astype(np.int32, copy=False)\n # cylib.collect_confusion_matrix(y_pred_np.reshape(-1),\n # y_np.reshape(-1), conf_mat)\n # conf_mat_all += conf_mat_np.astype(np.uint64)\n if i % 10 == 0:\n string = 'batch %03d loss = %.2f (%.1f images/sec)' % \\\n (i, loss_np, x_np.shape[0] / duration)\n print(string)\n print(conf_mat)\n return utils.print_stats(conf_mat, 'Validation', Dataset.class_info)\n\n\n\n\n# BEGINING\n\ntf.set_random_seed(31415)\n\ntrain_data = Dataset('train', batch_size)\nval_data = Dataset('val', batch_size, shuffle=False)\n\nheight = train_data.height\nwidth = train_data.width\nchannels = train_data.channels\n\n# x = tf.placeholder(tf.float32, shape=(batch_size, height, width, channels))\n# y = tf.placeholder(tf.int32, shape=(batch_size, height, width))\n\n# create placeholders for inputs\nwith tf.name_scope('data'):\n x = tf.placeholder(tf.float32, shape=(None, height, width, channels), name='rgb_images')\n y = tf.placeholder(tf.int32, shape=(None, height, width), name='labels')\n\nlogits, is_training = model.build_model(x, num_classes)\nloss = build_loss(logits, y)\n\n# build ops for confusion matrix\n# y_labeled_pred = tf.argmax(logits_labeled, axis=1, output_type=tf.int32)\ny_pred = tf.argmax(logits, axis=3, output_type=tf.int32)\n# conf_mat = tf.confusion_matrix(y_labeled, y_labeled_pred, num_classes)\n\nglobal_step = tf.Variable(0, trainable=False)\ndecay_steps = num_epochs * train_data.num_batches\n\nlr = tf.train.polynomial_decay(learning_rate, global_step, decay_steps,\n end_learning_rate=0, power=decay_power)\n# opt = tf.train.AdamOptimizer(lr)\n# grads = opt.compute_gradients(loss)\n# train_op = opt.apply_gradients(grads, global_step=global_step)\n# train_step = opt.apply_gradients(grads)\n\nupdate_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\nwith tf.control_dependencies(update_ops):\n train_step = tf.train.AdamOptimizer(lr).minimize(loss, global_step=global_step)\n # train_step = tf.train.MomentumOptimizer(lr, momentum=0.9).minimize(loss, global_step=global_step)\n# loss = tf.Print(loss, [lr, global_step])\n\nsess = tf.Session()\n\nlog_dir = 'local/logs'\ncheckpoint_dir = 'local/checkpoints'\nutils.clear_dir(log_dir)\nutils.clear_dir(checkpoint_dir)\n\nsaver = tf.train.Saver()\n\nsummary_all = tf.summary.merge_all()\ntrain_writer = tf.summary.FileWriter(join(log_dir, 'train'), sess.graph)\n# test_writer = tf.summary.FileWriter()\n\ntf.global_variables_initializer().run(session=sess)\n\nstep = 0\nbest_iou = 0\nbest_epoch = 0\nexp_start_time = time.time()\nfor epoch in range(1, num_epochs+1):\n # confusion_mat = np.zeros((num_classes, num_classes), dtype=np.uint64)\n print('\\nTraining phase:')\n for x_np, y_np, names in train_data:\n # if step > 1:\n # break\n start_time = time.time()\n # batch_loss, batch_conf_mat, _ = 
sess.run([loss, conf_mat, train_step],\n # loss_np, _ = sess.run([loss, train_step],\n # feed_dict={x: x_np, y: y_np, is_training: True})\n loss_np, summary, _ = sess.run([loss, summary_all, train_step],\n feed_dict={x: x_np, y: y_np, is_training: True})\n train_writer.add_summary(summary, step)\n duration = time.time() - start_time\n # confusion_mat += batch_conf_mat.astype(np.uint64)\n if step % 10 == 0:\n string = '%s: epoch %d / %d, iter %05d, loss = %.2f (%.1f images/sec)' % \\\n (utils.get_expired_time(exp_start_time), epoch, num_epochs, step,\n loss_np, batch_size / duration)\n print(string)\n step += 1\n # utils.print_metrics(confusion_mat, 'Train') \n iou = validate(val_data, x, y, y_pred, loss)\n if iou > best_iou:\n best_iou, best_epoch = iou, epoch\n save_path = saver.save(sess, join(checkpoint_dir, 'model.ckpt'))\n print('Model saved in file: ', save_path)\n print('\\nBest IoU = %.2f (epoch %d)' % (best_iou, best_epoch))\n\n\n # dodat restore","sub_path":"semseg/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"201082215","text":"#! /usr/bin/python\nimport json\nfrom tf import *\n\n\n\nif __name__ == '__main__':\n xaxis = (1,0,0)\n yaxis = (0,1,0)\n zaxis = (0,0,1)\n params = {}\n with open('../dh.json', 'r') as file:\n params=json.loads(file.read())\n\n with open('../urdf.yaml', 'w') as file:\n for param in params:\n inst = json.loads(json.dumps(param))\n a = inst[\"a\"]\n d = inst[\"d\"]\n al = inst[\"alpha\"]\n th = inst[\"theta\"]\n\n trans_z = translation_matrix((0,0,d))\n rot_z = rotation_matrix(th, zaxis)\n trans_x = translation_matrix((a,0,0))\n rot_x = rotation_matrix(al, xaxis)\n\n mat = concatenate_matrices(trans_x, rot_x, rot_z, trans_z)\n\n (roll, pitch, yaw) = euler_from_matrix(mat)\n (x,y,z) = translation_from_matrix(mat)\n\n file.write(inst[\"name\"] + \":\\n\")\n file.write(\" j_xyz: \" + str(x) + \" \" + str(y) + \" \"+ str(z) + \"\\n\")\n file.write(\" j_rpy: \" + str(roll) + ' ' + str(pitch) + ' ' + str(yaw) + \"\\n\")\n file.write(\" l_xyz: \" + str(0) + ' ' + str(0) + ' ' + str(float(d)*(-0.5)) + \"\\n\")\n file.write(\" l_rpy: \" + str(0) + ' ' + str(0) + ' ' + str(0) + \"\\n\")\n file.write(\" l_len: \" + str(d) + \"\\n\")","sub_path":"lab3/src/urdfconv.py","file_name":"urdfconv.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"571240587","text":"#! 
/usr/bin/python3\r\n\r\n#import sys\r\n#if len(sys.argv)==2: \r\n# if sys.argv[1] == 'create_tables': \r\nimport models\r\nfrom forms import home \r\n\r\nmodels.create_tables_if_not_exist()\r\nroot=home.Tk()\r\nroot['bg']='black'\r\n#root.resizable(0,0)\r\nfrmmenu=home.FormMenu(root)\r\nroot.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"529565181","text":"import os\nimport mmcv\nimport numpy as np\nfrom mmdet.core import get_classes\nimport pycocotools.mask as maskUtils\n\n\ndef show_result(img, result, dataset='voc', score_thr=0.3, out_file=None): # dataset:[coco, voc, ...]\n img = mmcv.imread(img)\n class_names = get_classes(dataset)\n if isinstance(result, tuple):\n bbox_result, segm_result = result\n else:\n bbox_result, segm_result = result, None\n bboxes = np.vstack(bbox_result)\n # draw segmentation masks\n if segm_result is not None: # show the segmentation.\n segms = mmcv.concat_list(segm_result)\n inds = np.where(bboxes[:, -1] > score_thr)[0] # select boxes whose score higher than thr.\n for i in inds:\n color_mask = np.random.randint(\n 0, 256, (1, 3), dtype=np.uint8) # RGB-(1,3)\n mask = maskUtils.decode(segms[i]).astype(np.bool) # the mask predicted.\n img[mask] = img[mask] * 0.5 + color_mask * 0.5 # color fusion to make it high-lighted!\n # draw bounding boxes\n labels = [\n np.full(bbox.shape[0], i, dtype=np.int32)\n for i, bbox in enumerate(bbox_result)\n ]\n labels = np.concatenate(labels)\n mmcv.imshow_det_bboxes(\n img.copy(),\n bboxes,\n labels,\n class_names=class_names,\n score_thr=score_thr,\n show=False,\n out_file=out_file)\n\n\ndef parse_pkl(preds):\n \"\"\"\n for c in range(len(preds)):\n cls_pred = preds[c]\n if np.shape(cls_pred)[0] > 0:\n for i in np.shape(cls_pred)[0]:\n bbox = []\n # bbox cords:\n for j in range(4):\n bbox.append(cls_pred[i, j])\n # bbox scores:\n bbox.append(cls_pred[i, -1])\n # bbox catogary:\n bbox.append(c+1) \"\"\"\n return preds\n\n\ndef main():\n\n input_pkl = '/home/cao/workspace/PASCAL_VOC/Utils/pkls_to_merge'\n img_dir = '/home/cao/workspace/PASCAL_VOC/Dataset/VOCSubmitTest/VOCdevkit/VOC2012/JPEGImages'\n\n out_dir = '/home/cao/workspace/PASCAL_VOC/VOC_Tasks/Cascade_x101_yzz/work_dirs/mtest_results_epoch3/show800'\n show_interval = 100\n\n if not os.path.exists(out_dir): os.mkdir(out_dir)\n pkl = mmcv.load(input_pkl)\n img_names_list = []\n for line in open(img_list):\n img_names_list.append(line[:-1] + '.jpg')\n print('num_imgs: ', len(img_names_list))\n print('num_dets: ', len(pkl))\n\n for i in range(len(pkl)):\n if i % show_interval == 0:\n result = parse_pkl(pkl[i])\n show_result(img_dir + '/' + img_names_list[i], \n result, dataset='voc',\n score_thr=0.3, out_file=out_dir + '/' + img_names_list[i])\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"show_imgs_with_pkldets.py","file_name":"show_imgs_with_pkldets.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"382245092","text":"from django.contrib.auth.models import Group, Permission, ContentType\nfrom django.contrib.auth.decorators import permission_required\n\n\n\n\n\npermission = Permission()\npermission.codename = 'jedi_level'\npermission.content_type = ContentType.objects.get(id=7)\npermission.name = 'Has Jedi Privileges'\npermission.save()\n\ngroup = Group()\ngroup.name = 
\"Jedis\"\ngroup.save()\ngroup.permissions.add(permission)\ngroup.user_set.add(UserObjectHere)","sub_path":"static/homepage/permissions/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"194233342","text":"import os\nimport sys\nimport numpy as np\nimport pickle\nfrom distribution_generator import tree_data_generator\n\n# #### Get parameters & set up parameters and directories #### #\nargs = sys.argv[1:-1]\nensemble_size, param_dim, num_jobs, resolution, tree_species, hpc_mode, date = args\nensemble_size = str(ensemble_size)\n\n# Make output directory /Users/py13jh/PycharmProjects/uk_epi_phase/\noutput_path = os.getcwd() + '/output_data/normalised_metric/'\nsim_path = date + '_#reps_' + ensemble_size + '_#params_' + param_dim + '/'\n\nif not os.path.exists(output_path):\n # make path for ensemble sim\n os.mkdir(output_path)\n\n# make sub paths for individual results\nif not os.path.exists(output_path + sim_path):\n os.mkdir(output_path + sim_path)\n # create sub directories\n os.mkdir(output_path + sim_path + '/mortality/')\n os.mkdir(output_path + sim_path + '/vel_var/')\n os.mkdir(output_path + sim_path + '/vel_av/')\n\n# Generate tree data, if it already exits, pass\ninput_tree_data_path = os.getcwd() + '/input_data/Qro_cg_' + resolution\nif os.path.exists(input_tree_data_path):\n pass\n\nelif not os.path.exists(input_tree_data_path):\n os.mkdir(input_tree_data_path)\n # tree_data_generator: generate data set with resolution and save in a directory\n # input_data/Qro_cg_i\n tree_data_generator.main_raster(tree_species, resolution, input_tree_data_path, bool(int(hpc_mode)))\n\n\n\n\n\n\n","sub_path":"mkdir.py","file_name":"mkdir.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"144773425","text":"#imports\nfrom openmsipython.command_line_scripts.upload_data_files_added_to_directory import main\nfrom openmsipython.utilities.config_file_parser import ConfigFileParser\nfrom openmsipython.utilities.argument_parsing import existing_dir, config_path, int_power_of_two\nfrom openmsipython.data_file_io.config import RUN_OPT_CONST\nfrom argparse import Namespace\nimport sys, pathlib\n\n#helper function to get the configs from the file and make sure they are all valid, and then return them in a namespace\n#basically cloning the command line argument parser from the command line script, but using the arguments from the config file\ndef get_args(config_file_path) :\n #parse the config file\n cfp = ConfigFileParser(pathlib.Path(config_file_path))\n configs = cfp.get_config_dict_for_groups('openmsi_directory_stream_service')\n #if the cluster and producer configs are also specified in the given file, add it as the \"config\" argument to the command line script\n if 'cluster' in cfp.available_group_names and 'producer' in cfp.available_group_names :\n #but in this case the 'cluster_producer_config' argument would be ambiguous\n if 'cluster_producer_config' in configs.keys() :\n msg = 'ERROR: ambiguous cluster/producer configuration. 
Specify cluster/producer configs EITHER '\n msg+=f'in the {config_file_path} file or in the file at \"cluster_producer_config\", but not both.'\n raise ValueError(msg)\n configs['config']=config_file_path\n #otherwise, check if a different config file to use for the cluster/producer configs was given\n if 'cluster_producer_config' in configs.keys() :\n configs['config'] = config_path(configs['cluster_producer_config']) \n #check the other arguments to make sure they're the correct type and replace any that weren't given with the defaults\n arg_checks = {'file_directory':{'type':existing_dir,'default':None},\n 'config':{'type':config_path,'default':config_path(RUN_OPT_CONST.DEFAULT_CONFIG_FILE)},\n 'topic_name':{'type':str,'default':RUN_OPT_CONST.DEFAULT_TOPIC_NAME}, \n 'n_threads':{'type':int,'default':RUN_OPT_CONST.N_DEFAULT_UPLOAD_THREADS}, \n 'chunk_size':{'type':int_power_of_two,'default':RUN_OPT_CONST.DEFAULT_CHUNK_SIZE},\n 'queue_max_size':{'type':int,'default':RUN_OPT_CONST.DEFAULT_MAX_UPLOAD_QUEUE_SIZE},\n 'new_files_only':{'type':bool,'default':True},\n }\n for argname,argdict in arg_checks.items() :\n #if the argument was given\n if argname in configs.keys() :\n #make sure it's of the right type\n try :\n configs[argname] = argdict['type'](configs[argname])\n except Exception as e :\n raise (e)\n else :\n #if there's no default, throw an error\n if argdict['default'] is None :\n raise RuntimeError(f'ERROR: missing argument {argname} in!')\n configs[argname] = argdict['default']\n #return a Namespace with all of the necessary arguments\n args = Namespace(file_directory=configs['file_directory'],\n config=configs['config'],\n topic_name=configs['topic_name'],\n n_threads=configs['n_threads'],\n chunk_size=configs['chunk_size'],\n queue_max_size=configs['queue_max_size'],\n update_seconds=-1,\n new_files_only=configs['new_files_only'],\n )\n return args\n\n#call the main method of the command line script\nif __name__=='__main__' :\n if len(sys.argv)!=2 :\n raise RuntimeError('ERROR: must provide exactly one argument (the path to the config file)!')\n main(args=get_args(sys.argv[1]))\n","sub_path":"openmsipython/services/openmsi_directory_stream_service.py","file_name":"openmsi_directory_stream_service.py","file_ext":"py","file_size_in_byte":3832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"569349722","text":"import sys\nfrom genologics.lims import *\nfrom genologics import config\n\ndef main(process_id, output_file):\n lims = Lims(config.BASEURI, config.USERNAME, config.PASSWORD)\n process = Process(lims, id=process_id)\n\n container_output = {}\n for i, o in process.input_output_maps:\n if o['output-generation-type'] == \"PerInput\":\n container_output[o['uri'].location] = o['uri']\n\n containers = set(loc[0] for loc in container_output)\n\n if len(containers) > 1:\n print(\"Error: currently only one plate is supported, {0} were provided\".format(len(containers)))\n sys.exit(1)\n\n container = next(iter(containers))\n\n # Clear the file, to make sure old data don't remain\n with open(output_file, 'wb') as f:\n assert container.type.name == '96 well plate'\n for row in 'ABCDEFGH':\n row_has_output = False\n for col in range(1, 13):\n output = container_output.get((container, \"{0}:{1}\".format(row, col)))\n if output:\n row_has_output = True\n f.write(output.name + \"\\r\\n\")\n elif col == 12 and row_has_output:\n f.write(\"LADDER\\r\\n\")\n else:\n f.write(\"\\r\\n\")\n\n\nmain(sys.argv[1], 
sys.argv[2])\n\n","sub_path":"fragment-analyzer/fa-input.py","file_name":"fa-input.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"609552262","text":"from nose.tools import *\nfrom unittest import TestCase\nfrom unittest import mock\n\nimport logging\nimport shlex\nfrom itertools import chain\n\nfrom .utils import *\n\nimport scuba.utils\n\n\nclass TestUtils(TestCase):\n\n def _parse_cmdline(self, cmdline):\n # Strip the formatting and whitespace\n lines = [l.rstrip('\\\\').strip() for l in cmdline.splitlines()]\n\n # Split each line, and return a flattened list of arguments\n return chain.from_iterable(map(shlex.split, lines))\n\n def _test_format_cmdline(self, args):\n\n # Call the unit-under-test to get the formatted command line\n result = scuba.utils.format_cmdline(args)\n\n # Parse the result back out to a list of arguments\n out_args = self._parse_cmdline(result)\n\n # Verify that they match\n assert_seq_equal(out_args, args)\n\n\n def test_format_cmdline(self):\n '''format_cmdline works as expected'''\n\n self._test_format_cmdline([\n 'something',\n '-a',\n '-b',\n '--long', 'option text',\n '-s', 'hort',\n 'a very long argument here that will end up on its own line because it is so wide and nothing else will fit at the default width',\n 'and now',\n 'some', 'more', 'stuff',\n 'and even more stuff',\n ])\n\n\n def test_shell_quote_cmd(self):\n args = ['foo', 'bar pop', '\"tee ball\"']\n\n result = scuba.utils.shell_quote_cmd(args)\n\n out_args = shlex.split(result)\n\n assert_seq_equal(out_args, args)\n\n\n def test_parse_env_var(self):\n '''parse_env_var returns a key, value pair'''\n result = scuba.utils.parse_env_var('KEY=value')\n self.assertEqual(result, ('KEY', 'value'))\n\n def test_parse_env_var_more_equals(self):\n '''parse_env_var handles multiple equals signs'''\n result = scuba.utils.parse_env_var('KEY=anotherkey=value')\n self.assertEqual(result, ('KEY', 'anotherkey=value'))\n\n def test_parse_env_var_no_equals(self):\n '''parse_env_var handles no equals and gets value from environment'''\n with mocked_os_env(KEY='mockedvalue'):\n result = scuba.utils.parse_env_var('KEY')\n self.assertEqual(result, ('KEY', 'mockedvalue'))\n\n def test_parse_env_var_not_set(self):\n '''parse_env_var returns an empty string if not set'''\n with mocked_os_env():\n result = scuba.utils.parse_env_var('NOTSET')\n self.assertEqual(result, ('NOTSET', ''))\n\n\n def test_flatten_list__not_nested(self):\n sample = [1, 2, 3, 4]\n result = scuba.utils.flatten_list(sample)\n self.assertEqual(result, sample)\n\n def test_flatten_list__nested_1(self):\n sample = [\n 1,\n [2, 3],\n 4,\n [5, 6, 7],\n ]\n exp = range(1, 7+1)\n result = scuba.utils.flatten_list(sample)\n assert_seq_equal(result, exp)\n\n def test_flatten_list__nested_many(self):\n sample = [\n 1,\n [2, 3],\n [4, 5, [6, 7, 8]],\n 9, 10,\n [11, [12, [13, [14, [15, [16, 17, 18]]]]]],\n ]\n exp = range(1, 18+1)\n result = scuba.utils.flatten_list(sample)\n assert_seq_equal(result, exp)\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"247267601","text":"import csv\nimport PyPDF2\n\n\n# another git test\nwith open ( '//home/urik/Documents/SOW008.005b ExecSum.pdf', mode ='rb') as pdff:\n pdfText = PyPDF2.PdfFileReader(pdff)\n x = pdfText.documentInfo\n with open 
('/home/urik/Python38/workspace/extractedPDF.txt', 'w') as exFile:\n        # iterate over the actual page count, not the metadata entries in documentInfo\n        for i in range(pdfText.numPages):\n            pdfPage = pdfText.getPage(i)\n            pageText = pdfPage.extractText()\n            exFile.write('This is Page {}\\n'.format(i+1))\n            exFile.write('=============================\\n')\n            exFile.write(pageText)\n\n","sub_path":"PDF Extract.py","file_name":"PDF Extract.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"105465266","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport argparse\nimport itertools\nimport sys\nimport yaml\n\nfrom datetime import date, datetime, timedelta\nfrom backports import statistics\n\nimport iso8601\nfrom urlobject import URLObject\n\nfrom jreport.util import paginated_get\n\nDEBUG = False\n\nREPOS = (\n    # owner, repo, label to indicate external contribution\n    (\"edx\", \"edx-platform\", \"open-source-contribution\"),\n    #(\"edx\", \"configuration\", \"open-source-contribution\"),\n)\n\n\ndef get_internal_usernames():\n    \"\"\"\n    Returns a set of the Github usernames that are associated with edX.\n    \"\"\"\n    with open(\"mapping.yaml\") as mapping_yaml:\n        mapping = yaml.load(mapping_yaml)\n\n    internal_usernames = set()\n    for github_name, info in mapping.iteritems():\n        if info.get(\"institution\", \"unknown\") == \"edX\":\n            internal_usernames.add(github_name)\n    return internal_usernames\n\n\ndef get_user_org_mapping():\n    with open(\"mapping.yaml\") as mapping_yaml:\n        mapping = yaml.load(mapping_yaml)\n\n    return { user:data.get('institution', 'other') for user, data in mapping.items() }\n\n\ndef get_duration_data(\n    durations, owner=\"edx\", repo=\"edx-platform\", since=None,\n    external_label=\"open-source-contribution\", internal_usernames=None,\n    user_org_mapping=None,\n):\n    \"\"\"\n    Update `durations`, a dict of dict of lists of pull requests.\n\n    `durations` has four lists of data, where each list contains only timedelta objects:\n    age of internal open pull requests (all)\n    age of external open pull requests (all)\n    age of internal closed pull requests (since the `since` value)\n    age of external closed pull requests (since the `since` value)\n\n    These lists are organized into a dictionary that categorizes the lists\n    by position and state.\n    \"\"\"\n    internal_usernames = internal_usernames or set()\n    user_org_mapping = user_org_mapping or {}\n\n    url = URLObject(\"https://api.github.com/repos/{owner}/{repo}/issues\".format(\n        owner=owner, repo=repo))\n    # two separate URLs, one for open PRs, the other for closed PRs\n    open_url = url.set_query_param(\"state\", \"open\")\n    closed_url = url.set_query_param(\"state\", \"closed\")\n    if since:\n        closed_url = closed_url.set_query_param('since', since.isoformat())\n\n    open_issues_generator = itertools.izip(\n        paginated_get(open_url),\n        itertools.repeat(\"open\")\n    )\n    closed_issues_generator = itertools.izip(\n        paginated_get(closed_url),\n        itertools.repeat(\"closed\")\n    )\n\n    for issue, state in itertools.chain(open_issues_generator, closed_issues_generator):\n        if not issue.get('pull_request', {}).get('url'):\n            continue\n\n        label_names = [label[\"name\"] for label in issue[\"labels\"]]\n\n        if external_label and external_label in label_names:\n            position = \"external\"\n        else:\n            if issue[\"user\"][\"login\"] in internal_usernames:\n                position = \"internal\"\n            else:\n                position = \"external\"\n\n        created_at = iso8601.parse_date(issue[\"created_at\"]).replace(tzinfo=None)\n        if state == \"open\":\n            closed_at = datetime.utcnow()\n        
else:\n closed_at = iso8601.parse_date(issue[\"closed_at\"]).replace(tzinfo=None)\n issue['duration'] = closed_at - created_at\n issue['org'] = user_org_mapping.get(issue['user']['login'], \"other\")\n\n if DEBUG:\n print(\"{owner}/{repo}#{num}: {position} {state}\".format(\n owner=owner, repo=repo, num=issue[\"number\"],\n position=position, state=state\n ), file=sys.stderr)\n\n durations[state][position].append(issue)\n\n\ndef main(argv):\n parser = argparse.ArgumentParser(description=\"Summarize pull requests.\")\n parser.add_argument(\"--since\", metavar=\"DAYS\", type=int, default=14,\n help=\"For closed issues, only include issues updated in the past DAYS days [%(default)d]\"\n )\n parser.add_argument(\"--human\", action=\"store_true\",\n help=\"Human-readable output\"\n )\n parser.add_argument(\"--org\", action=\"store_true\",\n help=\"Break down by organization\"\n )\n args = parser.parse_args(argv[1:])\n\n since = None\n if args.since:\n since = date.today() - timedelta(days=args.since)\n\n internal_usernames = get_internal_usernames()\n user_org_mapping = get_user_org_mapping()\n\n if args.org:\n categories = sorted(set(user_org_mapping.values()))\n def cat_filter(cat, pr):\n return pr['org'] == cat\n else:\n categories = [\"all\"]\n def cat_filter(cat, pr):\n return True\n\n durations = {\n \"open\": {\n \"internal\": [],\n \"external\": [],\n },\n \"closed\": {\n \"internal\": [],\n \"external\": [],\n }\n }\n for owner, repo, label in REPOS:\n get_duration_data(durations, owner, repo, since, label, internal_usernames, user_org_mapping)\n\n for linenum, cat in enumerate(categories):\n ss_friendly = []\n for position in (\"external\", \"internal\"):\n for state in (\"open\", \"closed\"):\n seconds = [p['duration'].total_seconds() for p in durations[state][position] if cat_filter(cat, p)]\n if seconds:\n median_seconds = int(statistics.median(seconds))\n median_duration = timedelta(seconds=median_seconds)\n else:\n median_seconds = -1\n median_duration = \"no data\"\n population = \"all\"\n if state == \"closed\" and since:\n population = \"since {date}\".format(date=since)\n if args.human:\n print(\"median {position} {state} ({population}): {duration}\".format(\n position=position, state=state, population=population,\n duration=median_duration\n ))\n else:\n ss_friendly += [len(seconds), median_seconds]\n\n if ss_friendly:\n if linenum == 0:\n print(\"cat\\twhen\\trepos\\teopen\\teopenage\\teclosed\\teclosedage\\tiopen\\tiopenage\\ticlosed\\ticlosedage\")\n ss_data = \"\\t\".join(str(x) for x in ss_friendly)\n print(\"{}\\t{:%m/%d/%Y}\\t{}\\t{}\".format(cat, date.today(), len(REPOS), ss_data))\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","sub_path":"pull-age.py","file_name":"pull-age.py","file_ext":"py","file_size_in_byte":6431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"236203996","text":"# Реализовать структуру «Рейтинг», представляющую собой набор натуральных чисел, который не возрастает.\n# У пользователя нужно запрашивать новый элемент рейтинга.\n# Если в рейтинге существуют элементы с одинаковыми значениями, то новый элемент с тем же значением должен разместиться после них.\n# Подсказка. Например, набор натуральных чисел: 7, 5, 3, 3, 2.\n# Пользователь ввёл число 3. Результат: 7, 5, 3, 3, 3, 2.\n# Пользователь ввёл число 8. Результат: 8, 7, 5, 3, 3, 2.\n# Пользователь ввёл число 1. 
Результат: 7, 5, 3, 3, 2, 1.\n# Набор натуральных чисел можно задать сразу в коде, например, my_list = [7, 5, 3, 3, 2].\n\nlist = [7, 5, 3, 3, 2]\n\nprint(list)\ntsil = list[::-1]\nprint(tsil)\nwhile True:\n\tanswer = str(input('Хочешь ввести число(y/n): '))\n\tif answer == 'y' or answer == 'Y':\n\t\tnumber = int(input('Введите число '))\n\t\tfor i in list:\n\t\t\tcount = int(list.index(i))\n\t\t\tif number == i:\n\t\t\t\tlist.insert(count, number)\n\t\t\t\tprint(f'Пользователь ввёл число {number}. Результат: {list}')\n\t\t\t\tbreak\n\t\t\telif number > i:\n\t\t\t\tlist.insert(count, number)\n\t\t\t\tprint(f'Пользователь ввёл число {number}. Результат: {list}')\n\t\t\t\tbreak\n\t\t\telif number < i and count + 1 == len(list): #условие для 1\n\t\t\t\tlist.insert(count + 1, number)\n\t\t\t\tprint(f'Пользователь ввёл число {number}. Результат: {list}')\n\t\t\t\tbreak\n\t\t\telif number < i:\n#\t\t\t\tlist.insert(count, number)\n#\t\t\t\tprint(f'Пользователь ввёл число {number}. Результат: {list}')\n\t\t\t\tcontinue\n\n\telse:\n\t\tbreak\n\n\nprint(list)\n\n","sub_path":"homework2/5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"380325708","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"ZWWJetsSkim\")\n\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = 100\n\nprocess.source = cms.Source(\"PoolSource\", \n fileNames = cms.untracked.vstring(\n #'/store/relval/CMSSW_3_8_3/RelValTTbar/GEN-SIM-RECO/START38_V9-v1/0022/CA9763E0-EFBF-DF11-81C5-002618943845.root',\n #'/store/relval/CMSSW_3_8_3/RelValZEE/GEN-SIM-RECO/MC_38Y_V9-v1/0021/3065AA71-9EBF-DF11-A946-0018F3D09676.root',\n 'file:WZEG_CentralSkim.root'\n ),\n)\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) ) \n\nprocess.load(\"Configuration.StandardSequences.MagneticField_cff\")\nprocess.load(\"Configuration.StandardSequences.Geometry_cff\")\nprocess.load(\"Configuration.StandardSequences.FrontierConditions_GlobalTag_cff\")\nprocess.load(\"Configuration.StandardSequences.Reconstruction_cff\")\n#process.GlobalTag.globaltag = cms.string('START38_V9::All')\nprocess.GlobalTag.globaltag = cms.string('GR_R_38X_V13::All')\n\n########### Event\nprocess.goodVertexFilter = cms.EDFilter(\"VertexSelector\",\n src = cms.InputTag(\"offlinePrimaryVertices\"),\n cut = cms.string(\"!isFake && ndof > 4 && abs(z) <= 25 && position.Rho <= 2\"),\n filter = cms.bool(True),\n)\n\nprocess.noScraping = cms.EDFilter(\"FilterOutScraping\",\n applyfilter = cms.untracked.bool(True),\n debugOn = cms.untracked.bool(False), ## Or 'True' to get some per-event info\n numtrack = cms.untracked.uint32(10),\n thresh = cms.untracked.double(0.25)\n)\nprocess.eventFilter = cms.Sequence(process.goodVertexFilter + process.noScraping)\n\n############ MUONS\nfrom WWAnalysis.Filters.muonSelections_cff import *\nprocess.load(\"MuonAnalysis.MuonAssociators.patMuonsWithTrigger_cff\")\nfrom MuonAnalysis.MuonAssociators.patMuonsWithTrigger_cff import *\n\nprocess.goodMuons = cms.EDFilter(\"PATMuonRefSelector\",\n src = cms.InputTag(\"patMuonsWithTrigger\"),\n cut = cms.string(\"pt > 10 && \" + MUON_ID_CUT + \" && \" + MUON_ISO_CUT),# + \" && abs(dB) < 0.02\")\n)\nprocess.zMuMu = cms.EDProducer(\"CandViewShallowCloneCombiner\",\n decay = cms.string(\"goodMuons@+ 
patMuonsWithTrigger@-\"),\n cut = cms.string('mass > 40 && daughter(1).pt > 10'),\n)\nprocess.muonSequence = cms.Sequence(process.patMuonsWithTriggerSequence + process.goodMuons + process.zMuMu)\n\n########### ELECTRONS\nfrom WWAnalysis.Filters.electronSelections_cff import *\nfrom WWAnalysis.Filters.convRejectionSelection_cfi import convRejectionSelection\nprocess.convRejectionElectrons = convRejectionSelection.clone(src = \"gsfElectrons\", filter = False)\nprocess.expectedHitsComputer = cms.EDProducer(\"ExpectedHitsComputer\",\n inputColl = cms.InputTag(\"gsfElectrons\"),\n useGsfTrack = cms.bool(True),\n objectSelection = cms.string(\"\"),\n propagator = cms.string('PropagatorWithMaterialOpposite'),\n navigationSchool = cms.string('SimpleNavigationSchool'),\n measurementTracker = cms.string(''),\n)\nprocess.load(\"PhysicsTools.PatAlgos.producersLayer1.electronProducer_cfi\")\nprocess.patElectrons.addElectronID = False\nprocess.patElectrons.addGenMatch = False\nprocess.patElectrons.userData.userInts.src = cms.VInputTag(cms.InputTag(\"expectedHitsComputer\"))\nprocess.patElectronsWithConvR = cms.EDProducer(\"PATElectronCleaner\",\n src = cms.InputTag(\"patElectrons\"), \n preselection = cms.string('pt > 10'),\n checkOverlaps = cms.PSet(\n convR = cms.PSet(\n src = cms.InputTag(\"convRejectionElectrons\"),\n algorithm = cms.string(\"byDeltaR\"),\n preselection = cms.string(\"pt > 10\"), \n deltaR = cms.double(0.0001), # they're the same!\n checkRecoComponents = cms.bool(False), # doesn't work\n pairCut = cms.string(\"\"),\n requireNoOverlaps = cms.bool(False), # overlaps don't cause the electron to be discared\n )\n ),\n finalCut = cms.string(''),\n)\n\nprocess.load(\"PhysicsTools.PatAlgos.triggerLayer1.triggerProducer_cfi\")\nprocess.eleTriggerMatchHLT = cms.EDProducer( \"PATTriggerMatcherDRDPtLessByR\",\n src = cms.InputTag( \"patElectronsWithConvR\" ),\n matched = cms.InputTag( \"patTrigger\" ),\n andOr = cms.bool( False ),\n filterIdsEnum = cms.vstring( '*' ),\n filterIds = cms.vint32( 0 ),\n filterLabels = cms.vstring( '*' ),\n pathNames = cms.vstring( '*' ),\n collectionTags = cms.vstring( 'hltL1IsoRecoEcalCandidate', 'hltL1NonIsoRecoEcalCandidate' ),\n maxDPtRel = cms.double( 0.5 ),\n maxDeltaR = cms.double( 0.5 ),\n resolveAmbiguities = cms.bool( True ),\n resolveByMatchQuality = cms.bool( True )\n)\nprocess.eleIdTriggerMatchHLT = process.eleTriggerMatchHLT.clone(collectionTags = cms.vstring('hltPixelMatchElectronsL1Iso', 'hltPixelMatchElectronsL1NonIso') )\nprocess.patElectronsWithTrigger = cms.EDProducer( \"PATTriggerMatchElectronEmbedder\",\n src = cms.InputTag( \"patElectronsWithConvR\" ),\n matches = cms.VInputTag(cms.InputTag('eleTriggerMatchHLT'), cms.InputTag('eleIdTriggerMatchHLT'))\n)\nprocess.goodElectrons = cms.EDFilter(\"PATElectronRefSelector\",\n src = cms.InputTag(\"patElectronsWithTrigger\"),\n cut = cms.string((\"pt > 10 && \"+ELE_ID_CUT+\" && \"+ELE_ISO_CUT)),\n #\" && abs(dB) < 0.02 && \" + \n #\"!(1.4442 < abs(eta) < 1.5666) && \" + \n #\"!hasOverlaps('convR') && userInt('expectedHitsComputer') == 0\")),\n)\nprocess.zElEl = cms.EDProducer(\"CandViewShallowCloneCombiner\",\n decay = cms.string('goodElectrons@+ patElectronsWithTrigger@-'),\n cut = cms.string('mass > 40 && daughter(1).pt > 10'),\n)\n\nprocess.electronSequence = cms.Sequence( \n process.convRejectionElectrons *\n process.expectedHitsComputer *\n process.patElectrons *\n process.patElectronsWithConvR *\n (process.eleTriggerMatchHLT + process.eleIdTriggerMatchHLT) *\n process.patElectronsWithTrigger 
*\n process.goodElectrons *\n process.zElEl\n)\n\nprocess.lep = cms.EDProducer(\"CandViewMerger\",\n src = cms.VInputTag(cms.InputTag(\"goodMuons\"), cms.InputTag(\"goodElectrons\"))\n)\nprocess.diLep = cms.EDProducer(\"CandViewShallowCloneCombiner\",\n decay = cms.string('lep lep'),\n cut = cms.string('mass > 10'),\n checkCharge = cms.bool(False)\n)\n\n\nprocess.load(\"PhysicsTools.PFCandProducer.pfNoPileUp_cff\")\nprocess.pfPileUp.Enable = True # otherwise it does nothing at all\nprocess.ak5PFJetsNoPU = process.ak5PFJets.clone(src = cms.InputTag(\"pfNoPileUp\"))\n\nprocess.jetSequence = cms.Sequence(\n process.pfNoPileUpSequence +\n process.ak5PFJetsNoPU\n)\n\n#VBTF\nimport WWAnalysis.SkimStep.simpleCutBasedElectronIDSpring10_cfi as newcb\nprocess.eidVBTFRel95 = newcb.simpleCutBasedElectronID.clone( electronQuality = '95relIso' )\nprocess.eidVBTFCom95 = newcb.simpleCutBasedElectronID.clone( electronQuality = '95cIso' )\nprocess.eidVBTFRel90 = newcb.simpleCutBasedElectronID.clone( electronQuality = '90relIso' )\nprocess.eidVBTFCom90 = newcb.simpleCutBasedElectronID.clone( electronQuality = '90cIso' )\nprocess.eidVBTFRel85 = newcb.simpleCutBasedElectronID.clone( electronQuality = '85relIso' )\nprocess.eidVBTFCom85 = newcb.simpleCutBasedElectronID.clone( electronQuality = '85cIso' )\nprocess.eidVBTFRel80 = newcb.simpleCutBasedElectronID.clone( electronQuality = '80relIso' )\nprocess.eidVBTFCom80 = newcb.simpleCutBasedElectronID.clone( electronQuality = '80cIso' )\nprocess.eidVBTFRel70 = newcb.simpleCutBasedElectronID.clone( electronQuality = '70relIso' )\nprocess.eidVBTFCom70 = newcb.simpleCutBasedElectronID.clone( electronQuality = '70cIso' )\n#CiC (requires RecoEgamma/ElectronIdentification V00-03-13)\nimport RecoEgamma.ElectronIdentification.cutsInCategoriesElectronIdentification_cfi as cic\nprocess.eidCiCVeryLoose = cic.eidVeryLoose.clone()\nprocess.eidCiCLoose = cic.eidLoose.clone()\nprocess.eidCiCMedium = cic.eidMedium.clone()\nprocess.eidCiCTight = cic.eidTight.clone()\nprocess.eidCiCSuperTight = cic.eidSuperTight.clone()\nprocess.eidCiCHyperTight1 = cic.eidHyperTight1.clone()\nprocess.eidCiCHyperTight2 = cic.eidHyperTight2.clone()\nprocess.eidCiCHyperTight3 = cic.eidHyperTight3.clone()\nprocess.eidCiCHyperTight4 = cic.eidHyperTight4.clone()\n#CiC for HWW (requires RecoEgamma/ElectronIdentification V00-03-13)\nimport RecoEgamma.ElectronIdentification.cutsInCategoriesHWWElectronIdentificationV04_cfi as cicHww\nprocess.eidCiCHwwVeryLoose = cicHww.eidVeryLoose.clone()\nprocess.eidCiCHwwLoose = cicHww.eidLoose.clone()\nprocess.eidCiCHwwMedium = cicHww.eidMedium.clone()\nprocess.eidCiCHwwTight = cicHww.eidTight.clone()\nprocess.eidCiCHwwSuperTight = cicHww.eidSuperTight.clone()\nprocess.eidCiCHwwHyperTight1 = cicHww.eidHyperTight1.clone()\nprocess.eidCiCHwwHyperTight2 = cicHww.eidHyperTight2.clone()\nprocess.eidCiCHwwHyperTight3 = cicHww.eidHyperTight3.clone()\nprocess.eidCiCHwwHyperTight4 = cicHww.eidHyperTight4.clone()\nprocess.eidSequence = cms.Sequence(\n process.eidVBTFRel95 + \n process.eidVBTFCom95 + \n process.eidVBTFRel90 + \n process.eidVBTFCom90 + \n process.eidVBTFRel85 + \n process.eidVBTFCom85 + \n process.eidVBTFRel80 + \n process.eidVBTFCom80 +\n process.eidVBTFRel70 + \n process.eidVBTFCom70 +\n process.eidCiCVeryLoose +\n process.eidCiCLoose +\n process.eidCiCMedium +\n process.eidCiCTight +\n process.eidCiCSuperTight +\n process.eidCiCHyperTight1 +\n process.eidCiCHyperTight2 +\n process.eidCiCHyperTight3 +\n process.eidCiCHyperTight4 +\n process.eidCiCHwwVeryLoose +\n 
process.eidCiCHwwLoose +\n process.eidCiCHwwMedium +\n process.eidCiCHwwTight +\n process.eidCiCHwwSuperTight +\n process.eidCiCHwwHyperTight1 +\n process.eidCiCHwwHyperTight2 +\n process.eidCiCHwwHyperTight3 +\n process.eidCiCHwwHyperTight4\n)\nprocess.newExpectedHitsElectrons = cms.EDProducer(\"ExpectedHitsComputer\",\n inputColl = cms.InputTag(\"gsfElectrons\"),\n useGsfTrack = cms.bool(True),\n objectSelection = cms.string(\"\"),\n propagator = cms.string('PropagatorWithMaterialOpposite'),\n navigationSchool = cms.string('SimpleNavigationSchool'),\n measurementTracker = cms.string(''),\n)\nprocess.newExpectedHitsMuons = cms.EDProducer(\"ExpectedHitsComputer\",\n inputColl = cms.InputTag(\"muons\"),\n useGsfTrack = cms.bool(False),\n objectSelection = cms.string(\"\"),\n propagator = cms.string('PropagatorWithMaterialOpposite'),\n navigationSchool = cms.string('SimpleNavigationSchool'),\n measurementTracker = cms.string(''),\n)\nprocess.newExpectedHits = cms.Sequence(process.newExpectedHitsElectrons + process.newExpectedHitsMuons)\n\nprocess.reco = cms.Sequence(\n process.muonSequence + process.electronSequence + \n process.lep + process.diLep + \n process.jetSequence + \n process.eidSequence +\n process.newExpectedHits\n)\n\nprocess.fMM = cms.EDFilter(\"CandViewCountFilter\", src = cms.InputTag(\"zMuMu\"), minNumber = cms.uint32(1))\nprocess.fEE = cms.EDFilter(\"CandViewCountFilter\", src = cms.InputTag(\"zElEl\"), minNumber = cms.uint32(1))\nprocess.fLL = cms.EDFilter(\"CandViewCountFilter\", src = cms.InputTag(\"diLep\"), minNumber = cms.uint32(1))\n\nprocess.pMM = cms.Path(process.eventFilter + process.reco + process.fMM)\nprocess.pEE = cms.Path(process.eventFilter + process.reco + process.fEE)\nprocess.pLL = cms.Path(process.eventFilter + process.reco + process.fLL)\n\nif False:\n process.genLeptonFiducial = cms.EDFilter(\"GenParticleSelector\",\n src = cms.InputTag(\"genParticles\"),\n cut = cms.string(\"(abs(pdgId) == 11 || abs(pdgId) == 13) && status == 3 && abs(eta) < 2.4 && pt > 20\"),\n )\n process.diGenLeptonFiducial = cms.EDProducer(\"CandViewShallowCloneCombiner\",\n decay = cms.string(\"genLeptonFiducial genLeptonFiducial\"),\n cut = cms.string(\"mass > 10\"),\n checkCharge = cms.bool(False),\n )\n process.diGenLeptonFiducialFilter = cms.EDFilter(\"CandViewCountFilter\", src = cms.InputTag(\"diGenLeptonFiducial\"), minNumber = cms.uint32(1))\n process.pGG = cms.Path(process.eventFilter + process.reco + process.genLeptonFiducial + process.diGenLeptonFiducial + process.diGenLeptonFiducialFilter)\n process.pGGX = cms.Path(process.genLeptonFiducial + process.diGenLeptonFiducial + process.diGenLeptonFiducialFilter)\n\nif False:\n process.zLooseElEl = cms.EDProducer(\"CandViewShallowCloneCombiner\",\n decay = cms.string(\"gsfElectrons gsfElectrons\"),\n cut = cms.string(\"mass > 40 && min(daughter(0).pt, daughter(1).pt) > 10\"),\n checkCharge = cms.bool(False),\n )\n process.zLooseMuMu = cms.EDProducer(\"CandViewShallowCloneCombiner\",\n decay = cms.string(\"muons muons\"),\n cut = cms.string(\"mass > 40 && min(daughter(0).pt, daughter(1).pt) > 10\"),\n checkCharge = cms.bool(False),\n )\n process.flEE = cms.EDFilter(\"CandViewCountFilter\", src = cms.InputTag(\"zLooseElEl\"), minNumber = cms.uint32(1))\n process.flMM = cms.EDFilter(\"CandViewCountFilter\", src = cms.InputTag(\"zLooseMuMu\"), minNumber = cms.uint32(1))\n process.lEE = cms.Path(process.zLooseElEl + process.flEE)\n process.lMM = cms.Path(process.zLooseMuMu + process.flMM)\n\n\nprocess.out = 
cms.OutputModule(\"PoolOutputModule\", \n fileName = cms.untracked.string(\"selected.root\"),\n SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring(\"pEE\",\"pMM\",\"pLL\")),\n outputCommands = cms.untracked.vstring(\n \"drop *\",\n # This stuff\n \"keep *_patMuonsWithTrigger_*_*\",\n \"keep *_patElectronsWithTrigger_*_*\",\n \"keep *_goodMuons_*_*\",\n \"keep *_goodElectrons_*_*\",\n \"keep *_zMuMu_*_*\",\n \"keep *_zElEl_*_*\",\n \"keep *_diLep_*_*\",\n \"keep *_ak5PFJets_*_*\",\n \"keep *_ak5PFJetsNoPU_*_*\",\n # Tracking\n \"keep *_offlinePrimaryVertices_*_*\",\n \"keep *_offlinePrimaryVerticesWithBS_*_*\",\n \"keep *_offlineBeamSpot_*_*\",\n \"keep recoTracks_generalTracks_*_*\",\n \"keep *_newExpectedHits*_*_*\",\n # Muon\n \"keep *_muons_*_*\",\n \"keep recoTracks_globalMuons_*_*\",\n \"keep recoTracks_standAloneMuons_*_*\",\n \"keep recoTrackExtras_standAloneMuons_*_*\",\n # E/Gamma\n \"keep *_photons_*_*\",\n \"keep *_gsfElectrons_*_*\",\n \"keep *_gsfElectronCores_*_*\",\n \"keep *_pfElectronTranslator_*_*\",\n \"keep recoSuperClusters_*_*_*\",\n \"keep recoGsfTracks_electronGsfTracks_*_*\",\n \"keep *_eidVBTF*_*_*\",\n \"keep *_eidCiC*_*_*\",\n # PF\n \"keep recoPFCandidates_particleFlow__*\",\n # tcMET\n \"keep *_pfMet_*_*\",\n \"keep *_tcMet_*_*\",\n # MC\n \"keep *_genParticles_*_*\",\n \"keep *_genMetTrue_*_\",\n # Trigger\n \"keep *_hltTriggerSummaryAOD_*_*\",\n \"keep *_TriggerResults_*_*\",\n )\n\n)\nprocess.end = cms.EndPath(process.out)\n","sub_path":"WWAnalysis/Filters/test/dataDriven/z_ww_jets.py","file_name":"z_ww_jets.py","file_ext":"py","file_size_in_byte":14858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"167597680","text":"#!/usr/bin/env python3\n#####################################\n# Usage: gene_length.py UCSC_refGenes_table.txt output.txt #\n# Manual:Get genes average length in KB #\n#####################################\nimport sys\nimport numpy\nf_output = open(sys.argv[2],'w')\ngenes={}\ngen_len = {}\n\nfor line in open(sys.argv[1]):\n\ti = line.split()\n\tname = i[12]\n\tcdsS = i[9].split(',')[:-1]\n\tcdsE = i[10].split(',')[:-1]\n\tlength = 0\n\tfor j in range(len(cdsS)):\n\t\tlength+=int(cdsE[j])-int(cdsS[j])\n\tgenes.setdefault(name,[]).append(length)\n\t\nfor i in genes:\n\tgen_len.setdefault(i,[]).append(str(numpy.mean(genes[i])/1000))\n\t\nfor i in genes_k:\n\tf_output.writelines(i+'\\t'+''.join(gen_len[i])+'\\n')\n\t\nf_output.close()\n################ END ################\n# Created by Aone #\n# quanyi.zhao@stanford.edu #\n################ END ################","sub_path":"gene_length.py","file_name":"gene_length.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"265389351","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom pixivsort.search import Search\nfrom sys import argv, exit\nimport os\n\nclass Main(object):\n \n def __init__(self, argv):\n # Get source and destination directory from\n # commandline arguments\n if len(argv) != 3:\n print(\"Wrong number of arguments.\")\n print(\"Please supply source and target dir.\")\n exit(1)\n\n script, src_path, dst_path = argv\n \n try:\n src_path = unicode(src_path)\n dst_path = unicode(dst_path)\n except:\n pass\n \n if not os.path.exists(src_path):\n print(\"Source directory does not exist. Exiting...\")\n exit(1)\n \n if not os.path.exists(dst_path):\n print(\"Destination directory does not exist. 
Exiting...\")\n exit(1)\n \n if not os.path.isdir(src_path):\n print(\"Source directory is not a directory. Exiting...\")\n exit(1)\n \n if not os.path.isdir(dst_path):\n print(\"Destination directory is not a directory. Exiting...\")\n exit(1)\n \n # Change these to match naming scheme.\n # Image naming scheme: (<artist id>) <artist> - <title>.<extension>\n self.src_regex = \"(?<=^\\()\\d*(?=\\))\"\n # Artist naming scheme: <artist> (<artist id>)\n self.dst_regex = \"(?<=\\()\\d*(?=\\)$)\"\n \n self.src_path = src_path\n self.dst_path = dst_path\n \n def run(self):\n search = Search(self.src_path, \n self.dst_path, \n self.src_regex, \n self.dst_regex)\n\n imagelist = search.find_images()\n artistlist = search.find_artists()\n \n if len(imagelist) == 0 and len(artistlist) == 0:\n print(\"Error: Nothing found neither in source or destination.\")\n print(\"Maybe you have supplied the paths in the wrong order?\")\n print(\"It should be source path first and destination path second.\")\n exit(1)\n \n for artist in artistlist:\n sepparator = \"--------------------------------------------\"\n artiststring = \"%s\\nChecking artist with id: %s\" % (sepparator, artist.id)\n imagestring = \"\"\n for image in imagelist:\n if artist.id == image.artist_id:\n imgpath = image.path.encode(\"utf-8\")\n artistpath = artist.path.encode(\"utf-8\")\n \n image.copy(artist.path)\n # assemble string for status about copying\n tempstring = \"\\n\\tCopying image\"\n copystring = \"\\n\\t%s\\n\\tto\\n\\t%s\" % (imgpath, artistpath)\n tempstring = tempstring + copystring\n imagestring = imagestring + tempstring\n \n if imagestring != \"\":\n print(artiststring + imagestring)\n \n print(\"Finished! Have a nice day.\")\n\n#if __name__ == '__main__':\n# main = Main(argv)\n# main.run()\n","sub_path":"pixivsort/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"50966560","text":"#Imports\r\nfrom tphysics import Circle, Rectangle, Game\r\nfrom random import randint\r\n\r\n#Create a new game\r\ng = Game(\"Empty Game\", 600, 600, \"light blue\")\r\n\r\n# Create a paddle for player one\r\npaddleOne = Rectangle(-250, 0, 30, 100)\r\npaddleOne.fill_colour = \"green\"\r\ng.add_shape(paddleOne)\r\n\r\n# Create a paddle for player two\r\npaddleTwo = Rectangle(250, 0, 30, 100)\r\npaddleTwo.fill_colour = \"red\"\r\ng.add_shape(paddleTwo)\r\n\r\n# Create the ball\r\nball = Circle(0, 0, 20)\r\nball.fill_colour = \"yellow\"\r\ng.add_shape(ball)\r\n\r\n# Store the x and y speed of the ball\r\nballSpeedX = 2\r\nballSpeedY = randint(-3, 3)\r\n\r\n#Game loop\r\nwhile True:\r\n\r\n # Move the ball\r\n ball.x += ballSpeedX\r\n ball.y += ballSpeedY\r\n\r\n # Check if the ball is going off the top of the screen\r\n if ball.y > 200 or ball.y < -200:\r\n ballSpeedY *= -1\r\n\r\n # Check if the ball collides with a paddle\r\n if ball.collide(paddleOne) or ball.collide(paddleTwo):\r\n ballSpeedX *= -1\r\n\r\n # Check if the ball goes off the left or right of the screen\r\n if ball.x > 300 or ball.x < -300:\r\n ball.x = 0\r\n ball.y = 0\r\n ballSpeedY = randint(-3, 3)\r\n\r\n # Check key presses\r\n if g.ispressed(\"w\"):\r\n paddleOne.y += 1\r\n if g.ispressed(\"s\"):\r\n paddleOne.y -= 1\r\n if g.ispressed(\"Up\"):\r\n paddleTwo.y += 1\r\n if g.ispressed(\"Down\"):\r\n paddleTwo.y -= 1\r\n\r\n # Update the game\r\n 
g.update()\r\n","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"404709259","text":"def findMaxPossibleArea(histogram, l, r):\n stack = list()\n max_area = 0\n index = l\n while index <= r:\n if (not stack) or (histogram[stack[-1]] <= histogram[index]):\n stack.append(index)\n index += 1\n else:\n top_of_stack = stack.pop()\n area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index - l))\n max_area = max(max_area, area)\n\n while stack:\n top_of_stack = stack.pop()\n area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index - l))\n max_area = max(max_area, area)\n\n return max_area\n\n","sub_path":"2019Nov/adhoc/histogram_area_optim2.py","file_name":"histogram_area_optim2.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"161152374","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import offsetbox\nfrom sklearn import manifold\nfrom utils import utils\nfrom skimage.transform import resize\n\n# Scale and visualize the embedding vectors\ndef plot_embedding(X, images, y, title=None):\n\tx_min, x_max = np.min(X, 0), np.max(X, 0)\n\tX = (X - x_min) / (x_max - x_min)\n\ty_unique = list(set(y))\n\tn_y = len(y_unique)\n\n\tplt.figure()\n\tax = plt.subplot(111)\n\tfor i in range(X.shape[0]):\n\t\ty_indx = y_unique.index(y[i])\n\t\tplt.text(X[i, 0], X[i, 1], str(y[i]),\n\t\t\t\t color=plt.cm.Set1(y_indx / float(n_y)),\n\t\t\t\t fontdict={'weight': 'bold', 'size': 9})\n\t\n\tsplit_len = 256\n\n\tif hasattr(offsetbox, 'AnnotationBbox'):\n\t\t# only print thumbnails with matplotlib > 1.0\n\t\tshown_images = np.array([[1., 1.]]) # just something big\n\t\tfor i in range(X.shape[0]):\n\t\t\tdist = np.sum((X[i] - shown_images) ** 2, 1)\n\t\t\tif np.min(dist) < 4e-3:\n\t\t\t\t# don't show points that are too close\n\t\t\t\tcontinue\n\t\t\tshown_images = np.r_[shown_images, [X[i]]]\n\n\t\t\timg = utils.imread(images[i])\n\t\t\timg = img[:, split_len:, :]\n\t\t\timagebox = offsetbox.AnnotationBbox(\n\t\t\t\toffsetbox.OffsetImage(img, cmap=plt.cm.gray_r),\n\t\t\t\tX[i])\n\t\t\tax.add_artist(imagebox)\n\tplt.xticks([]), plt.yticks([])\n\tif title is not None:\n\t\tplt.title(title)\n\ndef plot_grid_embedding(X, images, y, \n\tgrid_size=2000, img_size=50, title='grid_embedding.png', img_split=False):\n\t\"\"\"\n\tPlots t-SNE embedding in a grid similar to https://cs.stanford.edu/people/karpathy/cnnembed/\n\t\tX [n x 2]: vector encoding of images using t-SNE\n\t\timages [n x 1]: list of the n image's paths\n\t\ty [n x 1]: list of n labels, one label for each image (e.g. style, class)\n\t\tgrid_size: the size of the full embedding. \n\t\t\tThe default (2000) will create a grid of size 2000 x 2000 pixels\n\t\timg_size: the size of the individual images to be embedded onto the grid. 
\n\t\t\tThe default will embed n 50 x 50 images onto the grid\n\t\ttitle: plot title\n\t\timg_split: True if the images needs to be split (in the case of pix2pix AB combined)\n\t\t\t\t\tFalse if not\n\n\t\"\"\"\n\n\tx_min, x_max = np.min(X, 0), np.max(X, 0)\n\tX = (X - x_min) / (x_max - x_min)\n \n\ty_unique = list(set(y))\n\tn_y = len(y_unique)\n\n\t\n\tsplit_len = int(utils.imread(images[0]).shape[1]/2.0)\n\n\t# initialize the grid image \n\tgrid = np.zeros((grid_size, grid_size, 3))\n\tindxs = np.arange(0, grid_size, img_size)\n\n\timg_ids = []\n\n\tn = X.shape[0]\n\n\n\tfor i in indxs:\n\t\tfor j in indxs:\n\t\t\txf, yf = float(i)/grid_size, float(j)/grid_size\n\t\t\tn = X.shape[0]\n\t\t\tif not n:\n\t\t\t\tbreak\n\t\t\txg = np.tile([xf, yf], (n, 1))\n\t\t\td = np.sum((xg-X)**2, axis=1) #euclidean distances between images and grid point\n\n\t\t\tmin_i = np.argmin(d)\n\n\t\t\t# print(used)\n\t\t\timg = utils.imread(images[min_i])\n\t\t\tif img_split:\n\t\t\t\timg = img[:, split_len:, :]\n\t\t\t#resize image to embed in grid\n\t\t\timg = resize(img, (img_size, img_size))\n\t\t\tX = np.delete(X, (min_i), axis=0)\n\t\t\timages = np.delete(images, (min_i), axis=0)\n\t\t\t\n\t\t\tgrid[i:i+img_size, j:j+img_size] = img\n\t\t\timg_ids.append(min_i)\n\n\tutils.imwrite(title, grid)\n\treturn img_ids\n\n\n \n\n\n\n\ndef vis_TSNE(images, labels, codes):\n\tprint(' - Dimensionality reduction using TSNE') \n\n\ttsne = manifold.TSNE(n_components=2, init='pca', random_state=0)\n\tX_tsne = tsne.fit_transform(codes)\n\n\timg_ids = plot_grid_embedding(X_tsne, images, labels, img_split=True,\n\t\t\t\tgrid_size=1000, \n\t\t\t\t title=\"t-SNE embedding of styles\")\n\n\tplot_codes(img_ids, codes)\n\n\treturn img_ids\n\ndef plot_codes(img_ids, codes, title='qvector'):\n\tcodes = codes[img_ids]\n\tnp.savetxt('%s.csv'%title, codes, delimiter=',')\n\tgrid = np.zeros((10*codes.shape[0], 10*codes.shape[1] ))\n\n\tmax_c = np.max(codes)\n\tmin_c= np.min(codes)\n\tfor i in range(codes.shape[0]):\n\t\tfor j in range(codes.shape[1]):\n\t\t\tgrid[10*i:10*i+10, 10*j:10*j+10] = codes[i, j]\n\n\tgrid = grid - np.min(grid)\n\tgrid = grid / np.max(grid)\n\n\tutils.imwrite(title, np.uint8(255*grid))\n\n\n","sub_path":"utils/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":3846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"358817125","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\n\r\nPATH = 'C:/Users/User/Documents/bookPic/'\r\nimg = Image.open(PATH+'rada1.png')\r\narr_img = np.asarray(img)\r\nzero_red = arr_img.copy()\r\nzero_red[:, :, 0] = 0\r\n\r\n#plotting subplots of the image in original and R-, G- and B- channel\r\nrows = 1\r\ncols = 2\r\nfig = plt.figure()\r\nfig.add_subplot(rows, cols, 1)\r\nplt.imshow(arr_img)\r\nfig.add_subplot(rows, cols, 2, yticklabels = [])\r\nplt.imshow(zero_red)\r\nplt.show()","sub_path":"ex 2.18.py","file_name":"ex 2.18.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"319090766","text":"from aiohttp import web\nfrom app.models.adverts import Advert\n\nroutes = web.RouteTableDef()\n\n\n@routes.get('/')\nasync def index(request):\n return web.Response(text='Hello Aiohttp!')\n\n\n@routes.view('/advertisements')\nclass AdvertList(web.View):\n async def get(self):\n adverts = await Advert.all()\n return web.json_response([advert.to_dict() for advert in adverts])\n\n async 
def post(self):\n data = await self.request.json()\n advert = await Advert.add(**data)\n return web.json_response(advert.to_dict())\n\n\n@routes.view('/advertisements/{uid}')\nclass Advertisement(web.View):\n async def get(self):\n uid = int(self.request.match_info['uid'])\n advert = await Advert.get_or_404(uid)\n return web.json_response(advert.to_dict())\n\n async def delete(self):\n uid = int(self.request.match_info['uid'])\n advert = await Advert.get_or_404(uid)\n await advert.delete()\n return web.json_response(dict(id=uid))\n\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"352421626","text":"import turtle\r\n\r\nob = turtle.Turtle()\r\n\r\ndef draw(c, x, y):\r\n ob.pencolor(c)\r\n ob.pensize(width=10)\r\n ob.circle(60)\r\n ob.pu()\r\n ob.goto(x, y)\r\n ob.pd()\r\n\r\n\r\ndraw(\"red\", 120, -1)\r\ndraw(\"yellow\", 180, 90)\r\ndraw(\"green\", 65, 90)\r\ndraw(\"blue\", -59, 90)\r\ndraw(\"black\", 0, 0)\r\n\r\nturtle.done()","sub_path":"2_5235874989944603423.py","file_name":"2_5235874989944603423.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"348552215","text":"# -*- coding: utf-8 -*-\n\nimport os, sys, pdb\nfrom pydaily.filesystem import is_image_file\nfrom pydaily import DATA_DIR\n\n\ndef test_is_image_file():\n test_file1 = os.path.join(DATA_DIR, 'input/stomach/crop_patch/p0029.tiff')\n img_flag = is_image_file(test_file1)\n if img_flag == True:\n print(\"{} is an image file\".format(test_file1))\n else:\n print(\"{} is not an image file\".format(test_file1))\n\n test_file2 = os.path.join(DATA_DIR, 'README.md')\n img_flag = is_image_file(test_file2)\n if img_flag == True:\n print(\"{} is an image file\".format(test_file2))\n else:\n print(\"{} is not an image file\".format(test_file2))\n\n\n\nif __name__ == '__main__':\n test_is_image_file()\n","sub_path":"pydaily/filesystem/tests/test_suffix.py","file_name":"test_suffix.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"9096501","text":"\"\"\"Вспомогательные модели для модели Offer\"\"\"\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom main.models_addon.ya_market.base import BaseWeightDimension\nfrom main.models_addon.ya_market.offer.base import Offer\nfrom main.models_addon.ya_market.offer.choices import TimeUnitChoices, MappingType, ProcessingStateNoteType, \\\n ProcessingStateStatus, SupplyScheduleDayChoices, VatType, CurrencyChoices, PriceSuggestionChoices\n\n\nclass PriceSuggestion(models.Model):\n \"\"\"\n Модель для хранения цен для продвижения\n \"\"\"\n offer = models.ForeignKey(to=Offer, on_delete=models.CASCADE, related_name='priceSuggestion')\n price = models.DecimalField(\n max_digits=10,\n decimal_places=2,\n verbose_name='Цена',\n help_text='Указана в рублях. 
Точность — два знака после запятой',\n null=True)\n type = models.CharField(\n max_length=21,\n choices=PriceSuggestionChoices.choices,\n verbose_name='Типы цен',\n null=True, blank=True)\n\n\nclass Timing(models.Model):\n \"\"\"\n Модель для хранения периода времени.\n \"\"\"\n\n class Meta:\n abstract = True\n\n timePeriod = models.PositiveSmallIntegerField(null=True, blank=True)\n timeUnit = models.CharField(max_length=5, choices=TimeUnitChoices.choices, verbose_name='Единица измерения',\n null=True, blank=True)\n comment = models.CharField(max_length=2000, null=True, blank=True)\n\n def get_days(self):\n if self.timeUnit == TimeUnitChoices.HOUR:\n return self.timePeriod / 24\n elif self.timeUnit == TimeUnitChoices.DAY:\n return self.timePeriod\n elif self.timeUnit == TimeUnitChoices.WEEK:\n return self.timePeriod * 7\n elif self.timeUnit == TimeUnitChoices.MONTH:\n return self.timePeriod * 31\n elif self.timeUnit == TimeUnitChoices.YEAR:\n return self.timePeriod * 365\n\n\nclass ShelfLife(Timing):\n offer = models.OneToOneField(to=Offer, on_delete=models.CASCADE, related_name='shelfLife')\n\n\nclass LifeTime(Timing):\n offer = models.OneToOneField(to=Offer, on_delete=models.CASCADE, related_name='lifeTime')\n\n\nclass GuaranteePeriod(Timing):\n offer = models.OneToOneField(to=Offer, on_delete=models.CASCADE, related_name='guaranteePeriod')\n\n\nclass Price(models.Model):\n \"\"\"\n Модель для хранения цены товара.\n \"\"\"\n offer = models.OneToOneField(to=Offer, on_delete=models.CASCADE, related_name='price')\n currencyId = models.CharField(\n max_length=3,\n choices=CurrencyChoices.choices,\n verbose_name='Валюта',\n default=CurrencyChoices.RUR[0][0],\n )\n discountBase = models.FloatField(verbose_name=\"Цена на товар без скидки.\", null=True, blank=True)\n value = models.FloatField(verbose_name=\"Цена на товар.\", null=True, blank=True)\n vat = models.IntegerField(verbose_name='НДС',\n help_text=\"Если параметр не указан, используется ставка НДС, \"\n \"установленная в личном кабинете магазина.\",\n null=True,\n blank=True,\n choices=VatType.choices\n )\n net_cost = models.PositiveIntegerField(verbose_name=\"Себестоимость\", null=True, blank=True)\n has_changed = models.BooleanField(verbose_name='Есть изменения, не отправленные на Яндекс',\n help_text=\"True, если изменения есть, False, если изменений нет\",\n default=True)\n\n def clean(self):\n if self.discountBase and self.discountBase < self.value:\n raise ValidationError({'discountBase': 'Цена на товар без скидки меньше цены на товар'})\n if self.discountBase is not None and not self.discountBase > 0:\n raise ValidationError({'discountBase': 'Цена на товар без скидки должна быть больше'})\n\n\nclass ManufacturerCountry(models.Model):\n \"\"\"\n Модель для хранения страны производителя.\n \"\"\"\n offer = models.ForeignKey(to=Offer, on_delete=models.CASCADE, related_name=\"manufacturerCountries\", )\n name = models.CharField(max_length=255, verbose_name='Страна производства товара')\n\n\nclass WeightDimension(BaseWeightDimension):\n \"\"\"\n Модель для хранения размеров и веса товара.\n \"\"\"\n offer = models.OneToOneField(to=Offer, on_delete=models.CASCADE, related_name='weightDimensions')\n\n\nclass Url(models.Model):\n \"\"\"\n Модель для хранения списка URL\n \"\"\"\n offer = models.ForeignKey(to=Offer, on_delete=models.CASCADE, related_name='urls')\n url = models.URLField(max_length=2000, verbose_name='Ссылка на фото')\n\n\nclass Barcode(models.Model):\n \"\"\"\n Модель для хранения штрихкода товара.\n \"\"\"\n offer = 
models.ForeignKey(to=Offer, on_delete=models.CASCADE, related_name='barcodes')\n barcode = models.CharField(max_length=255, verbose_name='Штрихкод',\n help_text=\"\"\"Штрихкод обязателен при размещении товара по модели FBY и FBY+.\n Допустимые форматы: EAN-13, EAN-8, UPC-A, UPC-E, Code 128. Для книг\n — ISBN-10 или ISBN-13. Для товаров определённых производителей передайте\n только код GTIN. Если штрихкодов несколько, укажите их через запятую.\"\"\",\n )\n\n\nclass CustomsCommodityCode(models.Model):\n \"\"\"\n Модель для хранения кода ТН ВЭД товара.\n \"\"\"\n offer = models.ForeignKey(\n to=Offer,\n on_delete=models.CASCADE,\n related_name='customsCommodityCodes',\n )\n code = models.CharField(max_length=10, verbose_name='Код ТН ВЭД', help_text='Укажите 10 или 14 цифр без пробелов.',\n blank=True, null=True)\n\n def __str__(self):\n return self.code\n\n\nclass SupplyScheduleDays(models.Model):\n \"\"\"\n Модель для хранения дней поставки товара.\n \"\"\"\n offer = models.ForeignKey(to=Offer, on_delete=models.CASCADE, related_name=\"supplyScheduleDays\")\n supplyScheduleDay = models.CharField(\n max_length=9,\n choices=SupplyScheduleDayChoices.choices,\n verbose_name='Дни поставки',\n help_text='Дни недели, когда вы готовы поставлять товары на склад маркетплейса.'\n 'Заполняйте поле, чтобы получать рекомендации о пополнении товаров на складе.',\n null=True\n )\n\n def __str__(self):\n return self.supplyScheduleDay\n\n\nclass ProcessingState(models.Model):\n \"\"\"\n Модель для хранения статуса товара.\n \"\"\"\n offer = models.OneToOneField(to=Offer, on_delete=models.CASCADE, related_name='processingState')\n status = models.CharField(\n max_length=12,\n choices=ProcessingStateStatus.choices,\n verbose_name='Cтатус',\n help_text=\"Можно продавать или нет\",\n null=True\n )\n\n @property\n def get_notes(self):\n return self.notes.all()\n\n\nclass ProcessingStateNote(models.Model):\n \"\"\"\n Модель для хранения причины, по который товар не прошел модерацию.\n \"\"\"\n processingState = models.ForeignKey(\n to=ProcessingState,\n on_delete=models.CASCADE,\n related_name='notes',\n verbose_name='Причины, по которым товар не прошел модерацию',\n null=True\n )\n type = models.CharField(\n max_length=31,\n choices=ProcessingStateNoteType.choices,\n verbose_name='Тип причины, по которой товар не прошел модерацию',\n null=True\n )\n payload = models.CharField(\n max_length=2000,\n verbose_name='Дополнительная информация о причине отклонения товара',\n help_text='Возвращается, если параметр type имеет одно из следующих значений: '\n 'CONFLICTING_INFORMATION, INCORRECT_INFORMATION, NO_PARAMETERS_IN_SHOP_TITLE, NO_SIZE_MEASURE.',\n null=True\n )\n\n\nclass Mapping(models.Model):\n \"\"\"\n Модель для хранения маппинга товара.\n \"\"\"\n offer = models.ForeignKey(to=Offer, on_delete=models.CASCADE, related_name=\"mapping_set\")\n marketSku = models.PositiveSmallIntegerField(\n verbose_name='SKU на Яндексе — идентификатор текущей карточки товара на Маркете',\n null=True,\n )\n modelId = models.PositiveSmallIntegerField(\n verbose_name='Идентификатор модели для текущей карточки товара на Маркете',\n help_text='Например, две лопатки разных цветов имеют разные SKU на Яндексе (параметр market_sku), '\n 'но одинаковый идентификатор модели товара',\n null=True\n )\n categoryId = models.PositiveSmallIntegerField(\n verbose_name='Идентификатор категории для текущей карточки товара на Маркете',\n null=True,\n )\n mappingType = models.CharField(\n max_length=19,\n choices=MappingType.choices,\n verbose_name='Тип 
маппинга',\n null=True\n )\n","sub_path":"main/models_addon/ya_market/offer/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":10540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"496046767","text":"\"\"\"Flask-based service that detects ads in screenshots.\"\"\"\n\nimport json\nimport logging\nfrom timeit import default_timer as timer\n\nimport flask\nimport numpy as np\nimport paste.translogger as tl\nfrom PIL import Image\nimport waitress\n\nfrom yolo_v3 import non_max_suppression\nfrom utils import *\n\n\nTFF = tf.app.flags\n\nTFF.DEFINE_string('class_names', 'cfg/ad.names', 'File with class names')\nTFF.DEFINE_string('weights_file', '../models/page_based_yolov3.weights',\n 'Binary file with detector weights')\n\nTFF.DEFINE_integer('size', 416, 'Image size')\n\nTFF.DEFINE_float('conf_threshold', 0.5, 'Confidence threshold')\nTFF.DEFINE_float('iou_threshold', 0.4, 'IoU threshold')\nTFF.DEFINE_float('match_threshold', 0.4,\n 'IoU required for for detection to be counted as a match')\n\nFLAGS = TFF.FLAGS\n\n\n# Region type (a.k.a. class) that means \"advertisement\".\nAD_TYPE = 0\n\n\ndef scale_box(box, img_size):\n \"\"\"Scale detected box to match image size.\"\"\"\n xscale = img_size[0] / FLAGS.size\n yscale = img_size[1] / FLAGS.size\n x0, y0, x1, y1 = box\n return [\n float(x0) * xscale,\n float(y0) * yscale,\n float(x1) * xscale,\n float(y1) * yscale,\n ]\n\n\nclass AdDetector:\n \"\"\"Ad detector that encapsulates TF session and detection model.\"\"\"\n\n def __init__(self):\n classes = load_coco_names(FLAGS.class_names)\n self.inputs = tf.placeholder(tf.float32, [None, FLAGS.size, FLAGS.size, 3])\n config = tf.ConfigProto()\n logging.info('Initializing TF session')\n self.sess = tf.Session(config=config)\n logging.info('Loading YOLOv3 weights')\n self.detections, self.boxes = init_yolo(\n self.sess, self.inputs, len(classes),\n FLAGS.weights_file, header_size=4,\n )\n logging.info('Done')\n\n def detect(self, image):\n \"\"\"Detect ads in the image, return detection results as a dict.\n\n The return value is as follows:\n\n {\n 'size': [image_width, image_height],\n 'boxes': [\n [x0, y0, x1, y1, probability],\n ...\n ],\n }\n\n \"\"\"\n img = image.resize((FLAGS.size, FLAGS.size))\n if img.mode == 'RGBA':\n img = img.convert(mode='RGB')\n\n logging.info('Detecting ads')\n t1 = timer()\n detected_boxes = self.sess.run(\n self.boxes,\n feed_dict={self.inputs: [np.array(img, dtype=np.float32)]},\n )\n unique_boxes = non_max_suppression(\n detected_boxes,\n confidence_threshold=FLAGS.conf_threshold,\n iou_threshold=FLAGS.iou_threshold,\n )\n boxes = [scale_box(box, image.size) + [float(p)]\n for box, p in unique_boxes[AD_TYPE]]\n t2 = timer()\n logging.debug('Detected boxes: {}'.format(boxes))\n logging.info('Detection complete: found {} ads in {} seconds'\n .format(len(boxes), t2 - t1))\n\n return {\n 'size': image.size,\n 'boxes': boxes,\n 'detection_time': t2 - t1,\n }\n\n\napp = flask.Flask(__name__)\n\n\n@app.route('/', methods=['GET'])\ndef index():\n return \"\"\"\n<html>\n <body>\n <form action=\"/detect\" method=\"POST\" enctype=\"multipart/form-data\">\n <input type=\"file\" name=\"image\" />\n <input type=\"submit\" value=\"submit\" name=\"submit\" />\n </form>\n </body>\n</html>\n\"\"\"\n\n\n@app.route('/detect', methods=['POST'])\ndef detect():\n image_file = flask.request.files['image']\n image = Image.open(image_file)\n response_body = json.dumps(app.detector.detect(image))\n 
response_headers = {\n        'Content-type': 'application/json',\n    }\n    return response_body, response_headers\n\n\ndef serve(argv):\n    app.detector = AdDetector()\n    waitress.serve(tl.TransLogger(app, setup_console_handler=False),\n                   listen='*:8080')\n\n\nif __name__ == '__main__':\n    logging.basicConfig(level=logging.INFO)\n    tf.app.run(main=serve)\n","sub_path":"page-based/serve.py","file_name":"serve.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"610796943","text":"# -*- coding:utf-8 -*-\n\nimport json,urllib.request,re,time\n\ndef readdata():\n    # f = open('test.json','r')\n    # # for i in f.readlines():\n    # #     print(i)\n    # print(type(f.read()))\n    # f.close()\n\n    f = open(\"test.json\")\n    s = json.load(f)\n    for i in s[0]['img']:\n        name = str(time.time())+'.jpg'\n        if(re.match(r'^https?:/{2}\\\\w.+$', i)):\n            writedata(i,name)\n    f.close()\n\ndef writedata(url,name):\n    path = \"img/\"+name\n    # make sure the file type matches when saving: if the image to save is a jpg, the opened file name must also use the jpg extension, otherwise an invalid image is produced\n    conn = urllib.request.urlopen(url)\n    f = open(path,'wb')\n    f.write(conn.read())\n    f.close()\n    print('Pic Saved!')\n\nif __name__==\"__main__\":\n    readdata()","sub_path":"handledata.py","file_name":"handledata.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"45144247","text":"import sys\r\nimport time\r\nfrom datetime import datetime, timedelta\r\nimport math\r\nfrom log.logger import setup_logging\r\nimport importlib\r\nimport os\r\nfrom application.routes import check\r\nfrom multiprocessing import Process\r\n\r\ncfg = 'Config'\r\nc = getattr(importlib.import_module('config'), cfg)\r\nconfig = {}\r\n\r\nfor key in dir(c):\r\n    if key.isupper():\r\n        config[key] = getattr(c, key)\r\n\r\nsetup_logging(config)\r\n\r\n\r\nif len(sys.argv) < 3:\r\n    print('Insufficient parameters specified')\r\n    exit()\r\n\r\ns_year = int(sys.argv[1])\r\ne_year = int(sys.argv[2])\r\n\r\n\r\n# sd = datetime.fromtimestamp(time.mktime(time.strptime(s, '%Y-%m-%d')))\r\n# ed = datetime.fromtimestamp(time.mktime(time.strptime(e, '%Y-%m-%d')))\r\n# days = (ed - sd).days\r\n\r\n# Slightly buggy: this will slice to a minimum of 1 day, so if there are too many slices it'll increase the width\r\n# of the range.\r\nslices = int(os.getenv(\"MIGRATOR_WORKERS\", '4'))\r\nyears_at_a_go = int(os.getenv(\"YEAR_CHUNKS\", '20'))\r\n\r\nc_year = s_year\r\nwhile c_year <= e_year:\r\n    range_end = c_year + (years_at_a_go - 1)\r\n    if range_end > e_year:\r\n        range_end = e_year\r\n    range_start = c_year\r\n\r\n    print(\"{} --> {}\".format(c_year, range_end))\r\n    c_year += years_at_a_go\r\n\r\n    start_date = \"{}-01-01\".format(range_start)\r\n    end_date = \"{}-12-31\".format(range_end)\r\n    sd = datetime.fromtimestamp(time.mktime(time.strptime(start_date, '%Y-%m-%d')))\r\n    ed = datetime.fromtimestamp(time.mktime(time.strptime(end_date, '%Y-%m-%d')))\r\n    days = (ed - sd).days\r\n    day_slice = math.floor(days / slices)\r\n    ranges = [] # {start: x, end: y}\r\n    for x in range(0, slices):\r\n        if x == 0:\r\n            start = sd\r\n        else:\r\n            start = ranges[x - 1]['end'] + timedelta(days=1)\r\n\r\n        if start > ed:\r\n            break\r\n\r\n        if x == slices - 1:\r\n            end = ed\r\n        else:\r\n            end = start + timedelta(days=day_slice)\r\n\r\n        if end > ed:\r\n            end = ed\r\n\r\n        ranges.append({\r\n            'start': start,\r\n            'end': end\r\n        })\r\n\r\n    print(\"{} ranges\".format(len(ranges)))\r\n    for r in ranges:\r\n        name = \"Check {} -> 
{}\".format(r['start'].strftime('%Y-%m-%d'), r['end'].strftime('%Y-%m-%d'))\r\n print(\" \" + name)\r\n p = Process(target=check,\r\n args=(config, r['start'].strftime('%Y-%m-%d'), r['end'].strftime('%Y-%m-%d')),\r\n name=name)\r\n p.start()\r\n\r\n\r\n\r\n#\r\n#\r\n# day_slice = math.floor(days / slices)\r\n# #print(days / slices)\r\n# #print(day_slice)\r\n#\r\n\r\n\r\n\r\n","sub_path":"check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"66261343","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\nfrom setuptools import setup, find_packages\n\nwith open('requirements.txt') as f:\n requirements = f.read().splitlines()\n\nwith open('README.md') as f:\n readme = f.read()\n\nsetup(name='WormDemo',\n version='1.0',\n description='learn python',\n long_description=readme,\n author='wyh',\n author_email='609223770@qq.com',\n url='https://github.com/XiaoWuTongZhi/PythonWormDemo',\n packages=find_packages(),\n install_requires=requirements\n )","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"442644256","text":"# density based topology optimization of cantilever\n\nfrom matplotlib import interactive\nimport numpy as np\nfrom fenics import *\nimport matplotlib.pyplot as plt\n\n## Global settings\nplt.ion()\nplt.show()\nplt.rcParams['image.cmap'] = 'jet'\n\nset_log_active(False)\n\nTOL = 1e-4\n\n## Constants\ndim = 2\n\nLx = 4.0\nLy = 1.0\n\nload_width = 0.05*Ly\n\nE_ = 1.0\nnu_ = 0.33\n\nlambda_ = E_/(2.0*(1 + nu_))\nmu_ = E_*nu_/((1 + nu_)*(1 - 2*nu_))\n\neps_void = 1e-3\n\n## External loads\ntx = 0.0\nty = -1e-1\n\nbx = 0.0\nby = 0.0\n\n## Build mesh\nNx = 100\nNy = 25\nmesh = RectangleMesh(\n Point(0.0,0.0), Point(Lx,Ly), Nx, Ny, \"crossed\"\n)\n# plot(mesh)\n\n## Initialize finite element spaces\nVs = FunctionSpace(mesh, 'P', 1)\nV = VectorFunctionSpace(mesh, 'P', 1)\n\n## Dummy test functions\nv = TestFunction(V)\nvs = TestFunction(Vs)\n\n## Mark facets for identifying BCs\nfacets = MeshFunction('size_t', mesh, 1)\nfacets.set_all(0)\n\nds = Measure('ds', domain=mesh, subdomain_data=facets)\n\n## Set up Dirichlet boundary condition\nclass Clamped(SubDomain):\n def inside(self, x, on_boundary):\n return (\n on_boundary and \n near(x[0], 0.0, TOL)\n )\n\nclamped = Clamped()\nclamped.mark(facets, 1)\ndbc = DirichletBC(V, Constant((0.0, 0.0)), clamped)\n\n## Set up external load\nclass Load(SubDomain):\n def inside(self, x, on_boundary):\n return (\n on_boundary and\n near(x[0], Lx, TOL) and\n near(x[1], 0.5*Ly, load_width)\n ) \n\nload = Load()\nload.mark(facets, 2)\n\n## Thickness distribution\nh0 = 0.2\nh = interpolate(Constant(h0), Vs)\nhfrac = assemble(h*dx(mesh))\n\nhmin = 0.0\nhmax = 1.0\n\n## Lagrange multipliers\nl0 = -0.5*h0\nl1 = 1.5*h0\ndl = 0.1*h0\nlerr = 1e-3\n\n## Interpolating function\ndef zeta(t):\n return t*t*t\n\ndef d_zeta(t):\n return 3*t*t\n\n## Linear elastic strain\ndef epsilon(u):\n return (0.5*(grad(u) + grad(u).T))\n\n## Linear elastic stress\ndef sigma(u, h):\n damage = (zeta(h) + (1.0 - zeta(h))*eps_void)\n return damage*(lambda_*tr(epsilon(u))*Identity(dim) + 2*mu_*epsilon(u))\n\n## Derivative of objective function with respect to thickness\ndef d_obj(u, p, h):\n epsu = epsilon(u)\n epsp = epsilon(p)\n sig = lambda_*tr(epsu)*Identity(dim) + 2*mu_*epsu\n hfact = (1.0 - eps_void)*d_zeta(h)\n return 
hfact*inner(sig,epsp)\n\n## Primal problem\nT = Constant((tx,ty))\nb = Constant((bx,by))\nL = dot(b,v)*dx + dot(T,v)*ds(2)\n\ndef primal(h):\n u = TrialFunction(V)\n a = inner(sigma(u, h), epsilon(v))*dx\n u = Function(V)\n solve(a == L, u, dbc)\n return u\n\n# ## Adjoint problem\n# Ta = Constant((-tx,-ty))\n# ba = Constant((-bx,-by))\n# La = dot(ba,v)*dx + dot(Ta,v)*ds(2)\n\n# def adjoint(h):\n# u = TrialFunction(V)\n# a = inner(sigma(u, h), epsilon(v))*dx\n# u = Function(V)\n# solve(a == La, u, dbc)\n# return u\n\n## Regularization for h\nalfah = 0.01\nhr = TrialFunction(Vs)\nah = ((alfah**2)*dot(grad(hr),grad(vs))+ hr*vs)*dx \n\ndef regularize_h(h): \n L = h*vs*dx\n hr = Function(Vs)\n solve(ah == L, hr)\n return hr\n\n## Utility functions\ndef max(a, b):\n return (a + b + abs(a - b))/2\n\ndef min(a, b):\n return (a + b - abs(a - b))/2\n\n## Open files to record output\nfobj = open(\"objective_fn_pgd.dat\", 'w')\nfh = open(\"volume_pgd.dat\", 'w')\n\n#### Dump volume fraction\nhvol = assemble(h*dx(mesh))\nfh.write('%d\\t%f\\n' % (0, (hvol/hfrac)))\n\n#### Dump objective function\nu = primal(h)\nJ = assemble(dot(b,u)*dx + dot(T,u)*ds(2))\nfobj.write('%d\\t%f' % (0, J))\n\n## Optimization loop\ndt = 1.0 #0.25\nmax_iter = 250\nskip = 20\n\nu_vtk = File('cantilever_deflection_pgd.pvd')\nh_vtk = File('cantilever_pgd.pvd')\n\nfor iter in range(max_iter + 1):\n #### Solve primal and adjoint problems\n u = primal(h)\n # p = adjoint(h)\n\n #### Compute gradient of objective function\n # dJ = d_obj(u,p,h)\n dJ = -d_obj(u,u,h)\n\n #### Update h\n h = h - dt*dJ\n\n #### Enforce constraints by projection\n ###### Choose initial values of l0 and l1\n proj0 = assemble(max(hmin, min(hmax, h + l0))*dx(mesh))\n proj1 = assemble(max(hmin, min(hmax, h + l1))*dx(mesh))\n\n while proj0 > hfrac:\n l0 -= dl\n proj0 = assemble(max(hmin, min(hmax, h + l0))*dx(mesh))\n\n while proj1 < hfrac:\n l1 += dl\n proj1 = assemble(max(hmin, min(hmax, h + l1))*dx(mesh))\n\n ###### Bisection algorithm\n while (l1 - l0) > lerr:\n lmid = (l0 + l1)/2\n projmid = assemble(max(hmin, min(hmax, h + lmid))*dx(mesh))\n\n if projmid < hfrac:\n l0 = lmid\n proj0 = projmid\n else:\n l1 = lmid\n proj1 = projmid\n\n h = max(hmin, min(hmax, h + lmid))\n\n # h = max(hmin, min(hmax, h))\n h = regularize_h(h)\n\n #### Dump volume fraction\n hvol = assemble(h*dx(mesh))\n fh.write('%d\\t%f\\n' % ((iter + 1), (hvol/hfrac)))\n # hvol = assemble(h*dx(mesh))/(Lx*Ly)\n # fh.write('%d\\t%f\\n' % ((iter + 1), hvol))\n\n #### Dump objective function\n J = assemble(dot(b,u)*dx + dot(T,u)*ds(2))\n fobj.write('%d\\t%f\\n' % ((iter + 1), J))\n\n print(f'Iteration {iter + 1}: {J}')\n\n plot(h)\n plt.pause(0.0001)\n\n if iter % skip == 0:\n u.rename('u','u')\n h.rename('h','h')\n u_vtk << (u, iter)\n h_vtk << (h, iter)\n\n## Close files\nfobj.close()\nfh.close()\n","sub_path":"cantilever_pgd.py","file_name":"cantilever_pgd.py","file_ext":"py","file_size_in_byte":5239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"135358789","text":"# encoding:utf-8\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.linear_model import Lasso\nimport scipy as sp\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.feature_selection import VarianceThreshold\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn.feature_selection import RFE\nfrom sklearn.feature_selection import SelectFromModel\nfrom 
sklearn.linear_model import LassoCV\nfrom sklearn.linear_model import Ridge\nfrom sklearn.metrics import r2_score\nfrom sklearn.svm import *\n#import GeneSVM\nAllData = 'C:/Model/Source/Newgenotype.dat'\nData810 = 'C:/Model/Source/810feature.dat'\nData810_3bit = 'C:/Model/Source/810feature_3bit.dat'\nData430_3bit = 'C:/Model/Source/430feature_3bit.dat'\nData190_3bit = 'C:/Model/Source/190feature_3bit.dat'\nData10_3bit = 'C:/Model/Source/10feature_3bit.dat'\nData2_3bit = 'C:/Model/Source/2feature_3bit.dat'\nData2 = 'C:/Model/Source/2feature.dat'\nData430 = 'C:/Model/Source/430feature.dat'\nData190 = 'C:/Model/Source/190feature.dat'\nData10 = 'C:/Model/Source/10feature.dat'\nData800 = 'C:/Model/Source/800feature.dat'\nData80 = 'C:/Model/Source/80feature_3bit.dat'\nAllData_3bit = 'C:/Model/Source/Newgenotype_3bit.dat'\ngeneLocationData800 = 'C:/Model/Source/gene_info/gene_Data800.dat'\ngeneLocationData80 = 'C:/Model/Source/gene_info/gene_Data80.dat'\ndef MyData(Name):\n resultf = pd.read_csv('C:/Model/Source/phenotype.txt', delimiter=' ', header=None)\n dfAll = pd.read_csv(Name, delimiter=' ')\n return dfAll,resultf\ndef Cut():\n #dfAll ,resultf= MyData(Data10_3bit)\n #dfAll ,resultf= Univariate()\n dfAll , resultf = Univariate()\n df1 = pd.DataFrame(dfAll.iloc[0:400, :])\n df1 = df1.append(dfAll.iloc[500:900, :])\n df2 = pd.DataFrame(dfAll.iloc[400:500, :])\n df2 = df2.append(dfAll.iloc[900:1000, :])\n result1 = pd.DataFrame(resultf.iloc[0:400, :])\n result2 = pd.DataFrame(resultf.iloc[400:500, :])\n result1 = result1.append(resultf.iloc[500:900, :])\n result2 = result2.append(resultf.iloc[900:1000, :])\n return df1,df2,result1,result2\ndef SVR():\n print(\"SVR\")\n dfAll,resultf = MyData(Data190_3bit)\n print(dfAll.shape ,resultf.shape)\n estimator = LinearRegression()\n print(\"classifier finished\")\n selector = RFE(estimator,4, step=1)\n print(\"selector finished\")\n selector.fit(dfAll,resultf)\n print(\"selector fitting\")\n df2 = selector.transform(dfAll)\n featureNum = []\n for i in range(0,len(selector.support_),1):\n if(selector.support_[i]==True):\n print(\"数目:%s\"%dfAll.columns[i])\n featureNum.append(i)\n dfAll =dfAll.iloc[:,featureNum]\n # MyNewData(featureNum,Data190,str(len(featureNum))+'feature.dat')\n return dfAll,resultf\ndef MyNewData(arr,Data,Name):\n dfAll = pd.read_csv(Data, delimiter=' ')\n resultf = resultf = pd.read_csv('C:/Model/Source/phenotype.txt', delimiter=' ', header=None)\n Newdf = dfAll.iloc[:,arr]\n print(Newdf)\n Newdf.to_csv(path_or_buf='C:/Model/Source/'+Name,sep=' ',head=True,index=False)","sub_path":"Optimization/SVR.py","file_name":"SVR.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"372081386","text":"class Filter:\n\n def __init__(self, func, li_):\n self.func = func\n self.li_ = li_\n self.ind = 0\n\n def __iter__(self):\n return self\n\n def __next__(self):\n for _ in range(len(self.li_)):\n if self.func(self.li_[self.ind]) and self.ind < len(self.li_):\n res = self.li_[self.ind]\n self.ind += 1\n return res\n elif not self.func(self.li_[self.ind]):\n self.ind += 1\n res = None\n del res\n else:\n raise StopIteration\n\n \"\"\"полуить срез и значение по ключю\"\"\"\n def __getitem__(self, key):\n if isinstance(key, int) and key < len(self.li_):\n return self.li_[key]\n elif isinstance(key, slice):\n return self.li_[key.start:key.stop:key.step]\n else:\n raise IndexError\n\n\nli = [1, 2, 3, -4, 1, -5, -6, 99, 123, -98, 78]\nfilter_ = Filter(lambda 
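SVR.py above wraps sklearn's RFE around LinearRegression with positional arguments. A minimal standalone version on synthetic data; note that recent sklearn releases require n_features_to_select to be passed as a keyword:

# Minimal RFE example mirroring the SVR.py record, on synthetic data.
from sklearn.datasets import make_regression
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression

X, y = make_regression(n_samples=200, n_features=20, n_informative=4, random_state=0)
selector = RFE(LinearRegression(), n_features_to_select=4, step=1)
selector.fit(X, y)
kept = [i for i, keep in enumerate(selector.support_) if keep]
print("kept feature indices:", kept)
X_reduced = selector.transform(X)  # shape (200, 4)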
x: x > 0, li)\n# List out of range. Fix later.\ntry:\n for el in filter_:\n print(el)\nexcept IndexError:\n print(\"need to Fix\")\n\nprint(filter_[1:4:2])\nprint(filter_[6])","sub_path":"Task17/2_17_2practice.py","file_name":"2_17_2practice.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"188800774","text":"import dash\nimport dash_html_components as html\nimport dash_core_components as dcc\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objects as go\nimport plotly.express as px\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport pandas as pd\nimport pandas_datareader as pdr\nimport requests\nimport json\nfrom datetime import datetime, timedelta\n\ncmps_all = pd.read_csv('https://raw.githubusercontent.com/wieckiewiczpiotr/sample/master/WSE_metadata.csv', \n index_col='code')\ncmps_all['name'] = cmps_all['name'].str.split(',', expand=True)[0].str.split(\n 'for', expand=True)[1].str.lstrip()\ncmps_all['name'] = cmps_all['name'].fillna(cmps_all.index.to_series())\n\ncmps_current = cmps_all[cmps_all['to_date'] == cmps_all['to_date'].max()].copy()\ncmps_outdated = cmps_all[(cmps_all['to_date'] != cmps_all['to_date'].max())].copy()\n\ndropdown_opts_all = [{'label':row[0], 'value':index} \n for index, row in cmps_all.iterrows()]\ndropdown_opts_current = [{'label':row[0], 'value':index} \n for index, row in cmps_current.iterrows()]\n\nreq = requests.get('http://api.nbp.pl/api/exchangerates/tables/a/')\ncurr_list = pd.DataFrame(data=json.loads(req.content)[0]['rates'])\n\ndropdown_currs = [{'label':'{} ({})'.format(row[0], row[1]), \n 'value':row[1]} for index, row in curr_list.iterrows()]\n\ntab_style = {'line-height':'6vh', \n 'padding': '0', \n 'backgroundColor':'#35353b'}\ntab_style_selected = {'line-height':'6vh', \n 'padding': '0', \n 'backgroundColor':'#434247', \n 'color':'white'}\nindices = ['WIG30', 'MWIG40', 'SWIG80', 'WIG_GAMES', 'WIG_BANKI', 'WIG_SPOZYW']\ncurrencies = ['USD', 'EUR', 'CHF', 'GBP']\ncolors = px.colors.cyclical.HSV\ncolors_curr = px.colors.cyclical.Phase\ntext = '''\nThis dashboard is used to check the current and historical value of shares \non the Warsaw Stock Exchange. It is possible to check several \nprices at the same time and compare with the candlestick chart. The top of \nthe page current values of selected stock indexes are displayed \n(refreshed every 2 minutes, given in real time) and current \nexchange rates, updated every hour.\n'''\ntext_source = '''\n#### Source for data\n* index values: [webscraping from stooq.pl](https://stooq.pl/)\n* exchange rates: [NBP web API](https://api.nbp.pl/) for historical data and \n[The Free Currency Converter API](https://free.currencyconverterapi.com/) \nfor current data, updated every hour\n* main stock data: [Quandl](https://www.quandl.com/)\n'''\ntext_curr = '''\nDue to the fact that it is difficult to find a free source of \nhistorical exchange rates, this part of the dashboard has a specific \ndate range up to only **one year back** from the last business day. \nIn any case, it is still possible to choose from a wide range of \ncurrencies made available by the National Bank of Poland.\n'''\ntext_markowitz = '''\n## Markowitz portfolio selection\nThe model describes the relatioship between **risk** and **return** on \ngiven investment. 
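The Filter record closes with the author's own note "List out of range. Fix later."; the __next__ loop indexes past the end of the list. One way to fix it while keeping the interface (a sketch, not the author's fix):

# Possible fix for the Filter.__next__ index bug flagged in the record:
# skip non-matching items and raise StopIteration cleanly at the end.
class Filter:
    def __init__(self, func, li_):
        self.func = func
        self.li_ = li_
        self.ind = 0

    def __iter__(self):
        return self

    def __next__(self):
        while self.ind < len(self.li_):
            item = self.li_[self.ind]
            self.ind += 1
            if self.func(item):
                return item
        raise StopIteration

li = [1, 2, 3, -4, 1, -5, -6, 99, 123, -98, 78]
print(list(Filter(lambda x: x > 0, li)))  # [1, 2, 3, 1, 99, 123, 78]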
By randomly selecting sets of weights for constructing a \nportfolio of selected stocks the most efficient portfolio of the given \nsecurities may be found. Each dot on the graph below symbolize a diffrent \nsets of weights of selected stocks in a investment portfolio. Generally \nspeaking, the best portoflio is the one with the highest **Sharpe Ratio**. \nReturn and volatility (risk) is calculated based on selected \nstocks and time period.\n\n**Bear in mind that the more sets of weights and the longer \ntime period the longer the graph will load.**\n'''\n\napp = dash.Dash(__name__)\nserver = app.server\napp.layout = html.Div(children=[\n html.Div(children=[\n html.Div(\n className='eight columns div-graphs',\n children=[dcc.Interval(id='interval', interval=1000*60*2, n_intervals=0),\n dcc.Interval(id='interval_curr', interval=1000*60*15, n_intervals=0),\n html.Div([dcc.Loading(id='loading_indices', \n children=[html.Div([dcc.Graph(id='indices')], \n style={'height':70, \n 'borderRadius': '15px',\n 'border': '1px solid #35353b',\n 'overflow': 'hidden'}),\n html.Div(id='upd_indx',\n style={'textAlign':'left'})\n ])\n ], style={'marginBottom':20}),\n dcc.Tabs([\n dcc.Tab(label='Stocks', children=[\n html.Div([\n dcc.Loading(id='loading_line', \n children=[html.Div([\n dcc.Graph(id='line-graph')],\n style={'height':300, \n 'marginTop':40})]\n )]),\n html.Div([\n dcc.Loading(id='loading_candle', \n children=[html.Div([\n dcc.Graph(id='candle-graph')],\n style={'height':400, \n 'marginTop':40})]\n )])], style=tab_style, \n selected_style=tab_style_selected),\n dcc.Tab(label='Currencies', children=[\n html.Div([dcc.Markdown(text_curr)], \n style={'textAlign': 'justify', \n 'marginTop':'20px',\n 'marginBottom':'30px'}),\n html.Div([\n html.Div([dcc.Dropdown(\n id='menu_currs',\n options=dropdown_currs,\n multi=True,\n value=['USD', 'EUR', 'CHF', 'GBP'],\n placeholder='Select one or more currencies',\n style={'marginTop':'10px', \n 'backgroundColor': '#434247', \n 'color': 'black'})], \n style={'textAlign': 'left'}),\n dcc.Loading(id='loading_curr_graph',\n children=[html.Div([\n dcc.Graph(id='curr-graph')],\n style={'marginTop':'40px'})]\n )])], \n style=tab_style, \n selected_style=tab_style_selected),\n \n dcc.Tab(label='Portfolio selection', children=[\n html.Div([dcc.Markdown(text_markowitz)], \n style={'textAlign': 'justify', \n 'marginTop':'20px',\n 'marginBottom':'30px'}),\n html.Div([\n dcc.Slider(id='slider',\n min=100,\n max=8000,\n step=100,\n value=3000)]),\n \n html.Div(id='slider_output', \n style={'textAlign': 'left'}),\n \n dcc.Loading(id='loading_sharpe',\n children=[html.Div([\n dcc.Graph(id='sharpe')],\n style={'marginTop':'40px'})]\n )], \n style=tab_style, \n selected_style=tab_style_selected)\n ], \n style={'height':'50px', \n 'fontSize': 20, \n 'marginTop':'1px'},\n colors={'border': '#434247', \n 'primary': 'red', \n 'background': '#212020'})], \n style={'textAlign': 'center'}), \n html.Div(\n className='four columns div-controls controls',\n children=[dcc.Loading(id='loading_currencies', \n children=html.Div([dcc.Graph(id='currencies')], \n style={'height':60, \n 'marginTop': 5})),\n html.H1(\"Investor's dashboard\"), \n html.Div(dcc.Markdown(text), style={'textAlign':'justify'}),\n html.Div(\n dcc.DatePickerRange(\n id='date-picker',\n min_date_allowed=cmps_all['to_date'].min(),\n max_date_allowed=datetime.today().date(),\n start_date=datetime.today().date()-timedelta(days=365),\n end_date=datetime.today().date(),\n display_format='DD MMM YYYY',\n 
style={'marginTop':'30px', \n 'marginBottom': '20px'}), \n style={'textAlign':'center'}),\n html.Div(\n html.Label(['Select one or multiple stocks',\n dcc.Dropdown(\n id='dropdown',\n options=dropdown_opts_current,\n multi=True,\n value=['KGHM', 'KRUK', 'TSGAMES'],\n placeholder='Select one or more companies',\n style={'marginTop':'10px', \n 'backgroundColor': '#35353b', \n 'color': 'black'})]), \n style={'marginTop':'25px'}),\n html.Div(\n dcc.Checklist(\n id='check',\n options=[\n {'label': 'Show historically listed companies in the menu above', \n 'value': 'all'}],\n labelStyle={'display': 'inline-block'},\n style={'marginTop':'15px'})),\n html.Div(\n html.Label(['Choose one of the stocks above for the candlestick chart',\n dcc.Dropdown(\n id='candle_dropdown',\n value='KRUK',\n placeholder='Select a company',\n style={'marginTop':'10px', \n 'color': 'black'})]), \n style={'marginTop':'30px'}),\n html.Div([html.Button(id='apply_button', \n children='Apply changes', \n n_clicks=0,\n style={'fontSize': 22})],\n style={'textAlign': 'center', \n 'marginTop':'30px',\n 'marginBottom':'10px'}),\n html.Div([dcc.Markdown(text_source)], \n style={'textAlign': 'justify',\n 'marginBottom':'10px',\n 'marginTop':'70px'})])])])\n#--------------------------------------------------------------------------------------------------------------\n#callback for updating menu for candlestick chart\n@app.callback(Output('candle_dropdown', 'options'),\n [Input('dropdown', 'value')])\ndef update_candle_menu(value):\n options = [{'label':tick, 'value':tick} for tick in value]\n return options\n\n#callback for showing last update\n@app.callback(Output('upd_indx', 'children'),\n [Input('interval', 'n_intervals')])\ndef update_upd_indx(n):\n return 'Last update: {}'.format(datetime.now().strftime('%H:%M:%S'))\n\n#callback for updating indices top of the app\n@app.callback(Output('indices', 'figure'),\n [Input('interval', 'n_intervals')])\ndef update_indices(n):\n rates = []\n changes = []\n traces = []\n try:\n req = requests.get(\"https://stooq.pl/t/?i=528\")\n soup = BeautifulSoup(req.text, 'html.parser')\n for index in indices:\n rates.append(float(soup.find(text=index).next_element.next_sibling.text))\n try:\n changes.append(float(soup.find(text=index).next_element.next_sibling.next_sibling.next_sibling.text))\n except:\n continue\n\n if len(changes) == len(indices):\n for number, index, rate, change in zip(range(len(indices)), indices, rates, changes):\n traces.append(go.Indicator(\n mode='number+delta',\n title=index,\n title_font_size=14,\n number_font_size=20,\n value=rate,\n delta={'reference': rate-change, \n 'relative': True, \n 'valueformat': '.2%'},\n domain={'row': 1, 'column': number}))\n else:\n for number, index, rate in zip(range(len(indices)), indices, rates):\n traces.append(go.Indicator(\n mode='number',\n title=index,\n title_font_size=14,\n number_font_size=20,\n value=rate,\n domain={'row': 1, \n 'column': number}))\n figure = {'data': traces,\n 'layout': go.Layout(\n grid={'rows': 1, \n 'columns': len(indices), \n 'pattern': \"independent\"},\n margin=dict(t=30, b=0, l=0, r=0),\n plot_bgcolor='#35353b',\n paper_bgcolor='#35353b',\n font={'color': '#d8d8d8'},\n height=70)}\n return figure\n \n except: \n try:\n inds = ['WIG', 'WIG20', 'MWIG40', 'SWIG80', 'WIG_ODZIEZ', 'WIG_LEKI']\n traces = []\n for index, number in zip(inds, range(len(inds))):\n code = 'WSE/' + index\n df = pdr.get_data_quandl(\n code, \n api_key='fHsXs9kzqak6UF1haCww', \n start=datetime.today().date()-timedelta(days=5))\n 
traces.append(go.Indicator(\n mode='number',\n title=index,\n title_font_size=14,\n number_font_size=20,\n value=df['Close'][0],\n domain={'row': 1, \n 'column': number}))\n figure = {'data': traces,\n 'layout': go.Layout(\n grid={'rows': 1, \n 'columns': len(inds), \n 'pattern': \"independent\"},\n margin=dict(t=30, b=0, l=0, r=0),\n plot_bgcolor='#35353b',\n paper_bgcolor='#35353b',\n font={'color': '#d8d8d8'},\n height=70)}\n return figure\n except:\n figure = {\n 'data': [go.Indicator(title='Could not connect to stooq to get index data',\n title_font_size=20,\n number_font_size=1)],\n 'layout': go.Layout(\n plot_bgcolor='#35353b',\n paper_bgcolor='#35353b',\n font={'color': 'tomato'},\n height=70)}\n return figure\n\n#callback for updating exchange rates at the top of the app\n@app.callback(Output('currencies', 'figure'),\n [Input('interval_curr', 'n_intervals')])\ndef update_exchange_rates(n):\n traces = []\n try:\n req1 = requests.get('https://free.currconv.com/api/v7/convert?q=USD_PLN,EUR_PLN&compact=ultra&apiKey=ed6303b119a9753725a8')\n req2 = requests.get('https://free.currconv.com/api/v7/convert?q=CHF_PLN,GBP_PLN&compact=ultra&apiKey=ed6303b119a9753725a8')\n usd = json.loads(req1.content)['USD_PLN']\n eur = json.loads(req1.content)['EUR_PLN']\n chf = json.loads(req2.content)['CHF_PLN']\n gbp = json.loads(req2.content)['GBP_PLN']\n curr = [usd, eur, chf, gbp]\n\n for name, rate, number in zip(currencies, curr, range(len(currencies))):\n traces.append(go.Indicator(\n mode='number',\n title=name,\n title_font_size=14,\n number_font_size=20,\n value=rate,\n number_valueformat = '.5f',\n domain={'row': 1, 'column': number}))\n\n figure = {'data': traces,\n 'layout': go.Layout(\n grid={'rows': 1, \n 'columns': len(currencies), \n 'pattern': \"independent\"},\n margin=dict(t=23, b=0, l=0, r=0),\n plot_bgcolor='#35353b',\n paper_bgcolor='#35353b',\n font={'color': '#d8d8d8'},\n height=60)}\n return figure\n except:\n try:\n req = requests.get('http://api.nbp.pl/api/exchangerates/tables/a/')\n usd = json.loads(req.content)[0]['rates'][1]['mid']\n eur = json.loads(req.content)[0]['rates'][7]['mid']\n chf = json.loads(req.content)[0]['rates'][9]['mid']\n gbp = json.loads(req.content)[0]['rates'][10]['mid']\n curr = [usd, eur, chf, gbp]\n\n for name, rate, number in zip(currencies, curr, range(len(currencies))):\n traces.append(go.Indicator(\n mode='number',\n title=name,\n title_font_size=14,\n number_font_size=20,\n value=rate,\n number_valueformat = '.4f',\n domain={'row': 1, 'column': number}))\n\n figure = {'data': traces,\n 'layout': go.Layout(\n grid={'rows': 1, \n 'columns': len(currencies), \n 'pattern': \"independent\"},\n margin=dict(t=23, b=0, l=0, r=0),\n plot_bgcolor='#35353b',\n paper_bgcolor='#35353b',\n font={'color': '#d8d8d8'},\n height=60)}\n return figure\n \n except:\n figure = {\n 'data': [go.Indicator(title='Could not connect to API to get exchange rates data',\n title_font_size=15,\n number_font_size=1)],\n 'layout': go.Layout(\n plot_bgcolor='#35353b',\n paper_bgcolor='#35353b',\n font={'color': 'tomato'},\n height=70)}\n return figure\n\n#callback for updating main ticks dropdown menu\n@app.callback(Output('dropdown', 'options'),\n [Input('check', 'value')])\ndef update_dropdown(value):\n if value == None or len(value) == 0:\n return dropdown_opts_current\n else:\n return dropdown_opts_all\n\n@app.callback(Output('slider_output', 'children'),\n [Input('slider', 'value')])\ndef update_output(value):\n return 'Number of sets: {}'.format(value)\n \n#callback for 
updating main time-series graph\n@app.callback(Output('line-graph', 'figure'),\n [Input('apply_button', 'n_clicks')],\n [State('dropdown', 'value'),\n State('date-picker', 'start_date'),\n State('date-picker', 'end_date')])\ndef update_line(n, ticks, start_date, end_date):\n traces = []\n for name, color in zip(ticks, colors):\n tick = 'WSE/' + name\n df = pdr.get_data_quandl(tick, \n api_key='fHsXs9kzqak6UF1haCww', \n start=start_date, \n end=end_date)\n traces.append({'x': df.index, \n 'y': df['Close'], \n 'name': name, \n 'mode': 'lines',\n 'line': dict(color=color)\n })\n figure = {'data': traces,\n 'layout': {'height': 300,\n 'title': 'Stock chart for {}'.format(', '.join(ticks)),\n 'yaxis': {'gridcolor': '#35353b'},\n 'xaxis': {'gridcolor': '#35353b'},\n 'plot_bgcolor': '#434247',\n 'paper_bgcolor': '#434247',\n 'margin': dict(t=40, b=30, l=60, r=60),\n 'font': {'color': '#d8d8d8'}}}\n return figure\n\n#callback for updating candlestick graph\n@app.callback(Output('candle-graph', 'figure'),\n [Input('apply_button', 'n_clicks')],\n [State('candle_dropdown', 'value'),\n State('date-picker', 'start_date'),\n State('date-picker', 'end_date')])\ndef update_candle(n, tick, start_date, end_date):\n code = 'WSE/' + tick\n df = pdr.get_data_quandl(code, \n api_key='fHsXs9kzqak6UF1haCww', \n start=start_date, \n end=end_date)\n figure = {'data':[go.Candlestick(\n x=df.index,\n open=df['Open'],\n close=df['Close'],\n low=df['Low'], \n high=df['High'],\n increasing_line_color= 'lime', \n decreasing_line_color= 'red')],\n 'layout': go.Layout(height=350,\n title='Candlestick chart for {}'.format(tick),\n plot_bgcolor='#434247',\n paper_bgcolor='#434247',\n yaxis={'gridcolor': '#35353b'},\n xaxis={'gridcolor': '#35353b'},\n font={'color': '#d8d8d8'},\n margin=dict(t=30, b=0, l=60, r=60))}\n return figure\n\n#callback for updating currencies chart\n@app.callback(Output('curr-graph', 'figure'),\n [Input('menu_currs', 'value')])\ndef update_curr_chart(currs):\n traces = []\n for curr, color in zip(currs, colors_curr):\n req = requests.get('http://api.nbp.pl/api/exchangerates/rates/a/{}/{}/{}/'.format(\n curr,\n datetime.today().date()-timedelta(days=364), \n datetime.today().date()))\n df = pd.DataFrame(data=json.loads(req.content)['rates'])\n\n traces.append({'x': df['effectiveDate'], \n 'y': df['mid'], \n 'name': curr, \n 'mode': 'lines',\n 'line': dict(color=color)})\n figure = {'data': traces,\n 'layout': {\n 'title': 'Echange rates for {}'.format(', '.join(currs)),\n 'yaxis': {'gridcolor': '#35353b'},\n 'xaxis': {'gridcolor': '#35353b'},\n 'plot_bgcolor': '#434247',\n 'paper_bgcolor': '#434247',\n 'margin': dict(t=30, b=30, l=60, r=60),\n 'font': {'color': '#d8d8d8'}}}\n return figure\n#callback for updating currencies chart\n@app.callback(Output('sharpe', 'figure'),\n [Input('apply_button', 'n_clicks')],\n [State('dropdown', 'value'),\n State('slider', 'value'),\n State('date-picker', 'start_date'),\n State('date-picker', 'end_date')\n ])\ndef update_sharpe(n, ticks, tries, start_date, end_date):\n if len(ticks) >= 2:\n df = pd.DataFrame()\n ret_arr = np.zeros(tries)\n vol_arr = np.zeros(tries)\n sr_arr = np.zeros(tries)\n\n for name in ticks:\n tick = 'WSE/' + name\n df_tick = pdr.get_data_quandl(tick, \n api_key='fHsXs9kzqak6UF1haCww', \n start=start_date, \n end=end_date)\n df_tick.rename({'Close': name}, axis=1, inplace=True)\n df = df.join(df_tick[name], how='outer')\n\n logs = np.log(df/df.shift(-1))\n all_weights = np.zeros((tries, len(logs.columns)))\n\n for i in range(tries):\n weights 
= np.array(np.random.random(len(logs.columns)))\n weights = weights/np.sum(weights)\n all_weights[i,:] = weights\n ret_arr[i] = np.sum(logs.mean() * weights * len(logs))\n vol_arr[i] = np.sqrt(np.dot(weights.T, np.dot(logs.cov() * len(logs), weights)))\n sr_arr[i] = ret_arr[i] / vol_arr[i]\n\n weights_as_string = []\n for weight in all_weights:\n weights_as_string.append(', '.join(str(round(i, 3)) for i in weight))\n\n df = pd.DataFrame({'returns': ret_arr,\n 'volatility': vol_arr,\n 'sharpe': sr_arr,\n 'weights': weights_as_string})\n\n figure = px.scatter(df, x='volatility', \n y='returns', \n color='sharpe',\n color_continuous_scale='RdPu_r',\n hover_data=['weights'])\n \n figure.update_traces(marker=dict(size=9, line=dict(width=1)))\n\n figure.update_layout(\n title='Portfolio allocation for {}'.format(', '.join(ticks)),\n yaxis={'gridcolor': '#35353b'},\n xaxis={'gridcolor': '#35353b'},\n xaxis_title='Volatility',\n yaxis_title='Returns',\n plot_bgcolor='#434247',\n paper_bgcolor='#434247',\n margin=dict(t=40, b=30, l=60, r=60),\n font={'color': '#d8d8d8'})\n return figure","sub_path":"financial_dashboard/financial.py","file_name":"financial.py","file_ext":"py","file_size_in_byte":27661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"256072998","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2015 Red Hat, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
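financial.py's update_sharpe callback above is the Markowitz simulation its text block describes. Stripped of Dash and Quandl, the core is only this; prices here are synthetic, and note the record computes log returns with shift(-1) where shift(1) is the usual convention:

# Core of the random-portfolio (Markowitz) simulation from financial.py,
# with synthetic random-walk prices instead of Quandl data.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
prices = pd.DataFrame(
    np.exp(np.cumsum(rng.normal(0.0005, 0.01, size=(250, 3)), axis=0)),
    columns=["A", "B", "C"])
logs = np.log(prices / prices.shift(1)).dropna()  # the record uses shift(-1)

tries = 2000
best_w, best_sr = None, -np.inf
for _ in range(tries):
    w = rng.random(3)
    w /= w.sum()                               # weights sum to 1
    ret = np.sum(logs.mean() * w) * len(logs)  # period return from mean log returns
    vol = np.sqrt(np.dot(w, np.dot(logs.cov().values * len(logs), w)))
    if ret / vol > best_sr:
        best_w, best_sr = w, ret / vol

print("best Sharpe:", round(best_sr, 3), "weights:", np.round(best_w, 3))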
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport flask\nimport json\nimport os\nfrom pprint import pprint\nimport sys\n\nimport server.auth\nimport server.db.models\nimport server.utils\n\nfrom eve import Eve\nfrom eve_sqlalchemy import SQL\nfrom eve_sqlalchemy.validation import ValidatorSQL\nfrom flask import abort\nfrom sqlalchemy.sql import text\n\nfrom dci_databrowser import dci_databrowser\n\n\ndef site_map():\n for rule in app.url_map.iter_rules():\n pprint(rule)\n\n\ndef load_docs(app):\n try:\n from eve_docs import eve_docs\n from flask.ext.bootstrap import Bootstrap\n Bootstrap(app)\n app.register_blueprint(eve_docs, url_prefix='/docs')\n except ImportError:\n print(\"Failed to load eve_docs.\")\n\n\ndef set_real_owner(resource, items):\n \"\"\"Hack to allow the 'admin' user to change the team_id.\"\"\"\n if flask.request.authorization.username != 'admin':\n return\n # NOTE(Gonéri): the fields returned by flask.request.get_json() are\n # already mangled by the Role Based Access Control.\n request_fields = json.loads(flask.request.data.decode('utf-8'))\n if \"team_id\" in request_fields:\n items[0]['team_id'] = request_fields['team_id']\n\n\nclass DciControlServer(Eve):\n\n _DCI_MODEL = None\n\n def __init__(self, dci_model, **kwargs):\n super(DciControlServer, self).__init__(**kwargs)\n\n DciControlServer._DCI_MODEL = dci_model\n DciControlServer._DCI_MODEL.metadata.bind = DciControlServer.\\\n _DCI_MODEL.engine\n self._db = self.data.driver\n self._db.Model = DciControlServer._DCI_MODEL.base\n self._init_hooks()\n\n @staticmethod\n def pick_jobs(documents):\n session = DciControlServer._DCI_MODEL.get_session()\n query = text(\"\"\"\n SELECT\n testversions.id\n FROM\n testversions, remotecis\n WHERE testversions.id NOT IN (\n SELECT\n jobs.testversion_id\n FROM jobs\n WHERE jobs.remoteci_id=:remoteci_id\n AND\n jobs.created_at > now() - interval '1 day'\n ) AND testversions.test_id=remotecis.test_id AND\n remotecis.id=:remoteci_id\n LIMIT 1\n \"\"\")\n\n for d in documents:\n if 'testversion_id' in d:\n continue\n r = DciControlServer._DCI_MODEL.engine.execute(\n query, remoteci_id=d['remoteci_id']).fetchone()\n if r is None:\n abort(412, \"No test to run left.\")\n testversion = session.query(\n DciControlServer._DCI_MODEL.base.classes.testversions).\\\n get(str(r[0]))\n d['testversion_id'] = testversion.id\n session.close()\n\n @staticmethod\n def stop_running_jobs(documents):\n session = DciControlServer._DCI_MODEL.get_session()\n Jobs = DciControlServer._DCI_MODEL.base.classes.jobs\n Jobstates = DciControlServer._DCI_MODEL.base.classes.jobstates\n for d in documents:\n jobs = session.query(Jobs).filter(\n Jobs.remoteci_id == d['remoteci_id']).all()\n for job in jobs:\n jobstate = job.jobstates.filter(\n Jobstates.job_id == job.id).first()\n if jobstate.status == 'ongoing':\n session.add(\n Jobstates(\n job_id=job.id,\n status='unfinished',\n comment='The remoteci has started a new job.',\n team_id=d['team_id']))\n session.commit()\n session.close()\n\n @staticmethod\n def aggregate_job_data(response):\n session = DciControlServer._DCI_MODEL.get_session()\n data = {}\n job = session.query(DciControlServer._DCI_MODEL.base.classes.jobs).\\\n get(response['id'])\n my_datas = (\n job.testversion.version.product.data,\n job.testversion.version.data,\n job.testversion.test.data,\n job.remoteci.data)\n for my_data in my_datas:\n if my_data:\n data = server.utils.dict_merge(data, my_data)\n session.close()\n 
response['data'] = data\n\n @staticmethod\n def get_jobs_extra(response):\n if not flask.request.args.get('extra_data'):\n return\n\n session = DciControlServer._DCI_MODEL.get_session()\n for job in response[\"_items\"]:\n extra_data = {}\n\n # Get the jobstate\n Jobstates = DciControlServer._DCI_MODEL.base.classes.jobstates\n jobstate = session.query(Jobstates).\\\n order_by(Jobstates.created_at.desc()).\\\n filter(Jobstates.job_id == job[\"id\"]).first()\n if jobstate:\n extra_data[\"last_status\"] = jobstate.status\n extra_data[\"last_update\"] = jobstate.created_at\n\n # Get the remote ci name\n Remotecis = DciControlServer._DCI_MODEL.base.classes.remotecis\n remoteci = session.query(Remotecis).\\\n filter(Remotecis.id == job[\"remoteci_id\"]).one()\n if remoteci:\n extra_data[\"remoteci\"] = remoteci.name\n\n # Get the testversion\n Testversions = DciControlServer._DCI_MODEL.base.classes.\\\n testversions\n testversion = session.query(Testversions).get(\n job[\"testversion_id\"])\n if testversion:\n # Get the version\n Versions = DciControlServer._DCI_MODEL.base.classes.versions\n version = session.query(Versions).get(testversion.version_id)\n if version:\n extra_data[\"version\"] = version.name\n\n # Get the product\n Products = DciControlServer._DCI_MODEL.base.classes.\\\n products\n product = session.query(Products).get(version.product_id)\n if product:\n extra_data[\"product\"] = product.name\n\n # Get the test\n Tests = DciControlServer._DCI_MODEL.base.classes.tests\n test = session.query(Tests).get(testversion.test_id)\n if test:\n extra_data[\"test\"] = test.name\n\n job[\"extra_data\"] = extra_data\n session.close()\n\n @staticmethod\n def get_versions_extra(response):\n if not flask.request.args.get('extra_data'):\n return\n\n session = DciControlServer._DCI_MODEL.get_session()\n versions_to_remove = []\n for version in response[\"_items\"]:\n version[\"extra_data\"] = []\n\n Testversions = DciControlServer._DCI_MODEL.base.classes.\\\n testversions\n testversions = session.query(Testversions).\\\n filter(Testversions.version_id == version[\"id\"]).all()\n\n for testversion in testversions:\n extra_data = {}\n\n Tests = DciControlServer._DCI_MODEL.base.classes.tests\n test = session.query(Tests).get(testversion.test_id)\n if test:\n extra_data[\"test\"] = test.name\n\n Jobs = DciControlServer._DCI_MODEL.base.classes.jobs\n job = session.query(Jobs).\\\n filter(Jobs.testversion_id == testversion.id).first()\n if job:\n extra_data[\"job_id\"] = job.id\n Remotecis = DciControlServer._DCI_MODEL.base.classes.\\\n remotecis\n remoteci = session.query(Remotecis).get(job.remoteci_id)\n if remoteci:\n extra_data[\"remoteci\"] = remoteci.name\n\n Jobstates = DciControlServer._DCI_MODEL.base.classes.\\\n jobstates\n jobstate = job.jobstates.filter(\n Jobstates.job_id == job.id).first()\n if jobstate:\n extra_data[\"status\"] = jobstate.status\n else:\n versions_to_remove.append(version)\n continue\n version[\"extra_data\"].append(extra_data)\n\n for version in versions_to_remove:\n response[\"_items\"].remove(version)\n session.close()\n\n @staticmethod\n def get_remotecis_extra(response):\n if not (flask.request.args.get('extra_data') and\n flask.request.args.get('version_id')):\n return\n\n version_id = flask.request.args.get('version_id')\n session = DciControlServer._DCI_MODEL.get_session()\n Remotecis = DciControlServer._DCI_MODEL.base.classes.remotecis\n remotecisTotal = session.query(Remotecis).count()\n\n rate = {\"success\": 0, \"failure\": 0, \"ongoing\": 0,\n \"not_started\": 
remotecisTotal}\n for remoteci in response[\"_items\"]:\n Testversions = DciControlServer._DCI_MODEL.base.classes.\\\n testversions\n testversions = session.query(Testversions).\\\n filter(Testversions.version_id == version_id).all()\n\n for testversion in testversions:\n Jobs = DciControlServer._DCI_MODEL.base.classes.jobs\n job = session.query(Jobs).\\\n filter((Jobs.testversion_id == testversion.id) and\n (Jobs.remoteci_id == remoteci[\"id\"])).first()\n if job:\n Jobstates = DciControlServer._DCI_MODEL.base.classes.\\\n jobstates\n jobstate = job.jobstates.filter(\n Jobstates.job_id == job.id).first()\n if jobstate:\n rate[jobstate.status] += 1\n rate[\"not_started\"] -= 1\n if rate[\"not_started\"] < 0:\n rate[\"not_started\"] = 0\n response[\"extra_data\"] = rate\n\n def _init_hooks(self):\n self.on_insert += set_real_owner\n self.on_insert_jobs += DciControlServer.pick_jobs\n self.on_insert_jobs += DciControlServer.stop_running_jobs\n self.on_fetched_item_jobs += DciControlServer.aggregate_job_data\n self.on_fetched_resource_jobs += DciControlServer.get_jobs_extra\n self.on_fetched_resource_versions += DciControlServer.\\\n get_versions_extra\n self.on_fetched_resource_remotecis += DciControlServer.\\\n get_remotecis_extra\n\n self.register_blueprint(dci_databrowser)\n load_docs(self)\n\n\ndef create_app(db_uri=None):\n if not db_uri:\n db_uri = os.environ.get(\n 'OPENSHIFT_POSTGRESQL_DB_URL',\n 'postgresql://boa:boa@127.0.0.1:5432/dci_control_server')\n dci_model = server.db.models.DCIModel(db_uri)\n settings = {\n 'SQLALCHEMY_DATABASE_URI': db_uri,\n 'LAST_UPDATED': 'updated_at',\n 'DATE_CREATED': 'created_at',\n 'ID_FIELD': 'id',\n 'ITEM_URL': 'regex(\"[\\.-a-z0-9]{8}-[-a-z0-9]{4}-'\n '[-a-z0-9]{4}-[-a-z0-9]{4}-[-a-z0-9]{12}\")',\n 'ITEM_LOOKUP_FIELD': 'id',\n 'ETAG': 'etag',\n 'DEBUG': True,\n 'URL_PREFIX': 'api',\n 'X_DOMAINS': '*',\n 'X_HEADERS': 'Authorization',\n 'DOMAIN': dci_model.generate_eve_domain_configuration(),\n # The following two lines will output the SQL statements\n # executed by SQLAlchemy. Useful while debugging and in\n # development. Turned off by default\n # --------\n 'SQLALCHEMY_ECHO': False,\n 'SQLALCHEMY_RECORD_QUERIES': False,\n }\n basic_auth = server.auth.DCIBasicAuth(dci_model)\n return DciControlServer(dci_model, settings=settings,\n validator=ValidatorSQL, data=SQL, auth=basic_auth)\n\n\nif __name__ == \"__main__\":\n port = 5000\n if len(sys.argv) > 1:\n port = int(sys.argv[1])\n app = create_app()\n site_map()\n app.run(debug=True, port=port)\n","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"208072940","text":"import torch\nfrom torch import nn\nfrom torchvision import datasets, transforms, models\nimport argparse\nfrom collections import OrderedDict\nimport json\n\n# Train a new network on a data set with train.py\n# Basic usage: python train.py data_directory\n# Prints out training loss, validation loss, and validation accuracy as the network trains\n\ndef main():\n args = setCommandArgs()\n model, model_name = build_model(args)\n train_model(args, model)\n save_model(args, model, model_name)\n\ndef train_model(command_args, model):\n trainloader, validloader, testloader = get_loaders(command_args)\n\n device = torch.device(\"cuda\" if command_args.gpu else \"cpu\")\n print(\"using device: {}. 
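server/app.py's aggregate_job_data merges the product/version/test/remoteci data dicts through server.utils.dict_merge, whose body is not part of this record. A plausible recursive merge of the kind such a helper usually is; this is an assumption, not the project's actual code:

# Hypothetical sketch of what server.utils.dict_merge likely does (the real
# implementation is not in this record): later values win, nested dicts
# are merged recursively.
def dict_merge(base, extra):
    out = dict(base)
    for key, value in extra.items():
        if isinstance(value, dict) and isinstance(out.get(key), dict):
            out[key] = dict_merge(out[key], value)
        else:
            out[key] = value
    return out

a = {"settings": {"site": "dev"}, "retries": 1}
b = {"settings": {"timeout": 30}, "retries": 2}
print(dict_merge(a, b))  # {'settings': {'site': 'dev', 'timeout': 30}, 'retries': 2}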
learning rate: {}\".format(device, command_args.learning_rate))\n\n model.to(device);\n criterion = nn.NLLLoss()\n\n optimizer = torch.optim.Adam(model.classifier.parameters(), lr=command_args.learning_rate)\n\n epochs = command_args.epochs\n running_loss = 0\n print_every = 25\n\n for epoch in range(epochs):\n steps = 0\n for inputs, labels in trainloader:\n steps += 1\n # Move input and label tensors to the default device\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n optimizer.zero_grad()\n\n logps = model.forward(inputs)\n loss = criterion(logps, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n if steps % print_every == 0:\n running_test_loss = 0\n accuracy = 0\n model.eval() # set to eval mode to not use dropout\n with torch.no_grad():\n for inputs, labels in testloader:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n logps = model.forward(inputs)\n test_batch_loss = criterion(logps, labels)\n\n running_test_loss += test_batch_loss.item()\n\n # Calculate accuracy\n ps = torch.exp(logps)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n\n print(f\"Epoch {epoch+1}/{epochs} ({steps}/{len(trainloader)}): \"\n f\"Train loss: {running_loss/print_every:.3f}.. \"\n f\"Test loss: {running_test_loss/len(testloader):.3f}.. \"\n f\"Test accy: {100*accuracy/len(testloader):.3f}%\")\n\n running_loss = 0\n model.train() # set back to train mode\n\ndef save_model(command_args, model, model_name):\n with open('cat_to_name.json', 'r') as f:\n cat_to_name = json.load(f)\n\n checkpoint = {'input_size': [3, 224, 224],\n 'output_size': 103,\n 'features': model.features,\n 'classifier': model.classifier,\n 'state_dict': model.state_dict(),\n 'cat_to_name': cat_to_name,\n 'arch': command_args.arch}\n\n checkpoint_filename = f\"checkpoint_{model_name}.pth\"\n\n save_path = f\"{command_args.save_dir}/{checkpoint_filename}\"\n\n torch.save(checkpoint, save_path)\n\n print(f\"model checkpoint saved: {save_path}\")\n\n\ndef build_model(args):\n model, model_name = get_pretrained_model(args.arch)\n print(f\"using arch: {model_name}\")\n\n if(isinstance(model.classifier, torch.nn.modules.container.Sequential)):\n for i in range(len(model.classifier)):\n if isinstance(model.classifier[i], torch.nn.modules.linear.Linear):\n input_features = model.classifier[i].in_features\n break\n else:\n input_features = model.classifier.in_features\n\n for param in model.parameters():\n param.requires_grad = False\n\n # create a new layer to perform classifications. The dataset has 102 labels\n classifier = nn.Sequential(OrderedDict([\n ('drop1', nn.Dropout(0.2)),\n ('fc1', nn.Linear(input_features, args.hidden_units)),\n ('relu', nn.ReLU()),\n ('drop', nn.Dropout(0.2)),\n ('fc2', nn.Linear(args.hidden_units, 102)),\n ('output', nn.LogSoftmax(dim=1))]))\n\n # replace last layer with new trainable one\n model.classifier = classifier\n\n return model, model_name\n\ndef get_pretrained_model(arch):\n if arch == 'vgg16':\n model = models.vgg16(pretrained=True)\n model_name = 'vgg16'\n elif arch == 'alexnet':\n model = models.alexnet(pretrained=True)\n model_name = 'alexnet'\n else:\n model = models.densenet121(pretrained=True)\n print(f\"arch {arch} not found. 
Using default model.\");\n model_name = 'densenet121'\n return model, model_name\n\n\ndef get_loaders(command_args):\n data_dir = command_args.data_dir\n train_dir = data_dir + '/train'\n valid_dir = data_dir + '/valid'\n test_dir = data_dir + '/test'\n\n train_transform = transforms.Compose([transforms.RandomRotation(30),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n valid_transform = transforms.Compose([transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n test_transform = transforms.Compose([transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])])\n\n # TODO: Load the datasets with ImageFolder\n train_dataset = datasets.ImageFolder(train_dir, transform=train_transform)\n valid_dataset = datasets.ImageFolder(valid_dir, transform=valid_transform)\n test_dataset = datasets.ImageFolder(test_dir, transform=test_transform)\n\n # TODO: Using the image datasets and the trainforms, define the dataloaders\n trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)\n validloader = torch.utils.data.DataLoader(valid_dataset, batch_size=64, shuffle=True)\n testloader = torch.utils.data.DataLoader(test_dataset, batch_size=64)\n\n return trainloader, validloader, testloader\n\ndef setCommandArgs():\n parser = argparse.ArgumentParser(\n description='Options for training a model'\n )\n\n parser.add_argument('data_dir', help='directory to train/test/validation data')\n parser.add_argument('--save_dir', action=\"store\", dest=\"save_dir\", required=True)\n parser.add_argument('--arch', action=\"store\", dest=\"arch\", default=\"vgg13\", help='valid architecures are vgg16, alexnet, and densenet121')\n parser.add_argument('--learning_rate', action=\"store\", dest=\"learning_rate\", default=0.01, type=float)\n parser.add_argument('--hidden_units', action=\"store\", dest=\"hidden_units\", default=512, type=int)\n parser.add_argument('--epochs', action=\"store\", dest=\"epochs\", default=20, type=int)\n parser.add_argument('--gpu', action=\"store_true\", dest=\"gpu\", default=False)\n\n args = parser.parse_args()\n return args\n\n\nif __name__ == \"__main__\":\n main()\n#python train.py flowers --save_dir . 
--learning_rate 0.001 --arch vgg16 --hidden_units 1000 --epochs 3 --gpu","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"602969526","text":"import scipy.io as sio\nimport numpy as np\nimport torch\n\n\n\n\ndef load_data(filename, batch_size):\n mat_content = sio.loadmat(filename)\n train_data = mat_content['train_data']\n train_data = train_data[0]\n train_label = mat_content['train_label']\n train_label = train_label[0]\n test_data = mat_content['test_data']\n test_data = test_data[0]\n test_label = mat_content['test_label']\n test_label = test_label[0]\n\n\n num_train = train_data.shape[0]\n num_test = test_data.shape[0]\n input_size = train_data[1].shape[1]\n print(input_size)\n\n train_itr = []\n\n for i in range(0,num_train, batch_size):\n if (i+batch_size)>num_train:\n current_data = train_data[i:num_train]\n current_label = train_label[i:num_train]\n else:\n current_data = train_data[i:i+batch_size]\n current_label = train_label[i:i+batch_size]\n data_length =[len(sample) for sample in current_data]\n pad_data = np.zeros((len(data_length), max(data_length), input_size))\n for i , sample_len in enumerate(data_length):\n sample = current_data[i]\n\n pad_data[i, 0:sample_len, :] = sample\n\n data = torch.from_numpy(pad_data).float()\n print('data: ', data.shape)\n label = torch.from_numpy(current_label)\n print('label: ', label.shape)\n current_batch = [data, label]\n train_itr.append(current_batch)\n\n\n test_itr = []\n\n for i in range(0,num_test, batch_size):\n if (i+batch_size)>num_test:\n current_data = test_data[i:num_test]\n current_label = test_label[i:num_test]\n else:\n current_data = test_data[i:i+batch_size]\n current_label = test_label[i:i+batch_size]\n data_length = [len(sample) for sample in current_data]\n pad_data = np.zeros((len(data_length), max(data_length), input_size))\n for i, sample_len in enumerate(data_length):\n sample = current_data[i]\n\n pad_data[i,0:sample_len, :] = sample\n\n data = torch.from_numpy(pad_data).float()\n print('data: ', data.shape)\n label = torch.from_numpy(current_label)\n print('label: ', label.shape)\n current_batch = [data, label]\n test_itr.append(current_batch)\n\n return train_itr, test_itr","sub_path":"Homework 3/Assignment_3 copy/code/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"420530643","text":"# -*- coding: utf-8 -*-\n\"\"\"\nflash-test (Brick/Bricklet/Extension Flash and Test tool)\nCopyright (C) 2015 Matthias Bolte <matthias@tinkerforge.com>\n\nbricklet_joystick.py: Joystick plugin\n\nThis program is free software; you can redistribute it and/or\nmodify it under the terms of the GNU General Public License\nas published by the Free Software Foundation; either version 2\nof the License, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
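train.py saves its checkpoint with the keys 'arch', 'classifier', 'state_dict' and 'cat_to_name'. A hedged sketch of the matching load side for a predict script, rebuilding the torchvision backbone named in 'arch' and restoring the trained head; the checkpoint filename is hypothetical:

# Sketch of the load side matching train.py's checkpoint format
# (keys taken from the record: 'arch', 'classifier', 'state_dict', 'cat_to_name').
import torch
from torchvision import models

def load_checkpoint(path):
    checkpoint = torch.load(path, map_location="cpu")
    arch = checkpoint["arch"]
    if arch == "vgg16":
        model = models.vgg16(pretrained=True)
    elif arch == "alexnet":
        model = models.alexnet(pretrained=True)
    else:
        model = models.densenet121(pretrained=True)
    model.classifier = checkpoint["classifier"]  # trained head was saved whole
    model.load_state_dict(checkpoint["state_dict"])
    model.eval()
    return model, checkpoint["cat_to_name"]

# model, cat_to_name = load_checkpoint("checkpoint_vgg16.pth")  # hypothetical path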
See the GNU\nGeneral Public License for more details.\n\nYou should have received a copy of the GNU General Public\nLicense along with this program; if not, write to the\nFree Software Foundation, Inc., 59 Temple Place - Suite 330,\nBoston, MA 02111-1307, USA.\n\"\"\"\n\nfrom PyQt5 import Qt, QtGui, QtCore\n\nfrom ..tinkerforge.bricklet_joystick import BrickletJoystick\nfrom ..bricklet_base import BrickletBase, get_bricklet_firmware_filename\nfrom ..callback_emulator import CallbackEmulator\n\nLIMIT = 99\n\nclass Plugin(BrickletBase):\n TODO_TEXT = u\"\"\"\\\n1. Verbinde Joystick Bricklet mit Port C\n2. Drücke \"Flashen\"\n3. Warte bis Master Brick neugestartet hat (Tool Status ändert sich auf \"Plugin gefunden\")\n4. Überprüfe Joystick und Taster:\n * Joystick wird in allen vier Ecken erkannt\n * Tasterdruck wird angezeigt\n5. Das Bricklet ist fertig, mit einem Joystick-Knopf in normale ESD-Tüte stecken, zuschweißen, Aufkleber aufkleben\n6. Gehe zu 1\n\"\"\"\n\n qtcb_pressed = QtCore.pyqtSignal()\n qtcb_released = QtCore.pyqtSignal()\n\n def __init__(self, *args):\n BrickletBase.__init__(self, *args)\n\n self.cbe_position = None\n self.position_transition = [0, 0, 0, 0] # [TR, BR, BL, TL] 0 = not arrived, 1 = arrived, 2 = arrived and left\n self.button_transition = 0 # 0 = not pressed, 1 = pressed, 2 = pressed and released\n self.last_position_status = '?', '?', '?', '?'\n self.last_button_status = '?'\n\n self.qtcb_pressed.connect(lambda: self.cb_button(True))\n self.qtcb_released.connect(lambda: self.cb_button(False))\n\n def start(self):\n BrickletBase.start(self)\n\n def stop(self):\n super().stop()\n if self.cbe_position != None:\n self.cbe_position.set_period(0)\n\n def get_device_identifier(self):\n return BrickletJoystick.DEVICE_IDENTIFIER\n\n def flash_clicked(self):\n self.flash_bricklet(get_bricklet_firmware_filename(BrickletJoystick.DEVICE_URL_PART))\n\n def new_enum(self, device_information):\n if self.cbe_position != None:\n self.cbe_position.set_period(0)\n\n self.joystick = BrickletJoystick(device_information.uid, self.get_ipcon())\n self.joystick.register_callback(self.joystick.CALLBACK_PRESSED, self.qtcb_pressed.emit)\n self.joystick.register_callback(self.joystick.CALLBACK_RELEASED, self.qtcb_released.emit)\n\n self.cbe_position = CallbackEmulator(self.joystick.get_position, self.cb_position)\n self.cbe_position.set_period(100)\n\n self.show_device_information(device_information)\n\n self.position_transition = [0, 0, 0, 0]\n self.button_transition = 0\n self.last_position_status = '?', '?', '?', '?'\n self.last_button_status = '?'\n self.last_position = 0, 0\n\n x, y = self.joystick.get_position()\n pressed = self.joystick.is_pressed()\n\n self.update_transition(x, y, pressed)\n\n def update_transition(self, x, y, pressed):\n if x != None and y != None:\n # TR\n if self.position_transition[0] == 0 and x >= LIMIT and y >= LIMIT:\n self.position_transition[0] = 1\n elif self.position_transition[0] == 1 and not (x >= LIMIT and y >= LIMIT):\n self.position_transition[0] = 2\n\n # BR\n if self.position_transition[1] == 0 and x >= LIMIT and y <= -LIMIT:\n self.position_transition[1] = 1\n elif self.position_transition[1] == 1 and not (x >= LIMIT and y <= -LIMIT):\n self.position_transition[1] = 2\n\n # BL\n if self.position_transition[2] == 0 and x <= -LIMIT and y <= -LIMIT:\n self.position_transition[2] = 1\n elif self.position_transition[2] == 1 and not (x <= -LIMIT and y <= -LIMIT):\n self.position_transition[2] = 2\n\n # TL\n if self.position_transition[3] == 0 and x <= 
-LIMIT and y >= LIMIT:\n self.position_transition[3] = 1\n elif self.position_transition[3] == 1 and not (x <= -LIMIT and y >= LIMIT):\n self.position_transition[3] = 2\n\n if pressed != None:\n if self.button_transition == 0 and pressed:\n self.button_transition = 1\n elif self.button_transition == 1 and not pressed:\n self.button_transition = 2\n\n if x != None and y != None:\n tr = '\\u25C7'\n if x >= LIMIT and y >= LIMIT:\n tr = '\\u25C6'\n\n br = '\\u25C7'\n if x >= LIMIT and y <= -LIMIT:\n br = '\\u25C6'\n\n bl = '\\u25C7'\n if x <= -LIMIT and y <= -LIMIT:\n bl = '\\u25C6'\n\n tl = '\\u25C7'\n if x <= -LIMIT and y >= LIMIT:\n tl = '\\u25C6'\n\n self.last_position_status = tr, br, bl, tl\n else:\n tr, br, bl, tl = self.last_position_status\n\n if pressed != None:\n bs = '\\u25C7'\n if pressed:\n bs = '\\u25C6'\n\n self.last_button_status = bs\n else:\n bs = self.last_button_status\n\n if self.position_transition != [2, 2, 2, 2] or self.button_transition != 2:\n status = 'Warte auf '\n set_value = self.mw.set_value_action\n\n for ps in self.position_transition:\n if ps == 0:\n status += '\\u25C6 '\n elif ps == 1:\n status += '\\u25C7 '\n elif ps == 2:\n status += '\\u2611 '\n else:\n status += '? '\n\n status += 'und '\n\n if self.button_transition == 0:\n status += '\\u25C6'\n elif self.button_transition == 1:\n status += '\\u25C7'\n elif self.button_transition == 2:\n status += '\\u2611'\n else:\n status += str(self.button_transition)\n else:\n status = 'Test OK!'\n set_value = self.mw.set_value_okay\n\n if x != None and y != None:\n self.last_position = x, y\n else:\n x, y = self.last_position\n\n set_value(\"Position: ({0}, {1})\\nEcken und Taster: {2} {3} {4} {5} und {6}, {7} (\\u25C6 = drin/gedrückt, \\u25C7 = draußen/losgelassen, \\u2611 = OK)\"\n .format(x, y, tr, br, bl, tl, bs, status))\n\n def cb_position(self, data):\n x, y = data\n self.update_transition(x, y, None)\n\n def cb_button(self, pressed):\n self.update_transition(None, None, pressed)\n","sub_path":"src/flash-test/plugin_system/plugins/bricklet_joystick.py","file_name":"bricklet_joystick.py","file_ext":"py","file_size_in_byte":7182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"89919750","text":"import pytest\nimport geneselection.datasets.dataset as gds\nfrom .create_h5ad import CreateH5ad\n\nclass StubData(object):\n def __init__(self):\n self.rows = 5\n self.columns = 38270\n self.dstruct = CreateH5ad(rows=self.rows, columns=self.columns)\n\n def get_annData(self):\n return self.dstruct\n\n\n@pytest.fixture\ndef rna_seq():\n return StubData()\n\n\ndef test_len(rna_seq):\n ad = rna_seq.get_annData()\n ds = gds.gsdataset_from_anndata(ad)\n assert len(ds) == rna_seq.rows\n\n\ndef test_access(rna_seq):\n ad = rna_seq.get_annData()\n ds = gds.gsdataset_from_anndata(ad)\n kval = ds[2]\n row = kval['X']\n assert len(row) == rna_seq.columns\n\n\ndef test_row(rna_seq):\n ad = rna_seq.get_annData()\n ds = gds.gsdataset_from_anndata(ad)\n kval = ds[2]\n row = kval['X']\n for i in range(rna_seq.columns):\n assert float(row[i]) == rna_seq.dstruct.X[2, i]\n\n\n","sub_path":"geneselection/tests/dataset_test.py","file_name":"dataset_test.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"475167972","text":"#!/usr/bin/env python3\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.linalg import norm as nm\n\n__author__ = \"Jinghua 
Feng\"\n__copyright__ = \"Copyright 2018, Diagonals in High Dimensions\"\n__credits__ = [\"Jinghua Feng\"]\n__license__ = \"GPL\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Jinghua Feng\"\n__email__ = \"tracygooo@gmail.com\"\n__status__ = \"Dev\"\n\n#============================================================\n# Set up the font size for plots\n#============================================================\nfont = { 'size' : 20 }\nmpl.rc( 'xtick' , labelsize = 20 )\nmpl.rc( 'ytick' , labelsize = 20 )\nmpl.rc( 'font' , **font )\n\n\n#============================================================\n# Statistical analysis of angle vector and output\n#============================================================\ndef stat( angle , dim ):\n a_min = min( angle )\n a_max = max( angle )\n a_range = a_max - a_min\n a_mean = np.mean( angle )\n a_var = np.var( angle )\n print( '\\nd = {}'.format( dim ) )\n print( '\\tmin = {}\\n\\tmax = {}\\n\\tvalue range = {}\\n\\tmean = {}\\n\\tvariance = {}\\n'.format( a_min , a_max , a_range , a_mean , a_var) )\n\n\nif __name__ == \"__main__\":\n\n pair_num = 100000\n dim = [ 10 , 100 , 1000 ]\n d_len = len( dim )\n\n # ============================================================\n # Compute angles between half diagonals in dim dimensions\n # ============================================================\n angle = np.zeros( ( d_len , pair_num ) )\n for i in np.arange( d_len ) :\n for j in np.arange( pair_num ) :\n # Generate a pair of half diagonals randomly\n hd1 = np.random.uniform( -1 , 1 , dim[ i ] )\n hd2 = np.random.uniform( -1 , 1 , dim[ i ] )\n # Compute angle between the two half diagonals\n c_angle = np.dot( hd1 , hd2 ) / nm( hd1 ) / nm( hd2 )\n angle[ i , j ] = np.degrees( np.arccos( c_angle ) )\n\n # ============================================================\n # Plot angle vs. 
probability\n # ============================================================\n fig = plt.figure(0)\n fig.suptitle( 'Probability mass function for different dimensions' )\n x_ax = np.arange( 0 , 180 , 1 )\n for i in np.arange( d_len ) :\n stat( angle[ i ] , dim[ i ] )\n plt.subplot( d_len , 1 , i + 1 )\n plt.hist( angle[ i ] , x_ax , normed = True )\n plt.savefig( 'angle_vs_probability.png' , format = 'png' )\n plt.show()\n","sub_path":"kernel-principle-component-a2/Assign2-part1.py","file_name":"Assign2-part1.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"21683224","text":"import os\nimport prettytable\nimport json\n\nfrom cmd import Cmd\n\nfrom ontrack import Ontrack\n\nclass Shell(Cmd):\n\n def set_instance(self, instance):\n self.ontrack_instance = instance\n\n\n def do_projects(self, args):\n \"\"\" List all projects and their branches\"\"\"\n projects = self.ontrack_instance.get_projects()\n for project in projects['resources']:\n print(\"Project : %s\" % project['name'])\n branches = self.ontrack_instance.get_branches(project['name'])\n for branch in branches['resources']:\n print(\"-%s\" % branch['name'])\n\n def do_project(self, args):\n \"\"\" Print information about project.\"\"\"\n name = args\n\n project = self.ontrack_instance.get_project(name, True)\n if project is None:\n print(\"No project found with name : %s\" % name)\n return\n\n branches = self.ontrack_instance.get_branches(name)\n table = prettytable.PrettyTable(['Branch', 'latest'])\n for branch in branches['resources']:\n if not branch['disabled'] and 'TEMPLATE' not in branch['type']:\n branch_name = branch['name']\n branch_latest_build = self.ontrack_instance.get_branch_status(name, branch_name)['latestBuild']['description']\n table.add_row((branch_name,branch_latest_build))\n print(table)\n\n def do_status(self, args):\n \"\"\" Return of a branch.\"\"\"\n arguments = args.split()\n p = arguments[0]\n b = arguments[1]\n\n status = self.ontrack_instance.get_branch_status(p,b)\n print(status)\n\n def do_latest_build(self,args):\n arguments = args.split()\n p = arguments[0]\n b = arguments[1]\n\n status = self.ontrack_instance.get_branch_status(p, b)\n print(status['latestBuild']['description'])\n\n\n def do_build(self, args):\n arguments = args.split()\n p = arguments[0]\n b = arguments[1]\n build_name = arguments[2]\n\n project = self.ontrack_instance.get_project(p, True)\n\n builds = self.ontrack_instance.get_build(p,b,build_name)\n pretty = json.dumps(builds, indent=4, sort_keys=True)\n print(pretty)\n\n\n def do_promote(self, args):\n arguments = args.split()\n p = arguments[0]\n b = arguments[1]\n bld = arguments[2]\n p = arguments[3]\n\n\n\n\n\n def do_clear(self, args=None):\n\n \"\"\"Clear screen\"\"\"\n # TODO : linux only\n os.system('clear')\n\n\n def do_exit(self, args):\n \"\"\" Quit the jenkinshelper shell \"\"\"\n exit(0)\n\n\n \"\"\"\n def builds(self):\n\n parser = argparse.ArgumentParser(\n description='Get builds information')\n # prefixing the argument with -- means it's optional\n parser.add_argument('project')\n parser.add_argument('branch')\n args = parser.parse_args(sys.argv[2:])\n\n project = args.project\n branch = args.branch\n\n builds = self.ontrack_instance.get_builds(project, branch)\n\n print(builds)\n \"\"\"\n\nif __name__ == \"__main__\":\n\n server = Ontrack('http://192.168.126.76:9082/', 'admin', 'gu1nness')\n\n prompt = Shell()\n prompt.set_instance(server)\n prompt.prompt = 
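The angle statistics printed by Assign2-part1.py have a simple closed-form check: for u, v uniform on [-1,1]^d, cos θ = u·v/(|u||v|) has mean 0 and variance ≈ 1/d, so half diagonals concentrate at 90° with a spread of roughly 57.3/√d degrees, about 1.8° at d = 1000. A quick vectorised verification of that 1/d law:

# Check the 1/d concentration of cos(theta) against simulation.
import numpy as np

for d in (10, 100, 1000):
    u = np.random.uniform(-1, 1, (20000, d))
    v = np.random.uniform(-1, 1, (20000, d))
    cos = (u * v).sum(axis=1) / (np.linalg.norm(u, axis=1) * np.linalg.norm(v, axis=1))
    print(d, cos.var(), 1.0 / d)  # empirical vs predicted variance of cos(theta)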
'ontrack > '\n prompt.cmdloop('Starting prompt...')\n\n","sub_path":"ontrack/OntrackShell.py","file_name":"OntrackShell.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"198678337","text":"import const\nfrom html_parser import HTMLParser\nimport os\nimport time\nimport requests\nimport pickle\nimport logging\nimport datetime\nimport numpy as np\nfrom selenium import webdriver\nclass StatusNotOkException(Exception):\n def __init__(self, url, code):\n self.url = url\n self.code = code\n def __str__(self):\n '请求失败\\n\\t状态码为: {}\\n\\turl为: {}'.format(self.code, self.url)\n\nclass Crawler:\n def __init__(self, parser=HTMLParser()):\n self.params = {\n 'Accept': '*/*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36 Edg/90.0.818.51',\n 'Connection': 'keep-alive'\n }\n self.cookies = {}\n self.parser = parser\n self.base_url = 'https://segmentfault.com'\n self.questions_url = 'https://segmentfault.com/t/{}/questions?type=votes&page={}'\n self.detail_urls = list()\n self.detail_urls_dump_name = 'detail_u_d_n.pkl'\n if self.detail_urls_dump_name in os.listdir(const.TEMP_PATH):\n with open(os.path.join(const.TEMP_PATH, self.detail_urls_dump_name), 'rb') as f:\n self.detail_urls = pickle.load(f)\n self.done_urls = set()\n self.done_urls_dump_name = 'dump_u_d_n.pkl'\n if self.done_urls_dump_name in os.listdir(const.TEMP_PATH):\n with open(os.path.join(const.TEMP_PATH, self.done_urls_dump_name), 'rb') as f:\n self.done_urls = pickle.load(f)\n self.languages = ['java', 'python', 'c', 'sql']\n self._init_browser()\n \n def _init_browser(self):\n logging.info('start to init browser')\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--headless')\n chrome_options.add_argument('--disable-gpu')\n\n self.browser = webdriver.Chrome(chrome_options=chrome_options)\n logging.info('init browser successfully')\n\n def start(self, languages=None, max_page=100):\n if languages == None:\n languages = self.languages\n languages = languages and self.languages\n if len(self.detail_urls) == 0:\n self._crawl_questions(languages, max_page) \n self._crawl_details()\n\n def _crawl_questions(self, languages, max_page):\n for language in languages:\n logging.info('start to crawl questions of {}'.format(language))\n for page in range(1, max_page + 1):\n logging.info('start to crawl page {}'.format(page))\n url = self.questions_url.format(language, page)\n try:\n self._get(url)\n except StatusNotOkException as e:\n print(e)\n break\n self.detail_urls += self.parser.parse_questions(self.page_source, self.base_url)\n time.sleep(np.random.choice(a=[1, 2, 3], size=1, p=[0.6, 0.3, 0.1]).item())\n with open(os.path.join(const.TEMP_PATH, self.detail_urls_dump_name), 'wb') as f:\n pickle.dump(self.detail_urls, f)\n \n def _crawl_details(self):\n logging.info('start to crawl details with a number of {}'.format(len(self.detail_urls)))\n for url in self.detail_urls:\n if url not in self.done_urls:\n logging.info('start to crawl {}\\'s detail'.format(url))\n try:\n self._get(url)\n except StatusNotOkException as e:\n print(e)\n break\n try:\n self.parser.parse_detail(self.page_source)\n self.done_urls.add(url)\n except Exception as e:\n logging.error('error when parse {}, error message is {}'.format(url, e))\n break\n time.sleep(np.random.choice(a=[2, 3, 4], size=1, p=[0.6, 0.3, 0.1]).item())\n 
        with open(os.path.join(const.TEMP_PATH, self.done_urls_dump_name), 'wb') as f:\n            pickle.dump(self.done_urls, f)\n\n    def _get(self, url, use_browser=True):\n        if use_browser:\n            self.browser.get(url)\n            self.page_source = self.browser.page_source\n        else:\n            self.response = requests.get(url, params=self.params, cookies=self.cookies)\n            if self.response.status_code != 200:\n                logging.error('get {} failed with code {}'.format(url, self.response.status_code))\n                raise StatusNotOkException(url, self.response.status_code)\n            if self.response.cookies is not None:\n                self.cookies = self.response.cookies\n            self.response.encoding = 'utf-8'\n            self.page_source = self.response.text\n\n\nif __name__ == '__main__':\n    LOG_FORMAT = \"%(asctime)s - %(levelname)s - %(message)s\"\n    DATE_FORMAT = \"%m/%d/%Y %H:%M:%S %p\"\n    logging.basicConfig(filename=os.path.join(const.LOG_PATH, '{}.log'.format(datetime.datetime.now().strftime('%Y-%m-%d'))),\n                        level=logging.INFO, format=LOG_FORMAT, datefmt=DATE_FORMAT)\n    # crawler = Crawler()\n    # crawler.start()\n    HTMLParser().fix_dataset()\n","sub_path":"Scripts/Crawler/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":5225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"113307086","text":"class SwappingDigits:\n    def minNumber(self, num):\n        # Minimize the number with at most one swap: for each position from\n        # the left, find the smallest digit to its right (the rightmost\n        # occurrence wins ties), making sure we are not putting 0 at place 1.\n        digits = list(num)\n        for i in range(len(digits)):\n            best = i\n            for j in range(len(digits) - 1, i, -1):\n                if digits[j] < digits[best] and not (i == 0 and digits[j] == '0'):\n                    best = j\n            if best != i:\n                digits[i], digits[best] = digits[best], digits[i]\n                break\n        return ''.join(digits)\n","sub_path":"TopCoder/tc srm583/250.py","file_name":"250.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"198753091","text":"# Install first via terminal: pip install overpy\n# Code to get cities and their latitude and longitude for selected countries\nimport csv\nimport json\nimport time\n\nimport overpy\n\niso_codes = {}\n# Key is city name, value is a list of country codes\ncities = {}\n\n# City coordinates - key is city name\ncity_coords = {}\n\nmajor_iso_codes = ['CN', 'TH', 'JP', 'SG',]\n# major_iso_codes = ['AU', 'KR', 'MY', 'DE',]\n\n# 'US', 'FR', 'VN', 'CA']\n\n\ndef read_iso_codes():\n    global iso_codes\n\n    with open('iso-codes.csv', mode='r') as infile:\n        reader = csv.reader(infile)\n        # Skip 2 header rows\n        next(reader)\n        next(reader)\n        for r in reader:\n            print(f\"{r[3]} {r[0]}\")\n            iso_codes[r[3]] = r[0]\n\n\ndef get_cities_for_country(iso_code):\n    global cities\n    api = overpy.Overpass()\n\n    query = f'area[\"ISO3166-1\"=\"{iso_code}\"][admin_level=2];node[\"place\"=\"city\"](area);out center;'\n\n    r = api.query(query)\n    time.sleep(15)\n    # r = api.query(\"\"\"\n    # area[\"ISO3166-1\"=\"US\"][admin_level=2];\n    # node[\"place\"=\"city\"](area);\n    # out;\n    # \"\"\")\n\n    print(f\"There are {len(r.nodes)} cities for {iso_code}\")\n    for n in r.nodes:\n        print(f\"Latitude: {n.lat} Longitude: {n.lon}\")\n        if 'name' in n.tags:\n            if 'name:en' in n.tags:\n                city = n.tags['name:en']\n            else:\n                city = n.tags['name']\n            print(city)\n            if city in cities:\n                cities[city].append(iso_code)\n            else:\n                cities[city] = []\n                cities[city].append(iso_code)\n\n            if city not in city_coords:\n                city_coords[city] = (float(n.lat), float(n.lon))\n    return r\n\n\ndef main():\n    read_iso_codes()\n
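# A hedged, standalone sketch of the Overpass QL query used by
# get_cities_for_country above, factored into a small helper purely for
# illustration. The query text is copied from the record; the helper name is
# invented, and executing the query still needs the overpy package and
# network access.
def build_city_query(iso_code: str) -> str:
    # admin_level=2 selects a country area; "out center;" returns coordinates.
    return (
        f'area["ISO3166-1"="{iso_code}"][admin_level=2];'
        'node["place"="city"](area);'
        'out center;'
    )

# e.g. build_city_query('JP') yields the query string for cities in Japan.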
print(iso_codes)\n\n for iso_code in major_iso_codes:\n print(f\"Getting cities for {iso_code} country: {iso_codes[iso_code]}\")\n get_cities_for_country(iso_code)\n\n # print(cities)\n\n with open('cities.json', 'w') as fp:\n json.dump(cities, fp, indent=4)\n\n with open('cities_coords.json', 'w') as fp:\n json.dump(city_coords, fp, indent=4)\n\n\nmain()\n\n\nif False:\n get_cities_for_country('BH')\n with open('cities.json', 'w') as fp:\n json.dump(cities, fp, indent=4)\n\n with open('cities_coords.json', 'w') as fp:\n json.dump(city_coords, fp, indent=4)\n","sub_path":"Corona_virus/openStreetMapCities.py","file_name":"openStreetMapCities.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"641696281","text":"import sqlite3\n\nimport sqlalchemy\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom flask_app.app import db, ArticleFakeChecker2\n\nMAIN_URL = \"https://euvsdisinfo.eu/news/\"\nMAIN_URL_PAGE_FROM2 = \"https://euvsdisinfo.eu/news/page/\"\n\n\ndef parse_main_pages():\n \"\"\"parse pages to get main information\"\"\"\n try:\n last_article = db.session.query(ArticleFakeChecker2).order_by(ArticleFakeChecker2.id.desc()).first()\n max_id_pos_start = str(last_article).find(\"id=\")\n max_id_pos_end = str(last_article).find(\"title=\")\n max_id = str(last_article)[max_id_pos_start + 3: max_id_pos_end - 2]\n max_id = int(max_id) + 1\n except ValueError:\n max_id = 1\n\n print(\"max_id\", max_id)\n flag_old_news = 0\n n_page = 0\n while flag_old_news != 1:\n n_page = n_page + 1\n article_date = \"\"\n print(\"n_page\", n_page)\n if n_page == 1:\n url = MAIN_URL\n\n else:\n url = MAIN_URL_PAGE_FROM2 + str(n_page + 1) + '/'\n\n html_page = requests.get(url,\n headers={\n \"user-agent\": \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0)\"\n \"Gecko/20100101 Firefox/74.0\"}).text\n\n soup = BeautifulSoup(html_page, 'html.parser')\n all_articles = soup.find_all(\"a\", {\"class\": \"b-post__link\"})\n\n for article in all_articles:\n\n print()\n url_article = article.get(\"href\")\n\n article_title, article_date, article_text = parse_article_pages(url_article)\n article_text = str(article_text).strip()\n print(\"article_title\", article_title)\n try:\n db.session.rollback()\n if db.session.query(ArticleFakeChecker2.id).filter_by(title=article_title).scalar() is not None:\n print(\"Found in db\")\n continue\n\n except sqlite3.IntegrityError:\n continue\n except sqlalchemy.exc.IntegrityError:\n continue\n\n resource = \"https://euvsdisinfo.eu/\"\n print(\"article_text\", article_text)\n print(\"article_date\", article_date)\n\n new_article = ArticleFakeChecker2(id=max_id,\n title=article_title,\n title_en=article_title,\n text=article_text,\n date=article_date,\n resource=resource,\n url=url_article)\n\n max_id += 1\n\n try:\n db.session.add(new_article)\n db.session.commit()\n db.session.flush()\n db.create_all()\n except sqlalchemy.exc.IntegrityError:\n continue\n except sqlalchemy.exc.DataError:\n continue\n\n if str(article_date).split(\", \")[-1].strip() == \"2018\" or n_page >= 700:\n flag_old_news = 1\n\n try:\n db.session.rollback()\n db.session.commit()\n db.create_all()\n except sqlalchemy.exc.IntegrityError:\n continue\n\n\ndef parse_article_pages(url):\n \"\"\"parse special page\"\"\"\n html_page = requests.get(url).text\n\n soup = BeautifulSoup(html_page, 'html.parser')\n\n try:\n all_title = soup.find_all(\"h1\", {\"class\": \"entry-title\"})\n title = BeautifulSoup(str(all_title[0]), 
\"lxml\").text\n except IndexError as error:\n print(\"error\", error)\n title = \"\"\n\n try:\n all_span = soup.find_all(\"span\", {\"class\": \"et_pb_post_date\"})\n date = BeautifulSoup(str(all_span[0]), \"lxml\").text\n except IndexError as error:\n print(\"error\", error)\n date = \"\"\n\n try:\n all_text = soup.find_all(\"div\", {\"class\": \"entry-content\"})\n clean_text = BeautifulSoup(str(all_text[0]), \"lxml\").text\n except IndexError as error:\n print(\"error\", error)\n clean_text = \"\"\n\n return title, date, clean_text\n\n\nif __name__ == '__main__':\n parse_main_pages()\n","sub_path":"site_parse/parse_fakecheckers/parse_euvsdisinfo2.py","file_name":"parse_euvsdisinfo2.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"293499928","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport random\n\nfrom devops.helpers import helpers\nfrom devops.helpers import ssh_client\nfrom paramiko import rsakey\n\nfrom fuel_ccp_tests import logger\nfrom fuel_ccp_tests.helpers import utils\n\nLOG = logger.logger\n\n\nclass UnderlaySSHManager(object):\n \"\"\"Keep the list of SSH access credentials to Underlay nodes.\n\n This object is initialized using config.underlay.ssh.\n\n :param config_ssh: JSONList of SSH access credentials for nodes:\n [\n {\n node_name: node1,\n address_pool: 'public-pool01',\n host: ,\n port: ,\n keys: [],\n keys_source_host: None,\n login: ,\n password: ,\n },\n {\n node_name: node1,\n address_pool: 'private-pool01',\n host:\n port:\n keys: []\n keys_source_host: None,\n login:\n password:\n },\n {\n node_name: node2,\n address_pool: 'public-pool01',\n keys_source_host: node1\n ...\n }\n ,\n ...\n ]\n\n self.node_names(): list of node names registered in underlay.\n self.remote(): SSHClient object by a node name (w/wo address pool)\n or by a hostname.\n \"\"\"\n config_ssh = None\n config_lvm = None\n\n def __init__(self, config_ssh):\n \"\"\"Read config.underlay.ssh object\n\n :param config_ssh: dict\n \"\"\"\n if self.config_ssh is None:\n self.config_ssh = []\n\n if self.config_lvm is None:\n self.config_lvm = {}\n\n self.add_config_ssh(config_ssh)\n\n def add_config_ssh(self, config_ssh):\n\n if config_ssh is None:\n config_ssh = []\n\n for ssh in config_ssh:\n ssh_data = {\n # Required keys:\n 'node_name': ssh['node_name'],\n 'host': ssh['host'],\n 'login': ssh['login'],\n 'password': ssh['password'],\n # Optional keys:\n 'address_pool': ssh.get('address_pool', None),\n 'port': ssh.get('port', None),\n 'keys': ssh.get('keys', []),\n }\n\n if 'keys_source_host' in ssh:\n node_name = ssh['keys_source_host']\n remote = self.remote(node_name)\n keys = self.__get_keys(remote)\n ssh_data['keys'].extend(keys)\n\n self.config_ssh.append(ssh_data)\n\n def remove_config_ssh(self, config_ssh):\n if config_ssh is None:\n config_ssh = []\n\n for ssh in config_ssh:\n ssh_data = {\n # Required keys:\n 'node_name': ssh['node_name'],\n 
'host': ssh['host'],\n 'login': ssh['login'],\n 'password': ssh['password'],\n # Optional keys:\n 'address_pool': ssh.get('address_pool', None),\n 'port': ssh.get('port', None),\n 'keys': ssh.get('keys', []),\n }\n self.config_ssh.remove(ssh_data)\n\n def __get_keys(self, remote):\n keys = []\n remote.execute('cd ~')\n key_string = './.ssh/id_rsa'\n if remote.exists(key_string):\n with remote.open(key_string) as f:\n keys.append(rsakey.RSAKey.from_private_key(f))\n return keys\n\n def __ssh_data(self, node_name=None, host=None, address_pool=None):\n\n ssh_data = None\n\n if host is not None:\n for ssh in self.config_ssh:\n if host == ssh['host']:\n ssh_data = ssh\n break\n\n elif node_name is not None:\n for ssh in self.config_ssh:\n if node_name == ssh['node_name']:\n if address_pool is not None:\n if address_pool == ssh['address_pool']:\n ssh_data = ssh\n break\n else:\n ssh_data = ssh\n if ssh_data is None:\n raise Exception('Auth data for node was not found using '\n 'node_name=\"{}\" , host=\"{}\" , address_pool=\"{}\"'\n .format(node_name, host, address_pool))\n return ssh_data\n\n def node_names(self):\n \"\"\"Get list of node names registered in config.underlay.ssh\"\"\"\n\n names = [] # List is used to keep the original order of names\n for ssh in self.config_ssh:\n if ssh['node_name'] not in names:\n names.append(ssh['node_name'])\n return names\n\n def enable_lvm(self, lvmconfig):\n \"\"\"Method for enabling lvm oh hosts in environment\n\n :param lvmconfig: dict with ids or device' names of lvm storage\n :raises: devops.error.DevopsCalledProcessError,\n devops.error.TimeoutError, AssertionError, ValueError\n \"\"\"\n def get_actions(lvm_id):\n return [\n (\"sed -i -e 's/\\\\(\\\\budev_sync = \\\\)1/\\\\10/' \"\n \"-e 's/\\\\(\\\\budev_rules = \\\\)1/\\\\10/' /etc/lvm/lvm.conf\"),\n \"systemctl enable lvm2-lvmetad.service\",\n \"systemctl enable lvm2-lvmetad.socket\",\n \"systemctl start lvm2-lvmetad.service\",\n \"systemctl start lvm2-lvmetad.socket\",\n \"pvcreate {} && pvs\".format(lvm_id),\n \"vgcreate default {} && vgs\".format(lvm_id),\n \"lvcreate -L 1G -T default/pool && lvs\",\n ]\n lvmpackages = [\"lvm2\", \"liblvm2-dev\", \"thin-provisioning-tools\"]\n for node_name in self.node_names():\n lvm = lvmconfig.get(node_name, None)\n if not lvm:\n continue\n if 'id' in lvm:\n lvmdevice = '/dev/disk/by-id/{}'.format(lvm['id'])\n elif 'device' in lvm:\n lvmdevice = '/dev/{}'.format(lvm['device'])\n else:\n raise ValueError(\"Unknown LVM device type\")\n if lvmdevice:\n self.apt_install_package(\n packages=lvmpackages, node_name=node_name, verbose=True)\n for command in get_actions(lvmdevice):\n self.sudo_check_call(command, node_name=node_name,\n verbose=True)\n self.config_lvm = dict(lvmconfig)\n\n def host_by_node_name(self, node_name, address_pool=None):\n ssh_data = self.__ssh_data(node_name=node_name,\n address_pool=address_pool)\n return ssh_data['host']\n\n def remote(self, node_name=None, host=None, address_pool=None):\n \"\"\"Get SSHClient by a node name or hostname.\n\n One of the following arguments should be specified:\n - host (str): IP address or hostname. 
If specified, 'node_name' is\n ignored.\n - node_name (str): Name of the node stored to config.underlay.ssh\n - address_pool (str): optional for node_name.\n If None, use the first matched node_name.\n \"\"\"\n ssh_data = self.__ssh_data(node_name=node_name, host=host,\n address_pool=address_pool)\n return ssh_client.SSHClient(\n host=ssh_data['host'],\n port=ssh_data['port'] or 22,\n username=ssh_data['login'],\n password=ssh_data['password'],\n private_keys=ssh_data['keys'])\n\n def check_call(\n self, cmd,\n node_name=None, host=None, address_pool=None,\n verbose=False, timeout=None,\n error_info=None,\n expected=None, raise_on_err=True):\n \"\"\"Execute command on the node_name/host and check for exit code\n\n :type cmd: str\n :type node_name: str\n :type host: str\n :type verbose: bool\n :type timeout: int\n :type error_info: str\n :type expected: list\n :type raise_on_err: bool\n :rtype: list stdout\n :raises: devops.error.DevopsCalledProcessError\n \"\"\"\n remote = self.remote(node_name=node_name, host=host,\n address_pool=address_pool)\n return remote.check_call(\n command=cmd, verbose=verbose, timeout=timeout,\n error_info=error_info, expected=expected,\n raise_on_err=raise_on_err)\n\n def apt_install_package(self, packages=None, node_name=None, host=None,\n **kwargs):\n \"\"\"Method to install packages on ubuntu nodes\n\n :type packages: list\n :type node_name: str\n :type host: str\n :raises: devops.error.DevopsCalledProcessError,\n devops.error.TimeoutError, AssertionError, ValueError\n\n Other params of check_call and sudo_check_call are allowed\n \"\"\"\n expected = kwargs.pop('expected', None)\n if not packages or not isinstance(packages, list):\n raise ValueError(\"packages list should be provided!\")\n install = \"apt-get install -y {}\".format(\" \".join(packages))\n # Should wait until other 'apt' jobs are finished\n pgrep_expected = [0, 1]\n pgrep_command = \"pgrep -a -f apt\"\n helpers.wait(\n lambda: (self.check_call(\n pgrep_command, expected=pgrep_expected, host=host,\n node_name=node_name, **kwargs).exit_code == 1\n ), interval=30, timeout=1200,\n timeout_msg=\"Timeout reached while waiting for apt lock\"\n )\n # Install packages\n self.sudo_check_call(\"apt-get update\", node_name=node_name, host=host,\n **kwargs)\n self.sudo_check_call(install, expected=expected, node_name=node_name,\n host=host, **kwargs)\n\n def sudo_check_call(\n self, cmd,\n node_name=None, host=None, address_pool=None,\n verbose=False, timeout=None,\n error_info=None,\n expected=None, raise_on_err=True):\n \"\"\"Execute command with sudo on node_name/host and check for exit code\n\n :type cmd: str\n :type node_name: str\n :type host: str\n :type verbose: bool\n :type timeout: int\n :type error_info: str\n :type expected: list\n :type raise_on_err: bool\n :rtype: list stdout\n :raises: devops.error.DevopsCalledProcessError\n \"\"\"\n remote = self.remote(node_name=node_name, host=host,\n address_pool=address_pool)\n with remote.get_sudo(remote):\n return remote.check_call(\n command=cmd, verbose=verbose, timeout=timeout,\n error_info=error_info, expected=expected,\n raise_on_err=raise_on_err)\n\n def dir_upload(self, host, source, destination):\n \"\"\"Upload local directory content to remote host\n\n :param host: str, remote node name\n :param source: str, local directory path\n :param destination: str, local directory path\n \"\"\"\n with self.remote(node_name=host) as remote:\n remote.upload(source, destination)\n\n def get_random_node(self):\n \"\"\"Get random node name\n\n :return: str, 
name of node\n        \"\"\"\n        return random.choice(self.node_names())\n\n    def yaml_editor(self, file_path, node_name=None, host=None,\n                    address_pool=None):\n        \"\"\"Returns an initialized YamlEditor instance for context manager\n\n        Usage (with 'underlay' fixture):\n\n            # Local YAML file\n            with underlay.yaml_editor('/path/to/file') as editor:\n                editor.content[key] = \"value\"\n\n            # Remote YAML file on k8s host\n            with underlay.yaml_editor('/path/to/file',\n                                      host=config.k8s.kube_host) as editor:\n                editor.content[key] = \"value\"\n        \"\"\"\n        # Local YAML file\n        if node_name is None and host is None:\n            return utils.YamlEditor(file_path=file_path)\n\n        # Remote YAML file\n        ssh_data = self.__ssh_data(node_name=node_name, host=host,\n                                   address_pool=address_pool)\n        return utils.YamlEditor(\n            file_path=file_path,\n            host=ssh_data['host'],\n            port=ssh_data['port'] or 22,\n            username=ssh_data['login'],\n            password=ssh_data['password'],\n            private_keys=ssh_data['keys'])\n","sub_path":"fuel_ccp_tests/managers/underlay_ssh_manager.py","file_name":"underlay_ssh_manager.py","file_ext":"py","file_size_in_byte":13123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"378093600","text":"'''\n- A helper file with functions that manage image preprocessing, loading and locating\n\n\nList Of All Functions:\n\n1.Load_Images(image_path,images) - load images from a given path\n\n2.resize_by_factor(images,factor) - resizes all the given PIL images to a given size\n\n3.is_image_saved(folders,imagename) - searches through the folders and checks if the given imagename is in any of them. if it is returns True / if not returns False\n\n4.get_image_names(num_of_images) - looks in the image folder for all the image names\n\n5.get_image_by_name(images,imagename) - finds a PIL image by name in the given image array\n\n6.get_trashholds() - returns the threshold array and a starting index\n\nDate - 05/29/2018\nVersion - 002\nName - image_utils.py\n\n'''\n\n\nfrom PIL import Image\nimport numpy as np\nimport os\nimport time\n#\n# cwd = os.getcwd()\n# SIMILAR_IMAGES_DIR = \"similar_images\\\\\"\n# UNSIMILAR_IMAGES_DIR = \"unsimilar_images\\\\\"\n# IMAGES_DIR_NAME = \"images_gopro\\\\\"\n#\n# SIMILAR_IMAGES_PATH = os.path.join(cwd,SIMILAR_IMAGES_DIR)\n# UNSIMILAR_IMAGES_PATH = os.path.join(cwd,UNSIMILAR_IMAGES_DIR)\n# FULL_IMAGE_PATH = os.path.join(cwd,IMAGES_DIR_NAME)\n\n# num_of_images = 15\n\n\ndef Load_Images(image_path,images,num_of_images=15):\n    '''\n    Arguments :\n    image_path - path to the image\n    images - an empty image array\n\n    Does:\n    -load images from a given path\n\n    Returns :\n    images - an array of all the images and names [[image name,PIL image]]\n    '''\n    start = time.time()\n    print(\"parsing images\")\n    images_names = list(os.listdir(image_path))\n    for image_name in images_names[:num_of_images]:\n        image_full_path = os.path.join(image_path,image_name)\n        image = Image.open(image_full_path)\n        images.append([image_name,image])\n\n    end = time.time()\n    print(\"this took {} sec!\".format(end-start))\n    print(\"loaded {} images!\".format(len(images)))\n    return images\n\n\ndef resize_by_factor(images,factor):\n    '''\n    Arguments:\n    images - an array of images and names [imagename,img PIL]\n    factor - the size to resize all the given images to\n\n    Does:\n    -resizes all the given PIL images to a given size\n\n    Returns:\n    new_images - an array with the np array of each image and the image name [imagename,np.array(image)]\n    '''\n    start = time.time()\n    print(\"resizing images\")\n    new_images = []\n
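# A hedged, standalone sketch of the resize step that follows in the record.
# Note that Image.ANTIALIAS (used there) was removed in Pillow 10;
# Image.LANCZOS is the equivalent filter on current versions, so this sketch
# uses it instead. The helper name is invented.
from PIL import Image
import numpy as np

def resize_to_square(image, factor):
    # Resize to factor x factor pixels and return the pixels as an ndarray.
    return np.array(image.resize((factor, factor), Image.LANCZOS))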
    for cell in images:\n        image = cell[1]\n        image_name = cell[0]\n        new_image = image.resize((factor,factor),Image.ANTIALIAS)\n\n        new_images.append([image_name,np.array(new_image)])\n\n    print(\"sizes of all images are {} px by {} px by {} channels\".format(new_images[0][1].shape[0],new_images[0][1].shape[1],new_images[0][1].shape[2]))\n    end = time.time()\n    print(\"this took {} sec!\".format(end-start))\n    return new_images\n\ndef is_image_saved(folders,imagename,SIMILAR_IMAGES_PATH):\n    '''\n    Arguments:\n    -folders - an array like object like so : [[foldername1,[img1 name,img2 name,...]],[foldername2,[img1 name,img2 name,...]],[foldername...,[img1 name,img2 name,...]]]\n    -imagename - an imagename supposedly in the array\n\n    Does:\n    -searches through the folders and checks if the given imagename is in any of them\n    -if it is returns True / if not returns False\n\n    Returns:\n    -True/False\n    '''\n    #folders = [folder_base-0,folder_base-1,folder_base-...]\n    for foldername in folders:\n        full_folder_path = os.path.join(SIMILAR_IMAGES_PATH,foldername)\n        images_in_folder = os.listdir(full_folder_path)\n        #print(\"images_in_folder\",images_in_folder)\n        for image_in_folder in images_in_folder:\n            if(imagename == image_in_folder or imagename in image_in_folder):\n                print(\"found image {} in real folder : {}\".format(imagename,foldername))\n                return True\n\n    print(\"haven't found image {} in any real folder! returning False!\".format(imagename))\n    return False\n\n\n\n\ndef get_image_names(num_of_images,FULL_IMAGE_PATH):\n    '''\n    Arguments:\n    -num_of_images - how many image names to parse\n\n    Does:\n    -looks in the image folder for all the image names\n\n    Returns:\n    -a list of the image names\n    '''\n    all_image_names = os.listdir(FULL_IMAGE_PATH)\n    image_names_by_num_of_examples = all_image_names[:num_of_images]\n    return image_names_by_num_of_examples\n\n\ndef get_image_by_name(images,imagename):\n    '''\n    Arguments:\n    images - images array list so : [[imagename,img_pil]]\n\n    Does:\n    -finds a PIL image by name in the given image array\n\n    Returns:\n    -the image PIL if found\n    -None if nothing has been found\n    '''\n    for cell in images:\n        cell_image_name = cell[0]\n        if(cell_image_name == imagename):\n            image_pil = cell[1]\n            print(\"found image {} in images!\".format(imagename))\n            return image_pil\n\n    print(\"haven't found image {} in images, returning None!!\".format(imagename))\n    return None\n\n\n\ndef get_trashholds():\n    '''\n    Arguments:\n    -None\n\n    Returns:\n    -TRASHOLDS - array of different threshold values\n    -TRASHOLDS_INDEX - the starting threshold index in the array\n\n    '''\n    TRASHOLDS = [0.4,0.5,0.51,0.52,0.53,0.54,0.55,0.56,0.57,0.58,0.585,0.59,0.595,0.6,0.61,0.62,0.63,0.64,0.65,0.66,0.67,0.68,0.69,0.7,0.71,0.72,0.73,0.74,0.75,0.76,0.77,0.80]\n    TRASHOLDS_INDEX = 12\n    return TRASHOLDS,TRASHOLDS_INDEX\n","sub_path":"Versions/ver-003/image_utils.py","file_name":"image_utils.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} {"seq_id":"155352520","text":"#!/usr/bin/env python\n# -- coding: utf-8 --\n\n\"\"\"\n\"\"\"\n\n__author__ = \"\"\n__copyright__ = \"Copyright 2013\"\n\nfrom itertools import chain, imap\n\nclass WorldObject(object):\n    \"\"\"\n    Attributes:\n    - name\n    - transform\n    - pos\n    - rot\n    - scale\n    - pivot\n    - mass\n    - visibility\n    - parent\n    - children\n    \"\"\"\n\n    def __init__(self,\n                 name=\"worldobject\",\n                 transform=None,\n                 pos=None,\n                 rot=None,\n                 scale=None,\n                 pivot=None,\n                 parent=None,\n                 mass=None,\n                 material=None,\n
visible=True):\n \"\"\"Constructor.\n \\param name (\\c str) Object name\n \\param transform (\\c mat4) Initial transform\n \\param pos (\\c vec3) Initial position\n \\param rot (\\c mat3) Initial rotation\n \\param scale (\\c vec3) Initial scaling\n \\param pivot (\\c vec3) Initial pivot point (takes precedence over offsetTransform)\n \\param parent (\\c WorldObject or \\c str) Parent object or None\n \\param mass (\\c float) Total mass\n \\param material (\\c Material) Material class (or a sequence of materials)\n \\param visible (\\c Bool) Visibility flag\n \"\"\"\n self._name = name\n self._transform = transform\n self._pos = pos\n self._rot = rot\n self._scale = scale\n self._pivot = pivot\n self._mass = mass\n self._material = material\n self._visible = visible\n self._children = []\n\n if parent is not None:\n parent.addChild(self)\n self._parent = parent\n\n def __iter__(self):\n for v in chain(*imap(iter, self._children)):\n yield v\n yield self\n\n def addChild(self, pWorldObject ):\n self._children.append(pWorldObject)\n\n def child(self, pName):\n \"\"\"\n Returns an object with a given name\n \"\"\"\n obj = self.findChildByName(pName)\n return obj\n\n def findChildByName(self, pName):\n \"\"\"\n Returns an object with a given name\n \"\"\"\n for obj in self._children:\n if obj._name == pName:\n return obj\n return None\n\n def intersect(self, pRay):\n pass\n\nif __name__ == '__main__':\n pass","sub_path":"src/core/primitives/worldobject.py","file_name":"worldobject.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"230543782","text":"# Status information (HeartBeat) protocol 0x13\n\ndef heartbeat(raw_gps_data):\n\n def info_content(raw_gps_data):\n\n content = bin(int(raw_gps_data[8:10], 16))[2:].zfill(8)\n\n def oil_electro():\n if int(content[0]) == 1:\n return 'Oil and electricity disconnected'\n else:\n return 'Oil and electricity connected'\n\n def gps_tracking():\n if int(content[1]) == 1:\n return 'Gps tracking is on'\n else:\n return 'Gps tracking is off'\n\n def alarm():\n if content[2:5] == '100':\n return 'SOS'\n if content[2:5] == '011':\n return 'Low Battery Alarm'\n if content[2:5] == '010':\n return 'Power Cut Alarm'\n if content[2:5] == '001':\n return 'Shock Alarm'\n if content[2:5] == '000':\n return 'Normal'\n\n def charge():\n if int(content[5]) == 1:\n return 'Charge On'\n else:\n return 'Charge Off'\n\n def acc():\n if int(content[6]) == 1:\n return 'ACC High'\n else:\n return 'ACC Low'\n\n def defence():\n if int(content[7]) == 1:\n return 'Defense Activated'\n else:\n return 'Defense Deactivated'\n\n return oil_electro(), gps_tracking(), alarm(), charge(), acc(), defence()\n\n def voltage_level_alarm(raw_gps_data):\n content = raw_gps_data[10:12]\n if content == b'00':\n return 'No Power'\n if content == b'01':\n return 'Extremely Low Battery'\n if content == b'02':\n return 'Very Low Battery'\n if content == b'03':\n return 'Low Battery'\n if content == b'04':\n return 'Medium'\n if content == b'05':\n return 'High'\n if content >= b'06':\n return 'Very High'\n\n def gsm_signal_alarm(raw_gps_data):\n content = raw_gps_data[12:14]\n if content == b'00':\n return 'No signal'\n if content == b'01':\n return 'Extremely weak signal'\n if content == b'02':\n return 'Very weak signal'\n if content == b'03':\n return 'Good signal'\n if content == b'04':\n return 'Strong signal'\n\n def alarm_language(raw_gps_data):\n\n def former_bit():\n\n former_bit = 
raw_gps_data[14:16]\n\n if former_bit == b'00':\n return 'Normal'\n if former_bit == b'01':\n return 'SOS'\n if former_bit == b'02':\n return 'Power Cut Alarm'\n if former_bit == b'03':\n return 'Shock Alarm'\n if former_bit == b'04':\n return 'Fence In Alarm'\n if former_bit == b'05':\n return 'Fence Out Alarm'\n\n def latter_bit():\n\n latter_bit = raw_gps_data[16:18]\n\n if latter_bit == b'01':\n return 'Chinese'\n if latter_bit == b'02':\n return 'English'\n return former_bit(), latter_bit()\n\n return {\n \"info_content\": info_content(raw_gps_data),\n \"voltage_level_alarm\": voltage_level_alarm(raw_gps_data),\n \"gsm_signal_alarm\": gsm_signal_alarm(raw_gps_data),\n \"alarm_language\": alarm_language(raw_gps_data)\n }","sub_path":"ShelterSmartHome/apps/gpsnode/gt06/gpsdata_heartbeat.py","file_name":"gpsdata_heartbeat.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"454588686","text":"import time\nimport datetime\nimport telepot\nimport RPi.GPIO as GPIO\n\ndef getTemperature():\n\n filepath='1.txt'\n f=open(filepath,'r')\n data=f.read()\n f.close()\n m =10\n return float (m/10)\n\ndef handle(msg):\n\n chat_id=msg['from']['id']\n command=msg['text']\n if command=='/on':\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(7,GPIO.OUT)\n GPIO.output(7,1)\n GPIO.cleanup()\n bot.sendMessage(chat_id,str('Okey On!'))\n\n elif command =='/off':\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(7,GPIO.OUT)\n GPIO.output(7,0)\n GPIO.cleanup()\n bot.sendMessage(chat_id,str('Okey Off!'))\n elif command=='/temp':\n bot.sendMessage(chat_id,str(getTemperature())+'C')\n elif command=='/time':\n bot.sendMessage(chat_id,'time now:'+str(datetime.datetime.now()))\n\n\n elif command=='/start':\n bot.sendMessage(chat_id,str('Hi! 
I am homebot!'))\n i=4\nbot=telepot.Bot('156276665:AAGaJ6ar6ZUgxDlhgMJ_yJbSxrEnFSM8YkQ')\nbot.message_loop(handle)\nwhile 1:\n time.sleep(10)\n\n","sub_path":"dacha.py","file_name":"dacha.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"222830030","text":"\"\"\" Documentation links\r\n\r\n'tempfile' module - https://docs.python.org/3/library/tempfile.html\r\n\r\n\"\"\"\r\nimport os\r\nimport shutil\r\nimport time\r\nimport hashlib\r\nfrom datetime import datetime\r\n\r\n\r\nclass TmpDir:\r\n def __init__(self, path: str):\r\n self.path = path\r\n self.tmp_path = None\r\n\r\n def __enter__(self):\r\n # generate unique name for temporary folder based on current timestamp\r\n hashname = hashlib.sha256(\r\n str(datetime.now()).encode()\r\n ).hexdigest()[:32]\r\n\r\n self.tmp_path = os.path.abspath(\r\n os.path.join(self.path, hashname))\r\n os.mkdir(self.tmp_path)\r\n return self.tmp_path\r\n\r\n def __exit__(self, exc_type, exc_value, traceback):\r\n shutil.rmtree(self.tmp_path)\r\n self.tmp_path = None\r\n\r\n\r\nif __name__ == '__main__':\r\n with TmpDir('.\\\\') as folder:\r\n open(os.path.join(folder, 'data.txt'), 'w').close()\r\n time.sleep(10)\r\n","sub_path":"python_training/tmpdir.py","file_name":"tmpdir.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"297583934","text":"import pygame\r\nimport random\r\n\r\n\r\nclass Myszka:\r\n def __init__(self):\r\n self.__myszx = random.randint(10, 1150)\r\n self.__myszy = random.randint(10, 500)\r\n self.__mysz = pygame.image.load('mouse.png')\r\n self.__myszka_dlugosc = 100\r\n self.__myszka_szerokosc = 50\r\n\r\n def wyswietl_mysz(self):\r\n myszka = pygame.transform.scale(self.__mysz,\r\n (self.__myszka_dlugosc,\r\n self.__myszka_szerokosc))\r\n return [myszka, (self.__myszx, self.__myszy)]\r\n\r\n def wez_polozenie(self):\r\n return [[self.__myszx, self.__myszy],\r\n [self.__myszx + self.__myszka_dlugosc,\r\n self.__myszy + self.__myszka_szerokosc]]\r\n\r\n","sub_path":"kitku/myszka.py","file_name":"myszka.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"428092073","text":"\"\"\"\nCopyright 2014 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport random\nimport time\n\nimport IPy\nimport netaddr\n\n\nfrom cloudcafe.common.tools.datagen import rand_name, random_cidr\nfrom cloudcafe.networking.networks.common.behaviors \\\n import NetworkingBaseBehaviors, NetworkingResponse\nfrom cloudcafe.networking.networks.common.constants \\\n import NeutronResponseCodes\nfrom cloudcafe.networking.networks.common.exceptions \\\n import InvalidIPException, NetworkIDMissingException,\\\n ResourceBuildException, ResourceDeleteException, ResourceGetException,\\\n ResourceListException, ResourceUpdateException\n\n\nclass 
SubnetsBehaviors(NetworkingBaseBehaviors):\n\n def __init__(self, subnets_client, subnets_config):\n super(SubnetsBehaviors, self).__init__()\n self.config = subnets_config\n self.client = subnets_client\n\n def verify_ip(self, ip_cidr, ip_range=None):\n \"\"\"\n @summary: Verify if it is a valid CIDR or IP address within an\n IP range if given.\n @param ip_cidr: IP or CIDR to verify\n @type ip_cidr: string\n @param ip_range: IP or CIDR is expected to be within this range.\n For ex. 10.0.0.0/8, 172.16.0.0/12 or 192.168.0.0/16 for valid\n private IPv4 ranges or fd00::/8 for valid private IPv6 range\n @type ip_range: string\n @return: True if it is a valid IP (or CIDR) or False if not\n @rtype: bool\n \"\"\"\n\n try:\n res = IPy.IP(ip_cidr)\n\n # Check the IP/CIDR is within the range, if given\n if ip_range:\n if res not in IPy.IP(ip_range):\n msg = '{0} not within {1} range'.format(ip_cidr, ip_range)\n self._log.debug(msg)\n return False\n return True\n except ValueError as e:\n self._log.error(e.message)\n return False\n\n def verify_private_ip(self, ip_cidr, ip_version, ip_range=None,\n check_prefixlen=True, suffix_max=None):\n \"\"\"\n @summary: Verify the IP or CIDR is of a private network\n @param ip_cidr: IP or CIDR to verify\n @type ip_cidr: string\n @param ip_version: IP version 4 or 6\n @type ip_version: int\n @param ip_range: IP or CIDR is expected to be within this range.\n For ex. 10.0.0.0/8, 172.16.0.0/12 or 192.168.0.0/16 for valid\n private IPv4 ranges or fd00::/8 for valid private IPv6 range\n @type ip_range: string\n @param check_prefixlen: flag to check or not the IP/CIDR prefix length\n @type check_prefixlen: bool\n @param suffix_max: if the check prefixlen flag set this is the prefix\n that the IP or CIDR should be less or equal than\n @type suffix_max: int\n @return: True if it is a valid private IP (or CIDR) or False if not\n @rtype: bool\n \"\"\"\n if ip_version == 4:\n ip_range = ip_range or self.config.private_ipv4_range\n suffix_max = suffix_max or self.config.ipv4_suffix_max\n elif ip_version == 6:\n ip_range = ip_range or self.config.private_ipv6_range\n suffix_max = suffix_max or self.config.ipv6_suffix_max\n\n # This should not happen ip_version should be 4 or 6\n else:\n msg = 'Invalid IP version {0}'.format(ip_version)\n raise InvalidIPException(msg)\n\n ip_check = self.verify_ip(ip_cidr, ip_range)\n if check_prefixlen:\n prefixlen_check = self.verify_prefixlen(\n ip_cidr=ip_cidr, suffix_max=suffix_max)\n return ip_check and prefixlen_check\n return ip_check\n\n def verify_prefixlen(self, ip_cidr, suffix_max):\n \"\"\"\n @summary: Verify an IP or CIDR is within the expected prefix length,\n for an IP this should be 32, for CIDRs it can be /12-/30 on IPv4\n and /8-/64 on IPv6\n @param ip_cidr: IP or CIDR to verify\n @type ip_cidr: string\n @param suffix_max: the prefix that the IP or CIDR should be less or\n equal than\n @type suffix_max: int\n @return: True/False if the IP or CIDR has the expected prefix length\n @rtype: bool\n \"\"\"\n\n # Valid CIDR is expected starting at /12 for IPv4\n # and /8 for IPv6, if not a ValueError is raised and False returned\n try:\n prefix_length = IPy.IP(ip_cidr).prefixlen()\n except ValueError as e:\n self._log.error(e.message)\n return False\n\n # Default values are /32 for an IP and for CIDRs /30 for IPv4 and\n # /64 for IPv6\n suffix_max = int(suffix_max)\n if prefix_length <= suffix_max:\n return True\n else:\n msg = ('Unexpected prefix length of {prefix_length} for {ip_cidr},'\n 'expected value less or equal than 
{suffix_max}').format(\n prefix_length=prefix_length, ip_cidr=ip_cidr,\n suffix_max=suffix_max)\n self._log.debug(msg)\n return False\n\n def create_ipv4_cidr(self, ipv4_suffix=None, ipv4_prefix=None,\n ip_range=None, suffix_max=None):\n \"\"\"\n @summary: Creates an IPv4 cidr with given or default values\n @param ipv4_suffix: the CIDR suffix, by default 24\n @type ipv4_suffix: int\n @param ipv4_prefix: the CIDR prefix, can have * for random numbers\n between 1 and 254, by default 192.168.*.0\n @type ipv4_prefix: string\n @param ip_range: CIDR is expected to be within this range, by default\n 192.168.0.0/16 for the private IPv4 range\n @type ip_range: string\n @param suffix_max: the prefix that the CIDR should be less or\n equal than, by default 30\n @return: an IPv4 CIDR\n @rtype: string\n \"\"\"\n ipv4_suffix = ipv4_suffix or self.config.ipv4_suffix\n ipv4_prefix = ipv4_prefix or self.config.ipv4_prefix\n\n kwargs = {'mask': ipv4_suffix, 'ip_pattern': ipv4_prefix}\n cidr = random_cidr(**kwargs)\n if self.verify_private_ip(ip_cidr=cidr, ip_version=4,\n ip_range=ip_range, suffix_max=suffix_max):\n return cidr\n else:\n msg = 'Invalid IPv4 cidr {0}'.format(cidr)\n raise InvalidIPException(msg)\n\n def create_ipv6_cidr(self, ipv6_suffix=None, ipv6_prefix=None,\n ip_range=None, suffix_max=None, randomize=True):\n \"\"\"\n @summary: Creates an IPv6 cidr with given or default values\n @param ipv6_suffix: the CIDR suffix, by default 64\n @type ipv6_suffix: int\n @param ipv6_prefix: the CIDR prefix, by default fd00::\n @type ipv6_prefix: string\n @param ip_range: CIDR is expected to be within this range, by default\n fd00::/8 for the private IPv6 range\n @type ip_range: string\n @param suffix_max: the prefix that the CIDR should be less or\n equal than, by default 64\n @type suffix_max: int\n @param randomize: randomize 32 bits of the 40-bit global identifier in\n the routing prefix to prevent collisions when two private networks\n are interconnected\n @type randomize: bool\n @return: an IPv6 CIDR\n @rtype: string\n \"\"\"\n ipv6_suffix = ipv6_suffix or self.config.ipv6_suffix\n ipv6_prefix = ipv6_prefix or self.config.ipv6_prefix\n\n if randomize:\n b1 = random.getrandbits(16)\n b2 = random.getrandbits(16)\n prefix_dual_octets = ipv6_prefix.split(':')\n prefix_dual_octets[1] = '{:x}'.format(b1)\n prefix_dual_octets[2] = '{:x}'.format(b2)\n ipv6_prefix = '{0}::'.format(':'.join(prefix_dual_octets))\n\n # To be used with /64 IPv6 networks, overwriting suffix if other\n if ipv6_suffix != 64:\n msg = ('Subnet create_ipv6_cidr behavior method using '\n 'default 64 suffix instead of {0} when creating a '\n 'random cidr').format(ipv6_suffix)\n self._log.info(msg)\n ipv6_suffix = 64\n\n cidr = '{0}/{1}'.format(ipv6_prefix, ipv6_suffix)\n if self.verify_private_ip(ip_cidr=cidr, ip_version=6,\n ip_range=ip_range, suffix_max=suffix_max):\n return cidr\n else:\n msg = 'Invalid IPv6 cidr {0}'.format(cidr)\n raise InvalidIPException(msg)\n\n def get_random_ip(self, cidr):\n \"\"\"\n @summary: gets a random IP address within a CIDR excluding first and\n last IPs\n @param cidr: represents IP range to get the IP from and should be in\n the form <network_address>/<prefix>\n @type cidr: string\n @return: IP address\n @rtype: string\n \"\"\"\n if not self.verify_ip(cidr):\n msg = 'Invalid CIDR {0}'.format(cidr)\n raise InvalidIPException(msg)\n\n net = netaddr.IPNetwork(cidr)\n increment = random.randint(1, net.size - 2)\n ip = str(netaddr.IPAddress(net.first + int(increment)))\n\n return ip\n\n def 
get_next_ip(self, cidr, num=0):\n \"\"\"\n @summary: gets the IP address of a CIDR starting at the first IP\n @param cidr: represents IP range to get the IP from and should be in\n the form <network_address>/<prefix>\n @type cidr: string\n @param num: number of places from the first IP of the CIDR\n @type num: int\n @return: IP address\n @rtype: string\n \"\"\"\n if not self.verify_ip(cidr):\n msg = 'Invalid CIDR {0}'.format(cidr)\n raise InvalidIPException(msg)\n\n net = netaddr.IPNetwork(cidr)\n\n if num < net.size and num >= 0:\n ip = str(netaddr.IPAddress(net.first + int(num)))\n else:\n msg = ('Invalid next value. Expected value greater than 0 and less'\n ' than the network size of {0}').format(net.size)\n raise InvalidIPException(msg)\n\n return ip\n\n def get_previous_ip(self, cidr, num=0):\n \"\"\"\n @summary: gets an IP address within a CIDR from the last IP\n @param cidr: represents IP range to get the IP from and should be in\n the form <network_address>/<prefix>\n @type cidr: string\n @param num: number of places from the last IP of the CIDR\n @type num: int\n @return: IP address\n @rtype: string\n \"\"\"\n\n if not self.verify_ip(cidr):\n msg = 'Invalid CIDR {0}'.format(cidr)\n raise InvalidIPException(msg)\n\n net = netaddr.IPNetwork(cidr)\n\n if num < net.size and num >= 0:\n ip = str(netaddr.IPAddress(net.last - int(num)))\n else:\n msg = ('Invalid next value. Expected value greater than 0 and less'\n ' than the network size of {0}').format(net.size)\n raise InvalidIPException(msg)\n\n return ip\n\n def get_ips(self, cidr, num=1):\n \"\"\"\n @summary: get n random IPs within a cidr\n @param cidr: represents IP range to get the IPs from and should be in\n the form <network_address>/<prefix>\n @type cidr: string\n @param num: number of IPs to get\n @type num: int\n @return: IP list\n @rtype: list\n \"\"\"\n ips = [self.get_random_ip(cidr) for x in range(num)]\n return ips\n\n def get_fixed_ip(self, subnet_id, cidr, num=1):\n \"\"\"\n @summary: gets a Subnet fixed IP\n @param subnet_id: Subnet ID\n @type subnet_id: string\n @param cidr: represents IP range to get the IP from and should be in\n the form <network_address>/<prefix>\n @type cidr: string\n @param num: number of places from the first IP of the CIDR for the\n fixed IP address\n @type num: int\n @return: fixed IP\n @rtype: dict\n \"\"\"\n ip = self.get_next_ip(cidr=cidr, num=num)\n return dict(subnet_id=subnet_id, ip_address=ip)\n\n def get_fixed_ips(self, subnet, num=1, timeout=None):\n \"\"\"\n @summary: generates multiple fixed ips within a subnet\n @param subnet: subnet entity object\n @type subnet: models.response.subnet.Subnet\n @param num: number of fixed IPs to get\n @type num: int\n @param timeout: timeout for replacing duplicate IPs\n @type timeout: int\n @return: fixed IPs\n @rtype: list\n \"\"\"\n cidr = subnet.cidr\n ips = self.get_ips(cidr=cidr, num=num)\n ips_count = len(ips)\n\n # Removing duplicate IPs in case of any and trying to replace\n ips = list(set(ips))\n if ips_count != len(ips):\n duplicate_count = ips_count - len(ips)\n timeout = timeout or self.config.resource_get_timeout\n endtime = time.time() + int(timeout)\n while duplicate_count > 0 and time.time() < endtime:\n new_ip = self.get_random_ip(cidr)\n if new_ip not in ips:\n ips.append(new_ip)\n duplicate_count -= 1\n fixed_ips = [dict(subnet_id=subnet.id, ip_address=ip) for ip in ips]\n return fixed_ips\n\n def get_allocation_pool(self, cidr, first_increment=1, last_decrement=1,\n start_increment=None, end_increment=None):\n \"\"\"\n 
@summary: gets default allocation pool for an IPv4/IPv6 address\n @param cidr: represents IP range for the subnet and should be in the\n form <network_address>/<prefix>\n @type cidr: string\n @param first_increment: places from the fist IP of the CIDR to the\n first IP of the allocation pool\n @type first_increment: int\n @param last_decrement: places from the last IP of the CIDR to the last\n IP of the allocation pool\n @type last_decrement: int\n @param start_increment: if given, start IP of allocation pool\n @type start_increment: int\n @param end_increment: if given, end IP of allocation pool\n @type end_increment: int\n @return: allocation pool\n @rtype: dict\n \"\"\"\n\n if not self.verify_ip(cidr):\n raise InvalidIPException\n net = netaddr.IPNetwork(cidr)\n\n if start_increment and end_increment:\n first_ip = str(netaddr.IPAddress(net.first + start_increment))\n last_ip = str(netaddr.IPAddress(net.first + end_increment))\n else:\n first_ip = str(netaddr.IPAddress(net.first + first_increment))\n last_ip = str(netaddr.IPAddress(net.last - last_decrement))\n\n return dict(start=first_ip, end=last_ip)\n\n def get_allocation_pools(self, cidr, start_increment, ip_range, interval,\n num):\n \"\"\"\n @summary: Generates allocation pools subnet data\n @param cidr: cidr for allocation pools\n @type cidr: string\n @param start_increment: increment from first cidr address to first\n allocation pool IP address\n @type start_increment: int\n @param ip_range: ip addresses from start IP to end IP of allocation\n pool\n @type ip_range: int\n @param interval: ip addresses from end of allocation pool to start IP\n of the next allocation pool (if multiple)\n @type interval: int\n @param num: number of allocation pools to create within the cidr\n @type num: int\n @return: allocation pools\n @rtype: list\n \"\"\"\n allocation_pools = []\n for _ in range(num):\n end_increment = start_increment + ip_range\n allocation_pool = self.get_allocation_pool(cidr=cidr,\n start_increment=start_increment, end_increment=end_increment)\n allocation_pools.append(allocation_pool)\n start_increment = end_increment + interval\n return allocation_pools\n\n def get_host_routes(self, cidr, ips):\n \"\"\"\n @summary: create 1 or more host routes\n @param cidr: host_route destination CIDR\n @type cidr: string\n @param ips: host_routes nexthops\n @type ips: list(str)\n \"\"\"\n host_routes = [dict(destination=cidr, nexthop=ip) for ip in ips]\n return host_routes\n\n def format_dns_nameservers(self, dns_nameservers):\n \"\"\"\n @summary: formats dns_nameservers for assertions removing zeros on\n IPv6 addresses\n @param dns_nameservers: list of dns_nameservers\n @type dns_nameservers: list(str)\n @return: formated dns_nameservers\n @rtype: list(str)\n \"\"\"\n dns_ns = [str(netaddr.IPAddress(svr)) for svr in dns_nameservers]\n return dns_ns\n\n def format_allocation_pools(self, allocation_pools):\n \"\"\"\n @summary: formats allocation pools for assertions removing zeros on\n IPv6 addresses\n @param allocation_pools: list of allocation pools\n @type allocation_pools: list(dict)\n @return: formated allocation pools\n @rtype: list(dict)\n \"\"\"\n formated_allocation_pools = []\n for pool in allocation_pools:\n result = dict(start=str(netaddr.IPAddress(pool['start'])),\n end=str(netaddr.IPAddress(pool['end'])))\n formated_allocation_pools.append(result)\n return formated_allocation_pools\n\n def create_subnet(self, network_id, ip_version=None, cidr=None, name=None,\n tenant_id=None, gateway_ip=None, dns_nameservers=None,\n 
allocation_pools=None, host_routes=None,\n enable_dhcp=None, resource_build_attempts=None,\n raise_exception=True, use_exact_name=False,\n poll_interval=None):\n \"\"\"\n @summary: Creates and verifies a Subnet is created as expected\n @param name: human readable name for the subnet, may not be unique.\n (CRUD: CRU)\n @type name: string\n @param tenant_id: owner of the network. (CRUD: CR)\n @type tenant_id: string\n @param network_id: network subnet is associated with (CRUD: CR)\n @type network_id: string\n @param ip_version: IP version 4 or 6 (CRUD: CR), if the CIDR is given\n this is optional and the CIDR one will be taken\n @type ip_version: int\n @param cidr: represents IP range for the subnet and should be in the\n form <network_address>/<prefix> (CRUD: CR)\n @type cidr: string\n @param gateway_ip: default gateway used by devices in the subnet\n (CRUD: CRUD)\n @type gateway_ip: string\n @param dns_nameservers: DNS name servers used by subnet hosts\n (CRUD: CRU)\n @type dns_nameservers: list(str)\n @param allocation_pools: sub range of cidr available for dynamic\n allocation to ports (CRUD: CR)\n @type allocation_pools: list(dict)\n @param host_routes: routes that should be used by devices with IPs from\n this subnet (does not includes the local route, CRUD: CRU)\n @type host_routes: list(dict)\n @param enable_dhcp: whether DHCP is enabled (CRUD:CRU)\n @type enable_dhcp: bool\n @param resource_build_attempts: number of API retries\n @type resource_build_attempts:int\n @param raise_exception: flag to raise an exception if the Subnet was\n not created or to return None\n @type raise_exception: bool\n @param use_exact_name: flag if the exact name given should be used\n @type use_exact_name: bool\n @param poll_interval: sleep time interval between API retries\n @type poll_interval: int\n @return: NetworkingResponse object with api response and failure list\n @rtype: common.behaviors.NetworkingResponse\n \"\"\"\n if not network_id:\n raise NetworkIDMissingException\n if cidr:\n if self.verify_ip(cidr):\n ip_version = IPy.IP(cidr).version()\n else:\n raise InvalidIPException\n else:\n if ip_version == 6:\n cidr = self.create_ipv6_cidr()\n else:\n\n # Setting the default create version to 4 if not given\n ip_version = 4\n cidr = self.create_ipv4_cidr()\n\n if name is None:\n name = rand_name(self.config.starts_with_name)\n elif not use_exact_name:\n name = rand_name(name)\n\n poll_interval = poll_interval or self.config.api_poll_interval\n resource_build_attempts = (resource_build_attempts or\n self.config.api_retries)\n\n result = NetworkingResponse()\n err_msg = 'Subnet Create failure'\n for attempt in range(resource_build_attempts):\n self._log.debug('Attempt {0} of {1} building subnet {2}'.format(\n attempt + 1, resource_build_attempts, name))\n\n resp = self.client.create_subnet(\n network_id=network_id, ip_version=ip_version, cidr=cidr,\n name=name, tenant_id=tenant_id, gateway_ip=gateway_ip,\n dns_nameservers=dns_nameservers,\n allocation_pools=allocation_pools, host_routes=host_routes,\n enable_dhcp=enable_dhcp)\n\n resp_check = self.check_response(resp=resp,\n status_code=NeutronResponseCodes.CREATE_SUBNET, label=name,\n message=err_msg, network_id=network_id)\n\n result.response = resp\n if not resp_check:\n return result\n\n # Failures will be an empty list if the update was successful the\n # first time\n result.failures.append(resp_check)\n time.sleep(poll_interval)\n\n else:\n err_msg = (\n 'Unable to create {0} subnet after {1} attempts: '\n '{2}').format(name, 
resource_build_attempts, result.failures)\n self._log.error(err_msg)\n if raise_exception:\n raise ResourceBuildException(err_msg)\n return result\n\n def update_subnet(self, subnet_id, name=None, gateway_ip=None,\n dns_nameservers=None, host_routes=None,\n enable_dhcp=None, allocation_pools=None,\n resource_update_attempts=None, raise_exception=False,\n poll_interval=None):\n \"\"\"\n @summary: Updates and verifies a specified Subnet\n @param subnet_id: The UUID for the subnet\n @type subnet_id: string\n @param name: human readable name for the subnet, may not be unique\n (CRUD: CRU)\n @type name: string\n @param gateway_ip: default gateway used by devices in the subnet\n (CRUD: CRUD)\n @type gateway_ip: string\n @param dns_nameservers: DNS name servers used by subnet hosts\n (CRUD: CRU)\n @type dns_nameservers: list(str)\n @param host_routes: routes that should be used by devices with IPs\n from this subnet (does not includes the local route (CRUD: CRU)\n @type host_routes: list(dict)\n @param enable_dhcp: whether DHCP is enabled (CRUD:CRU)\n @type enable_dhcp: bool\n @param allocation_pools: sub range of cidr available for dynamic\n allocation to ports (CRUD: CRU)\n @type allocation_pools: list(dict)\n @param resource_update_attempts: number of API retries\n @type resource_update_attempts: int\n @param raise_exception: flag to raise an exception if the\n Subnet was not updated or to return None\n @type raise_exception: bool\n @param poll_interval: sleep time interval between API retries\n @type poll_interval: int\n @return: NetworkingResponse object with api response and failure list\n @rtype: common.behaviors.NetworkingResponse\n \"\"\"\n poll_interval = poll_interval or self.config.api_poll_interval\n resource_update_attempts = (resource_update_attempts or\n self.config.api_retries)\n\n result = NetworkingResponse()\n err_msg = 'Subnet Update failure'\n for attempt in range(resource_update_attempts):\n self._log.debug('Attempt {0} of {1} updating subnet {2}'.format(\n attempt + 1, resource_update_attempts, subnet_id))\n\n resp = self.client.update_subnet(\n subnet_id=subnet_id, name=name, gateway_ip=gateway_ip,\n dns_nameservers=dns_nameservers, host_routes=host_routes,\n enable_dhcp=enable_dhcp, allocation_pools=allocation_pools)\n\n resp_check = self.check_response(resp=resp,\n status_code=NeutronResponseCodes.UPDATE_SUBNET,\n label=subnet_id, message=err_msg)\n\n result.response = resp\n if not resp_check:\n return result\n\n # Failures will be an empty list if the update was successful the\n # first time\n result.failures.append(resp_check)\n time.sleep(poll_interval)\n\n else:\n err_msg = (\n 'Unable to update {0} subnet after {1} attempts: '\n '{2}').format(subnet_id, resource_update_attempts,\n result.failures)\n self._log.error(err_msg)\n if raise_exception:\n raise ResourceUpdateException(err_msg)\n return result\n\n def get_subnet(self, subnet_id, resource_get_attempts=None,\n raise_exception=False, poll_interval=None):\n \"\"\"\n @summary: Shows and verifies a specified subnet\n @param subnet_id: The UUID for the subnet\n @type subnet_id: string\n @param resource_get_attempts: number of API retries\n @type resource_get_attempts: int\n @param raise_exception: flag to raise an exception if the get\n Subnet was not as expected or to return None\n @type raise_exception: bool\n @param poll_interval: sleep time interval between API retries\n @type poll_interval: int\n @return: NetworkingResponse object with api response and failure list\n @rtype: 
common.behaviors.NetworkingResponse\n \"\"\"\n poll_interval = poll_interval or self.config.api_poll_interval\n resource_get_attempts = (resource_get_attempts or\n self.config.api_retries)\n\n result = NetworkingResponse()\n err_msg = 'Subnet Get failure'\n for attempt in range(resource_get_attempts):\n self._log.debug('Attempt {0} of {1} getting subnet {2}'.format(\n attempt + 1, resource_get_attempts, subnet_id))\n\n resp = self.client.get_subnet(subnet_id=subnet_id)\n\n resp_check = self.check_response(resp=resp,\n status_code=NeutronResponseCodes.GET_SUBNET,\n label=subnet_id, message=err_msg)\n\n result.response = resp\n if not resp_check:\n return result\n\n # Failures will be an empty list if the get was successful the\n # first time\n result.failures.append(resp_check)\n time.sleep(poll_interval)\n\n else:\n err_msg = (\n 'Unable to GET {0} subnet after {1} attempts: '\n '{2}').format(subnet_id, resource_get_attempts,\n result.failures)\n self._log.error(err_msg)\n if raise_exception:\n raise ResourceGetException(err_msg)\n return result\n\n def list_subnets(self, subnet_id=None, network_id=None, cidr=None,\n tenant_id=None, gateway_ip=None, ip_version=None,\n enable_dhcp=None, name=None, limit=None, marker=None,\n page_reverse=None, resource_list_attempts=None,\n raise_exception=False, poll_interval=None):\n \"\"\"\n @summary: Lists subnets and verifies the response is the expected\n @param subnet_id: subnet ID to filter by\n @type subnet_id: string\n @param network_id: network ID to filter by\n @type network_id: string\n @param cidr: cider to filter by\n @type cidr: string\n @param tenant_id: owner of the network to filter by\n @type tenant_id: string\n @param gateway_ip: gateway_ip to filter by\n @type gateway_ip: string\n @param ip_version: IP version 4 or 6 to filter by\n @type ip_version: int\n @param enable_dhcp: enable_dhcp status to filter by\n @type enable_dhcp: bool\n @param name: subnet name to filter by\n @type name: string\n @param limit: page size\n @type limit: int\n @param marker: Id of the last item of the previous page\n @type marker: string\n @param page_reverse: direction of the page\n @type page_reverse: bool\n @param resource_list_attempts: number of API retries\n @type resource_list_attempts: int\n @param raise_exception: flag to raise an exception if the list\n Subnet was not as expected or to return None\n @type raise_exception: bool\n @param poll_interval: sleep time interval between API retries\n @type poll_interval: int\n @return: NetworkingResponse object with api response and failure list\n @rtype: common.behaviors.NetworkingResponse\n \"\"\"\n poll_interval = poll_interval or self.config.api_poll_interval\n resource_list_attempts = (resource_list_attempts or\n self.config.api_retries)\n\n result = NetworkingResponse()\n err_msg = 'Subnet List failure'\n for attempt in range(resource_list_attempts):\n self._log.debug('Attempt {0} of {1} with subnet list'.format(\n attempt + 1, resource_list_attempts))\n\n resp = self.client.list_subnets(\n subnet_id=subnet_id, network_id=network_id, cidr=cidr,\n tenant_id=tenant_id, gateway_ip=gateway_ip,\n ip_version=ip_version, enable_dhcp=enable_dhcp, name=name,\n limit=limit, marker=marker, page_reverse=page_reverse)\n\n resp_check = self.check_response(resp=resp,\n status_code=NeutronResponseCodes.LIST_SUBNETS,\n label='', message=err_msg)\n\n result.response = resp\n if not resp_check:\n return result\n\n # Failures will be an empty list if the list was successful the\n # first time\n 
result.failures.append(resp_check)\n            time.sleep(poll_interval)\n\n        else:\n            err_msg = (\n                'Unable to LIST subnets after {0} attempts: '\n                '{1}').format(resource_list_attempts, result.failures)\n            self._log.error(err_msg)\n            if raise_exception:\n                raise ResourceListException(err_msg)\n        return result\n\n    def delete_subnet(self, subnet_id, resource_delete_attempts=None,\n                      raise_exception=False, poll_interval=None):\n        \"\"\"\n        @summary: Deletes and verifies a specified subnet is deleted\n        @param subnet_id: The UUID for the subnet\n        @type subnet_id: string\n        @param resource_delete_attempts: number of API retries\n        @type resource_delete_attempts: int\n        @param raise_exception: flag to raise an exception if the deleted\n            Subnet was not as expected or to return None\n        @type raise_exception: bool\n        @param poll_interval: sleep time interval between API retries\n        @type poll_interval: int\n        @return: NetworkingResponse object with api response and failure list\n        @rtype: common.behaviors.NetworkingResponse\n        \"\"\"\n        poll_interval = poll_interval or self.config.api_poll_interval\n        resource_delete_attempts = (resource_delete_attempts or\n                                    self.config.api_retries)\n\n        result = NetworkingResponse()\n        for attempt in range(resource_delete_attempts):\n            self._log.debug('Attempt {0} of {1} deleting subnet {2}'.format(\n                attempt + 1, resource_delete_attempts, subnet_id))\n\n            resp = self.client.delete_subnet(subnet_id=subnet_id)\n            result.response = resp\n\n            # Delete response is without entity so resp_check can not be used\n            if (resp.ok and\n                resp.status_code == NeutronResponseCodes.DELETE_SUBNET):\n                return result\n\n            err_msg = ('{subnet} Subnet Delete failure, expected status '\n                       'code: {expected_status}. Response: {status} {reason} '\n                       '{content}').format(\n                subnet=subnet_id,\n                expected_status=NeutronResponseCodes.DELETE_SUBNET,\n                status=resp.status_code, reason=resp.reason,\n                content=resp.content)\n            self._log.error(err_msg)\n            result.failures.append(err_msg)\n            time.sleep(poll_interval)\n\n        else:\n            err_msg = (\n                'Unable to DELETE {0} subnet after {1} attempts: '\n                '{2}').format(subnet_id, resource_delete_attempts,\n                              result.failures)\n            self._log.error(err_msg)\n            if raise_exception:\n                raise ResourceDeleteException(err_msg)\n        return result\n\n    def clean_subnet(self, subnet_id, timeout=None, poll_interval=None):\n        \"\"\"\n        @summary: deletes a subnet within a timeout\n        @param subnet_id: The UUID for the subnet\n        @type subnet_id: string\n        @param timeout: seconds to wait for the subnet to be deleted\n        @type timeout: int\n        @param poll_interval: sleep time interval between API delete/get calls\n        @type poll_interval: int\n        @return: None if delete was successful or the undeleted subnet_id\n        @rtype: None or string\n        \"\"\"\n        timeout = timeout or self.config.resource_delete_timeout\n        poll_interval = poll_interval or self.config.api_poll_interval\n        endtime = time.time() + int(timeout)\n        log_msg = 'Deleting {0} subnet within a {1}s timeout '.format(\n            subnet_id, timeout)\n        self._log.info(log_msg)\n        resp = None\n        while time.time() < endtime:\n            try:\n                self.client.delete_subnet(subnet_id=subnet_id)\n                resp = self.client.get_subnet(subnet_id=subnet_id)\n            except Exception as err:\n                err_msg = ('Encountered an exception deleting a subnet with '\n                           'the clean_subnet method. 
Exception: {0}').format(err)\n self._log.error(err_msg)\n if (resp is not None and\n resp.status_code == NeutronResponseCodes.NOT_FOUND):\n return None\n time.sleep(poll_interval)\n\n err_msg = 'Unable to delete {0} subnet within a {1}s timeout'.format(\n subnet_id, timeout)\n self._log.error(err_msg)\n return subnet_id\n\n def clean_subnets(self, subnets_list, timeout=None, poll_interval=None):\n \"\"\"\n @summary: deletes each subnet from a list calling clean_subnet\n @param subnets_list: list of subnets UUIDs\n @type subnets_list: list(str)\n @param timeout: seconds to wait for the subnet to be deleted\n @type timeout: int\n @param poll_interval: sleep time interval between API delete/get calls\n @type poll_interval: int\n @return: list of undeleted subnets UUIDs\n @rtype: list(str)\n \"\"\"\n log_msg = 'Deleting subnets: {0}'.format(subnets_list)\n self._log.info(log_msg)\n undeleted_subnets = []\n for subnet in subnets_list:\n result = self.clean_subnet(subnet_id=subnet, timeout=timeout,\n poll_interval=poll_interval)\n if result:\n undeleted_subnets.append(result)\n if undeleted_subnets:\n err_msg = 'Unable to delete subnets: {0}'.format(\n undeleted_subnets)\n self._log.error(err_msg)\n return undeleted_subnets\n","sub_path":"cloudcafe/networking/networks/subnets_api/behaviors.py","file_name":"behaviors.py","file_ext":"py","file_size_in_byte":36714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"377770421","text":"#!/usr/bin/python\nimport wx\nimport random\nblues=['#0000ff','#4682b4','#00008b','#00bfff','#0d4f60']\nclass MainFrame(wx.Frame):\n\n def __init__(self):\n wx.Frame.__init__(self, None, title='Led')\n panel = wx.Panel(self)\n self.led1 = wx.StaticBox(panel, wx.ID_ANY, \"\", size=(20,20), pos=(10,10))\n self.led2 = wx.StaticBox(panel, wx.ID_ANY, \"\", size=(20,20), pos=(31,10))\n self.led3 = wx.StaticBox(panel, wx.ID_ANY, \"\", size=(20,20), pos=(52,10))\n self.led4 = wx.StaticBox(panel, wx.ID_ANY, \"\", size=(20,20), pos=(73,10))\n self.led5 = wx.StaticBox(panel, wx.ID_ANY, \"\", size=(20,20), pos=(94,10))\n self.led1.SetBackgroundColour(wx.Colour( 255, 0, 0 ) )\n self.led2.SetBackgroundColour(blues[1])\n self.led3.SetBackgroundColour(blues[2])\n self.led4.SetBackgroundColour(blues[3])\n self.led5.SetBackgroundColour(blues[4])\n self.counter = -5\n self.timer = wx.Timer(self)\n self.timer.Start(1000)\n self.Bind(wx.EVT_TIMER, self.OnTimer)\n self.Show()\n\n def OnTimer(self, evt):\n self.led1.SetBackgroundColour(wx.Colour( 0, 255, 0 ) )\n self.led2.SetBackgroundColour(random.choice(blues))\n self.led3.SetBackgroundColour(random.choice(blues))\n self.led4.SetBackgroundColour(random.choice(blues))\n self.led5.SetBackgroundColour(random.choice(blues))\n\nif __name__ == '__main__':\n app = wx.App()\n frame = MainFrame()\n app.MainLoop()\n","sub_path":"led.py","file_name":"led.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"440937753","text":"import unittest\nimport numpy as np\nimport vigra\nfrom lazyflow.graph import Graph\nfrom ilastik.applets.objectClassification.opObjectClassification import \\\n OpRelabelSegmentation, OpObjectTrain, OpObjectPredict\nfrom ilastik.applets.objectExtraction.opObjectExtraction import \\\n OpRegionFeatures, OpAdaptTimeListRoi\n\nFEATURES = \\\n[\n [ 'Count',\n 'RegionCenter',\n 'Coord<ArgMaxWeight>',\n 'Coord<Minimum>',\n 'Coord<Maximum>' ],\n []\n]\n\n\ndef segImage():\n img = 
np.zeros((2, 50, 50, 50, 1), dtype=np.int)\n img[0, 0:10, 0:10, 0:10, 0] = 1\n img[0, 20:25, 20:25, 20:25, 0] = 2\n img[1, 0:10, 0:10, 0:10, 0] = 1\n img[1, 10:20, 10:20, 10:20, 0] = 2\n img[1, 20:25, 20:25, 20:25, 0] = 3\n \n img = img.view(vigra.VigraArray)\n img.axistags = vigra.defaultAxistags('txyzc') \n return img\n\n\nclass TestOpRelabelSegmentation(unittest.TestCase):\n def setUp(self):\n g = Graph()\n self.op = OpRelabelSegmentation(graph=g)\n\n def test(self):\n segimg = segImage()\n map_ = {0 : np.array([10, 20, 30]),\n 1 : np.array([40, 50, 60, 70])}\n self.op.Image.setValue(segimg)\n self.op.ObjectMap.setValue(map_)\n self.op.Features._setReady() # hack because we do not use features\n img = self.op.Output.value\n\n self.assertEquals(img[0, 49, 49, 49, 0], 10)\n self.assertEquals(img[1, 49, 49, 49, 0], 40)\n self.assertTrue(np.all(img[0, 0:10, 0:10, 0:10, 0] == 20))\n self.assertTrue(np.all(img[0, 20:25, 20:25, 20:25, 0] == 30))\n self.assertTrue(np.all(img[1, 0:10, 0:10, 0:10, 0] == 50))\n self.assertTrue(np.all(img[1, 10:20, 10:20, 10:20, 0] == 60))\n self.assertTrue(np.all(img[1, 20:25, 20:25, 20:25, 0] == 70))\n\n\nclass TestOpObjectTrain(unittest.TestCase):\n def setUp(self):\n segimg = segImage()\n\n rawimg = np.indices(segimg.shape).sum(0).astype(np.float32)\n rawimg = rawimg.view(vigra.VigraArray)\n rawimg.axistags = vigra.defaultAxistags('txyzc')\n\n g = Graph()\n self.featsop = OpRegionFeatures(FEATURES, graph=g)\n self.featsop.LabelImage.setValue(segimg)\n self.featsop.RawImage.setValue( rawimg )\n\n self._opRegFeatsAdaptOutput = OpAdaptTimeListRoi(graph=g)\n self._opRegFeatsAdaptOutput.Input.connect(self.featsop.Output)\n\n self.op = OpObjectTrain(graph=g)\n self.op.Features.resize(1)\n self.op.Features[0].connect(self._opRegFeatsAdaptOutput.Output)\n self.op.FixClassifier.setValue(False)\n self.op.ForestCount.setValue(1)\n\n def test_train(self):\n labels = {0 : np.array([0, 1, 2]),\n 1 : np.array([0, 1, 1, 2])}\n self.op.Labels.resize(1)\n self.op.Labels.setValue(labels)\n \n assert self.op.Classifier.ready()\n\nclass TestOpObjectPredict(unittest.TestCase):\n def setUp(self):\n segimg = segImage()\n labels = {0 : np.array([0, 1, 2]),\n 1 : np.array([0, 0, 0, 0,])}\n\n rawimg = np.indices(segimg.shape).sum(0).astype(np.float32)\n rawimg = rawimg.view(vigra.VigraArray)\n rawimg.axistags = vigra.defaultAxistags('txyzc')\n\n g = Graph()\n self.featsop = OpRegionFeatures(FEATURES, graph=g)\n self.featsop.LabelImage.setValue(segimg)\n self.featsop.RawImage.setValue( rawimg )\n assert self.featsop.Output.ready()\n\n self._opRegFeatsAdaptOutput = OpAdaptTimeListRoi(graph=g)\n self._opRegFeatsAdaptOutput.Input.connect(self.featsop.Output)\n assert self._opRegFeatsAdaptOutput.Output.ready()\n\n self.trainop = OpObjectTrain(graph=g)\n self.trainop.Features.resize(1)\n self.trainop.Features[0].connect(self._opRegFeatsAdaptOutput.Output)\n self.trainop.Labels.resize(1)\n self.trainop.Labels.setValues([labels])\n self.trainop.FixClassifier.setValue(False)\n self.trainop.ForestCount.setValue(1)\n assert self.trainop.Classifier.ready()\n\n self.op = OpObjectPredict(graph=g)\n self.op.Classifier.connect(self.trainop.Classifier)\n self.op.Features.connect(self._opRegFeatsAdaptOutput.Output)\n assert self.op.Predictions.ready()\n\n def test_train(self):\n preds = self.op.Predictions([0, 1]).wait()\n self.assertTrue(np.all(preds[0] == np.array([0, 1, 2])))\n self.assertTrue(np.all(preds[1] == np.array([0, 1, 1, 2])))\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"tests/test_applets/objectClassification/testOperators.py","file_name":"testOperators.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"446651402","text":"# Copyright (c) 2015 Rackspace, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport uuid\n\nimport ddt\nfrom hypothesis import given\nfrom hypothesis import strategies\n\nfrom tests.functional.transport.pecan import base\n\n\n@ddt.ddt\nclass TestServicesState(base.FunctionalTest):\n\n def setUp(self):\n super(TestServicesState, self).setUp()\n\n self.project_id = str(uuid.uuid4())\n self.service_name = str(uuid.uuid1())\n self.flavor_id = str(uuid.uuid1())\n\n # create a mock flavor to be used by new service creations\n flavor_json = {\n \"id\": self.flavor_id,\n \"providers\": [\n {\n \"provider\": \"mock\",\n \"links\": [\n {\n \"href\": \"http://mock.cdn\",\n \"rel\": \"provider_url\"\n }\n ]\n }\n ]\n }\n response = self.app.post('/v1.0/flavors',\n params=json.dumps(flavor_json),\n headers={\n \"Content-Type\": \"application/json\",\n \"X-Project-ID\": self.project_id})\n\n self.assertEqual(201, response.status_code)\n\n # create an initial service to be used by the tests\n self.service_json = {\n \"name\": self.service_name,\n \"domains\": [\n {\"domain\": \"test.mocksite.com\"},\n {\"domain\": \"blog.mocksite.com\"}\n ],\n \"origins\": [\n {\n \"origin\": \"mocksite.com\",\n \"port\": 80,\n \"ssl\": False\n }\n ],\n \"flavor_id\": self.flavor_id,\n \"caching\": [\n {\n \"name\": \"default\",\n \"ttl\": 3600\n }\n ],\n \"restrictions\": [\n {\n \"name\": \"website only\",\n \"type\": \"whitelist\",\n \"rules\": [\n {\n \"name\": \"mocksite.com\",\n \"referrer\": \"www.mocksite.com\"\n }\n ]\n }\n ]\n }\n\n response = self.app.post('/v1.0/services',\n params=json.dumps(self.service_json),\n headers={\n 'Content-Type': 'application/json',\n 'X-Project-ID': self.project_id})\n self.assertEqual(202, response.status_code)\n self.assertTrue('Location' in response.headers)\n\n self.service_id = (response.headers['Location']\n [response.headers['Location'].rfind('/') + 1:])\n\n self.req_body = {\n 'project_id': self.project_id,\n 'service_id': self.service_id,\n }\n\n @ddt.data(u'deployed', u'failed')\n def test_services_state_valid_states(self, status):\n response = self.app.get(\n '/v1.0/services/{0}'.format(self.service_id),\n headers={'X-Project-ID': self.project_id}\n )\n\n self.assertEqual(200, response.status_code)\n\n self.req_body['status'] = status\n response = self.app.post(\n '/v1.0/admin/services/status',\n params=json.dumps(self.req_body),\n headers={'Content-Type': 'application/json',\n 'X-Project-ID': str(uuid.uuid4())})\n\n self.assertEqual(response.status_code, 201)\n\n @given(strategies.text())\n def test_services_state_invalid_states(self, status):\n # invalid status field\n self.req_body['status'] = status\n response = self.app.post(\n 
'/v1.0/admin/services/status',\n params=json.dumps(self.req_body),\n headers={'Content-Type': 'application/json',\n 'X-Project-ID': str(uuid.uuid4())},\n expect_errors=True)\n\n self.assertEqual(response.status_code, 400)\n\n @given(strategies.text())\n def test_services_state_invalid_service_id(self, service_id):\n # invalid service_id field\n self.req_body['status'] = 'deployed'\n self.req_body['service_id'] = service_id\n response = self.app.post(\n '/v1.0/admin/services/status',\n params=json.dumps(self.req_body),\n headers={'Content-Type': 'application/json',\n 'X-Project-ID': str(uuid.uuid4())},\n expect_errors=True)\n\n self.assertEqual(response.status_code, 400)\n\n def test_services_state_invalid_project_id(self):\n # NOTE(TheSriram): the min size is assigned to 257, since\n # project_id regex allows up to 256 chars\n # invalid project_id field\n project_id = '_'.join([str(uuid.uuid4()) for i in range(7)])\n self.assertTrue(len(project_id) > 256)\n self.req_body['project_id'] = project_id\n self.req_body['status'] = 'deployed'\n response = self.app.post(\n '/v1.0/admin/services/status',\n params=json.dumps(self.req_body),\n headers={'Content-Type': 'application/json',\n 'X-Project-ID': str(uuid.uuid4())},\n expect_errors=True)\n\n self.assertEqual(response.status_code, 400)\n","sub_path":"tests/functional/transport/pecan/controllers/test_set_service_status.py","file_name":"test_set_service_status.py","file_ext":"py","file_size_in_byte":5923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"564297139","text":"from math import pi\n\ndef área_círculo(rad):\n area = pi*rad**2\n return area\ndef longitud_circunferencia(rad):\n long = 2 * pi * rad\n return long\n\nradio = float(input(\"Introduce el radio: \"))\narea = área_círculo(radio)\nlongitud = longitud_circunferencia(radio)\n\nprint(\"Área: {0:.2f}\".format(area),\"\\n\"\"Longitud: {0:.2f}\".format(longitud))","sub_path":"Prac3/ej01.py","file_name":"ej01.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"419975320","text":"import pandas as pd\nimport sys\nimport os\n\ndef reorder_index(team_csv):\n location = sys.path[0] + '\\\\teamcsvs'\n team_df = pd.DataFrame.from_csv(location + '\\\\' + team_csv)\n cols_df = pd.DataFrame.from_csv(location + '\\\\ARI.csv')\n cols = cols_df.columns\n team_df = team_df[cols]\n team_df.to_csv(location + '\\\\' + team_csv)\n\nfiles = os.listdir(sys.path[0] + '\\\\teamcsvs')\nfor f in files:\n reorder_index(f)\n","sub_path":"NFL2016/reorder_index.py","file_name":"reorder_index.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"31141422","text":"# The MIT License (MIT)\n# Copyright © 2021 Yuma Rao\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated \n# documentation files (the “Software”), to deal in the Software without restriction, including without limitation \n# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, \n# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of \n# the Software.\n\n# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 
IMPLIED, INCLUDING BUT NOT LIMITED TO\n# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL \n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION \n# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \n# DEALINGS IN THE SOFTWARE.\n\nfrom loguru import logger\nimport torch\n\nclass ModelInformationNotFoundException(Exception):\n    pass\n\nclass ModelToolbox:\n    def __init__(self, model_class, optimizer_class):\n        self.model_class = model_class\n        self.optimizer_class = optimizer_class\n\n\n    def save_model(self, miner_path, model_info):\n        \"\"\"Saves the model locally. \n\n        Args:\n            model_info (:obj:`dict`, `required`): Dictionary containing the epoch we are saving at, the loss, and the PyTorch model object.\n\n        Raises:\n            :obj:`ModelInformationNotFoundException`: Raised whenever the loss, epoch, or PyTorch model object is missing from the input dictionary.\n        \"\"\"\n        try:\n            if 'epoch' not in model_info.keys():\n                raise ModelInformationNotFoundException(\"Missing 'epoch' in torch save dict\")\n\n            if 'loss' not in model_info.keys():\n                raise ModelInformationNotFoundException(\"Missing 'loss' in torch save dict\")\n            \n            if 'model_state_dict' not in model_info.keys():\n                raise ModelInformationNotFoundException(\"Missing 'model' in torch save dict\")\n\n            if 'optimizer_state_dict' not in model_info.keys():\n                raise ModelInformationNotFoundException(\"Missing 'optimizer' in torch save dict\")\n            \n            logger.info( 'Saving/Serving model: epoch: {}, loss: {}, path: {}/model.torch'.format(model_info['epoch'], model_info['loss'], miner_path))\n            torch.save(model_info,\"{}/model.torch\".format(miner_path))\n\n        except ModelInformationNotFoundException as e:\n            logger.error(\"Encountered exception trying to save model: {}\", e)\n        \n    def load_model(self, config):\n        \"\"\" Loads a model saved by save_model() and returns it. \n\n        Returns:\n            model (:obj:`torch.nn.Module`) : Model that was saved earlier, loaded back up using the state dict and optimizer. \n            optimizer (:obj:`torch.optim`) : Model optimizer that was saved with the model.\n        \"\"\"\n        model = self.model_class( config )\n        optimizer = self.optimizer_class(model.parameters(), lr = config.miner.learning_rate, momentum=config.miner.momentum)\n        \n        try:\n            checkpoint = torch.load(\"{}/model.torch\".format(config.miner.full_path))\n            model.load_state_dict(checkpoint['model_state_dict'])\n            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n            epoch = checkpoint['epoch']\n            loss = checkpoint['loss']\n\n            logger.info( 'Reloaded model: epoch: {}, loss: {}, path: {}/model.torch'.format(epoch, loss, config.miner.full_path))\n        except Exception as e:\n            logger.warning ( 'Exception {}. Could not find model in path: {}/model.torch', e, config.miner.full_path )\n\n\n        return model, optimizer\n\n\n","sub_path":"bittensor/utils/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"202656336","text":"\"\"\"\nNeighbor joining algorithm.\n\nTODO: find_lowest_score() returns a different (but still correct)\n      value every time it's called because qmatrix is a dictionary.\n      Try to make it consistent, or maybe don't? Not sure if it affects\n      anything (later on). 
sorted() does sort letters alphabetically,\n      so keep that in mind.\n\nTODO: Make the tree with PyGame.\n\"\"\"\nfrom pprint import pprint\n\ndef create_distancematrix(otus, data):\n    \"\"\"\n    otus = string or list with otus e.g. 'ABCDE'\n    data = tuple/list containing tuples/lists. Each\n           of these tuples/lists represents a row\n           of the distance matrix.\n\n    For example:\n    create_distancematrix('AB', ((0, 4), (4, 0))) makes:\n    \n        | A | B\n    --- --- ---\n     A  | 0 | 4\n    --- --- ---\n     B  | 4 | 0\n\n    The function returns the table in a dictionary of \n    dictionaries. So following the example, the return\n    value looks like this:\n    {\n        'A': {         # row \n            'A': 0,    # column\n            'B': 4,\n        },\n        'B': {\n            'A': 4,\n            'B': 0\n        }\n    } \n\n    Feel free to make the distancematrix yourself.\n    \"\"\"\n    distancematrix = {}\n    for otu_row, row_data in zip(otus, data):\n        distancematrix[otu_row] = {}\n        for otu_col, column_data in zip(otus, row_data):\n            distancematrix[otu_row][otu_col] = column_data\n\n    return distancematrix\n\ndef prettify_matrix(distancematrix):\n    \"\"\"\n    Returns a printable, human-readable string rendering of a distance matrix.\n    \"\"\"\n    otus = list(distancematrix.keys())\n    number_of_stripes = len(otus)+1\n\n    # Header\n    matrix = '    '\n    for otu in otus:\n        matrix += '| {} '.format(otu)\n    matrix += '\\n'\n    matrix += '{}\\n'.format('--- ' * number_of_stripes) \n    \n    # The rows\n    for otu_row in otus:\n        matrix += ' {}  '.format(otu_row)\n        for otu_col in otus[:-1]:\n            matrix += '| {} '.format(distancematrix[otu_row][otu_col])\n        matrix += '| {}\\n'.format(distancematrix[otu_row][otus[-1]])\n        matrix += '{}\\n'.format('--- ' * number_of_stripes) \n\n    return matrix\n\ndef calculate_net_divergence(distancematrix):\n    \"\"\"\n    Returns a dictionary with the net divergence of each of\n    the OTU in the distancematrix. The net divergence is \n    simply calculated by adding together all the distances of \n    an OTU relative to all the other OTUs.\n\n    For example, the distancematrix \n        | A | B | C\n    --- --- --- ---\n     A  | 0 | 4 | 6\n    --- --- --- ---\n     B  | 4 | 0 | 9\n    --- --- --- ---\n     C  | 6 | 9 | 0\n\n    returns the following dictionary:\n\n    {\n        'A': 10,    # 0 + 4 + 6\n        'B': 13,    # 4 + 0 + 9\n        'C': 15     # 6 + 9 + 0\n    }\n    \"\"\"\n    net_divergence = {}\n    for otu_row, row in distancematrix.items():\n        net_divergence[otu_row] = 0\n        for distance in row.values():\n            net_divergence[otu_row] += distance\n\n    return net_divergence\n\ndef create_qmatrix(distancematrix, net_divergence=None):\n    \"\"\"\n    Creates a Q-matrix from a distance matrix. Return \n    value has the same structure as distancematrix.\n\n    Formula: \n        Q(i, j) = (n-2) * d(i, j) - SUM(d(i, k)) - SUM(d(j, k))\n    Where\n        Q(i, j) = The value for OTUs i and j in the Q-matrix.\n        n = Number of OTUs.\n        d(i, j) = The value of i, j from the distance matrix.\n        SUM() = This is supposed to represent Sigma.\n        d(i, k) = The net divergence of i.\n\n    When an OTU gets grouped(?) 
with itself, the value is\n    automatically 0, since something can't have a distance\n    from itself.\n    \"\"\"\n    if net_divergence is None:\n        net_divergence = calculate_net_divergence(distancematrix)\n    n = len(net_divergence)\n\n    qmatrix = {}\n    for otu_row, row in distancematrix.items():\n        qmatrix[otu_row] = {}\n        for otu_col, distance in row.items():\n            if otu_row == otu_col:\n                qmatrix[otu_row][otu_col] = 0\n                continue \n            netdiv_i = net_divergence[otu_row] # SUM(d(i, k))\n            netdiv_j = net_divergence[otu_col] # SUM(d(j, k))\n            value = (n - 2) * distance - netdiv_i - netdiv_j \n            qmatrix[otu_row][otu_col] = value\n\n    return qmatrix\n\ndef find_lowest_score(qmatrix):\n    \"\"\"\n    Finds the OTU set with the lowest score in a Q-matrix. Returns\n    a tuple containing the two OTUs. For example:\n        ('B', 'D')\n    \"\"\"\n    # Start with a pair holding the first OTU twice.\n    lowest = (list(qmatrix.keys())[0],) * 2\n    for otu_row, row in qmatrix.items():\n        for otu_col, value in row.items():\n            if otu_row == otu_col:\n                continue \n            if value < qmatrix[lowest[0]][lowest[1]]:\n                lowest = (otu_row, otu_col)\n\n    return lowest\n\ndef calculate_distance_nodes(distancematrix, net_divergence, lowest):\n    \"\"\"\n    The new node that gets created is called U. This function finds\n    the distance between lowest[0] and U -and- lowest[1] and U. To\n    find these, we need the following formulas:\n\n    S(i, u) = 0.5 * d(i, j) + 1/(2 * (n-2)) * (SUM(d(i, k)) - SUM(d(j, k)))\n        To find the distance between I and U.\n        i = lowest[0] and j = lowest[1]\n        d(i, j) = The value of i, j from the distance matrix.\n        n = The number of OTUs in the distance matrix.\n        SUM() = This is supposed to represent Sigma.\n        d(i, k) = The net divergence of i, so net_divergence[lowest[i]]\n    S(j, u) = d(i, j) - S(i, u)\n        To find the distance between J and U.\n    \"\"\"\n    n = len(net_divergence)\n    i, j = lowest\n\n    # The distance from I to U.\n    i_to_u = distancematrix[i][j] / 2 # PEP8..\n    i_to_u += 1/(2 * (n-2)) * (net_divergence[i] - net_divergence[j])\n\n    # The distance from J to U.\n    j_to_u = distancematrix[i][j] - i_to_u\n\n    return {i: i_to_u, j: j_to_u}\n\ndef create_new_distancematrix(distancematrix, distance_nodes, new_node,\n                              old_otus, old_data):\n    \"\"\"\n    new_node is the name of the node which is being created this loop.\n    This is necessary because if we keep using the name \"U\", after the\n    second loop the distance matrix is going to have 2 OTUs named U, which\n    is going to wreck the whole thing. I'd also rather not call it IJ, AB\n    EF, etc. because this program may or may not get confused when one of\n    the OTUs consists of two letters. 
\n\n    Formula:\n        d(u, k) = 0.5 * [d(i, k) + d(j, k) - d(i, j)]\n    \"\"\"\n    new_distancematrix = {}\n\n    # We only need to calculate the distance to the \"old\" OTUs.\n    allowed_otus = [otu for otu in old_otus \n                    if otu not in distance_nodes.keys()]\n\n    # Start by adding the data that does not change.\n    for otu_row, row in distancematrix.items():\n        if not otu_row in allowed_otus:\n            continue\n        new_distancematrix[otu_row] = {}\n        for otu_col, distance in row.items():\n            if otu_col in allowed_otus:\n                new_distancematrix[otu_row][otu_col] = distance\n\n    # Add the new data.\n    i, j = distance_nodes.keys()\n    new_distancematrix[new_node] = {}\n    for allowed_otu in allowed_otus:\n        d = (distancematrix[i][allowed_otu] + distancematrix[j][allowed_otu]\n             - distancematrix[i][j]) / 2\n        new_distancematrix[new_node][allowed_otu] = int(d)\n        new_distancematrix[allowed_otu][new_node] = int(d)\n    new_distancematrix[new_node][new_node] = 0\n\n    return new_distancematrix\n\ndef main():\n    otus = 'ABC'\n    data = (\n        (0, 4, 6),\n        (4, 0, 9),\n        (6, 9, 0)\n    )\n\n    otus = 'ABCDEF'\n    data = (\n        (0, 5, 4, 7, 6, 8),\n        (5, 0, 7, 10, 9, 11),\n        (4, 7, 0, 7, 6, 8),\n        (7, 10, 7, 0, 5, 9),\n        (6, 9, 6, 5, 0, 8),\n        (8, 11, 8, 9, 8, 0)\n    )\n\n    # The names of the new nodes.\n    new_nodes = 'UVWXYZ'\n\n    distancematrix = create_distancematrix(otus, data)\n    for new_node in new_nodes:\n        print(prettify_matrix(distancematrix))\n\n        if len(otus) == 2:\n            # All distances are calculated.\n            break\n\n        net_divergence = calculate_net_divergence(distancematrix)\n        qmatrix = create_qmatrix(distancematrix, net_divergence)\n        lowest_otus = find_lowest_score(qmatrix)\n        distance_nodes = calculate_distance_nodes(distancematrix, \n            net_divergence, lowest_otus)\n\n        # After each loop, the distancematrix (and thus the otus too)\n        # changes, so we need to update this info.\n        distancematrix = create_new_distancematrix(distancematrix, \n            distance_nodes, new_node, otus, data)\n        otus = ''.join(list(distancematrix.keys()))\n\nif __name__ == '__main__':\n    main()","sub_path":"neighbor_joining.py","file_name":"neighbor_joining.py","file_ext":"py","file_size_in_byte":8563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"531830100","text":"import uuid\nfrom unittest.mock import patch\n\nfrom django.test import TestCase\n\nfrom pillow_retry.models import PillowError\nfrom pillowtop.es_utils import initialize_index_and_mapping\n\nfrom corehq.apps.es import CaseES, CaseSearchES\nfrom corehq.apps.es.tests.utils import es_test\nfrom corehq.elastic import get_es_new\nfrom corehq.form_processor.interfaces.dbaccessors import CaseAccessors\nfrom corehq.form_processor.tests.utils import (\n    FormProcessorTestUtils,\n    run_with_all_backends,\n)\nfrom corehq.pillows.mappings.case_mapping import CASE_INDEX_INFO\nfrom corehq.pillows.mappings.case_search_mapping import CASE_SEARCH_INDEX_INFO\nfrom corehq.util.elastic import ensure_index_deleted\nfrom corehq.util.es.elasticsearch import ConnectionError\nfrom corehq.util.test_utils import create_and_save_a_case, trap_extra_setup\nfrom testapps.test_pillowtop.utils import process_pillow_changes\n\n\n@es_test\nclass CasePillowTest(TestCase):\n    domain = 'case-pillowtest-domain'\n\n    @classmethod\n    def setUpClass(cls):\n        super(CasePillowTest, cls).setUpClass()\n        ensure_index_deleted(CASE_INDEX_INFO.index)\n        ensure_index_deleted(CASE_SEARCH_INDEX_INFO.index)\n\n    def setUp(self):\n        super(CasePillowTest, self).setUp()\n        self.process_case_changes = process_pillow_changes('DefaultChangeFeedPillow')\n        
self.process_case_changes.add_pillow('case-pillow', {'skip_ucr': True})\n        FormProcessorTestUtils.delete_all_cases()\n        with trap_extra_setup(ConnectionError):\n            self.elasticsearch = get_es_new()\n            initialize_index_and_mapping(self.elasticsearch, CASE_INDEX_INFO)\n            initialize_index_and_mapping(self.elasticsearch, CASE_SEARCH_INDEX_INFO)\n\n    def tearDown(self):\n        ensure_index_deleted(CASE_INDEX_INFO.index)\n        ensure_index_deleted(CASE_SEARCH_INDEX_INFO.index)\n        FormProcessorTestUtils.delete_all_cases_forms_ledgers(self.domain)\n        PillowError.objects.all().delete()\n        super(CasePillowTest, self).tearDown()\n\n    @run_with_all_backends\n    def test_case_pillow(self):\n        case_id, case_name = self._create_case_and_sync_to_es()\n\n        # confirm change made it to elasticsearch\n        results = CaseES().run()\n        self.assertEqual(1, results.total)\n        case_doc = results.hits[0]\n        self.assertEqual(self.domain, case_doc['domain'])\n        self.assertEqual(case_id, case_doc['_id'])\n        self.assertEqual(case_name, case_doc['name'])\n\n    @run_with_all_backends\n    def test_case_pillow_error_in_case_es(self):\n        self.assertEqual(0, PillowError.objects.filter(pillow='case-pillow').count())\n        with patch('corehq.pillows.case_search.domain_needs_search_index', return_value=True), \\\n                patch('corehq.pillows.case.transform_case_for_elasticsearch') as case_transform, \\\n                patch('corehq.pillows.case_search.transform_case_for_elasticsearch') as case_search_transform:\n            case_transform.side_effect = Exception('case_transform error')\n            case_search_transform.side_effect = Exception('case_search_transform error')\n            case_id, case_name = self._create_case_and_sync_to_es()\n\n        # confirm change did not make it to case search index\n        results = CaseSearchES().run()\n        self.assertEqual(0, results.total)\n\n        # confirm change did not make it to case index\n        results = CaseES().run()\n        self.assertEqual(0, results.total)\n\n        self.assertEqual(1, PillowError.objects.filter(pillow='case-pillow').count())\n\n    @run_with_all_backends\n    def test_case_soft_deletion(self):\n        case_id, case_name = self._create_case_and_sync_to_es()\n\n        # verify there\n        results = CaseES().run()\n        self.assertEqual(1, results.total)\n\n        # soft delete the case\n        with self.process_case_changes:\n            CaseAccessors(self.domain).soft_delete_cases([case_id])\n        self.elasticsearch.indices.refresh(CASE_INDEX_INFO.index)\n\n        # ensure not there anymore\n        results = CaseES().run()\n        self.assertEqual(0, results.total)\n\n    def _create_case_and_sync_to_es(self):\n        case_id = uuid.uuid4().hex\n        case_name = 'case-name-{}'.format(uuid.uuid4().hex)\n        with self.process_case_changes:\n            create_and_save_a_case(self.domain, case_id, case_name)\n        self.elasticsearch.indices.refresh(CASE_INDEX_INFO.index)\n        self.elasticsearch.indices.refresh(CASE_SEARCH_INDEX_INFO.index)\n        return case_id, case_name\n","sub_path":"testapps/test_pillowtop/tests/test_case_pillow.py","file_name":"test_case_pillow.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"632049033","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 3 13:07:33 2019\n\n@author: ererkka\n\"\"\"\n\nDATASTORE_PATH = 'data/datastore.h5'\n\n\nSCENARIO_LENGTH = 52 * 7 * 24\n\nCASES = ['PF', 'PF+STO', 'STO', 'DET+STO', 'DET']\n\nCASE_LABELS = ['PF\\nPF', 'PF\\nSTO', 'STO\\nSTO', 'DET\\nSTO', 'DET\\nDET']\n\nUNITTYPE_NAMES = {\n    'Biomass': 'Biomass boiler',\n    'Gas': 'Gas turbine',\n    'Oil': 'Oil fired',\n    'Wind': 'Wind power',\n    'Hydropower': 'Hydropower',\n    'HydroPumped': 
'Pumped hydropower',\n    'HydroPumped_Pump': 'Pumped hydropower (pumping)'\n    }\n\nUNITTYPE_LABELS = {\n    'Biomass boiler': 'Biomass\\nboiler',\n    'Gas turbine': 'Gas\\nturbine',\n    'Oil fired': 'Oil\\nfired',\n    'Wind power': 'Wind\\npower',\n    'Hydropower': 'Hydro-\\npower',\n    'Pumped hydropower': 'Pumped\\nhydro',\n    'Pumped hydropower (pumping)': 'Pumped hydro\\n(pumping)'\n    }\n\n# https://matplotlib.org/examples/color/named_colors.html\nUNITYPE_COLOURS = {\n    'Biomass boiler': 'saddlebrown',\n    'Gas turbine': 'magenta',\n    'Oil fired': 'grey',\n    'Wind power': 'limegreen',\n    'Pumped hydropower': 'navy',\n    'Pumped hydropower (pumping)': 'navy',\n    'Hydropower': 'dodgerblue' \n    }\n\nUNITS_UNITTYPES = {\n    'Biomass1': 'Biomass boiler',\n    'Biomass1CCS': 'Biomass boiler',\n    'Biomass2': 'Biomass boiler',\n    'Biomass2CCS': 'Biomass boiler',\n    'Gas': 'Gas turbine',\n    'GasCCS': 'Gas turbine',\n    'Oil': 'Oil fired',\n    'OilCCS': 'Oil fired'}\n\nNODES = {'83NO': 'NORGEMIDT', \n    '38DK': 'DANM-VEST', \n    '79NO': 'SORLAND', \n    '80NO': 'TELEMARK'\n    }\n","sub_path":"src/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"488359154","text":"#!/usr/bin/env python3 \r\n#-*- coding: utf-8 -*-\r\n\r\nfrom config import INFO, LINE, WARNING\r\nfrom exploits.netwave import netwaveCam\r\nimport requests\r\nimport os, sys\r\n\r\nrequests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)\r\n\r\n\r\nexploits = {\r\n    \"Netcam\": \"anony/mjpg.cgi\",\r\n    \"Netwave\": \"/proc/kcore\"\r\n}\r\n\r\n\r\n\r\ndef check_exploit(camera, url, s):\r\n    print(\"\\n{}Exploit\\n\".format(INFO))\r\n    for exploit in exploits:\r\n        if camera == exploit and camera in [\"Netcam\", \"Netwave\"]:\r\n            print(\"{}Testing vulnerability {} for {} camera \\n\".format(INFO, exploits[exploit], exploit))\r\n            url_exploit = \"{}{}\".format(url, exploits[exploit])\r\n            try:\r\n                req_exploit = requests.head(url_exploit, verify=False, timeout=3, allow_redirects=False)\r\n            except:\r\n                if camera != \"Netwave\":\r\n                    req_exploit = requests.get(url_exploit, verify=False, timeout=3, allow_redirects=False)\r\n                else:\r\n                    req_exploit = False\r\n            if req_exploit and req_exploit.status_code == 200:\r\n                print(\"{}The vulnerability worked, go on {}\\n\".format(WARNING, url_exploit))\r\n                if camera == \"Netwave\":\r\n                    netwave = netwaveCam()\r\n                    netwave.exploit_netwave(url_exploit, netwave)\r\n                if camera == \"Hikvision\":\r\n                    pass\r\n    print(LINE)","sub_path":"modules/check_exploit.py","file_name":"check_exploit.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"239348115","text":"#-*- coding: utf-8 -*-\n#https://www.youtube.com/watch?v=78t_yHuGg-0&t=600s\nfrom sys import stdout\nimport copy\ndef sol(S, chosen=\"\"):\n    #print(\"miso1: S:%s, chosen:%s, len(S):%s\" % (S, chosen, len(S)))\n    if S == \"\":\n        print(chosen)\n    else:\n        sz = len(S)\n        for i in range(sz):\n            # choose\n            #print(\"miso2: S:%s, chosen:%s, i:%s, len(S):%s\" % (S, chosen, i, len(S)))\n            c = S[i]\n            idx = S.find(c)\n            preserved = S\n            S = S.replace(c, '', 1)\n            chosen+=c\n\n            # permute\n            sol(S, chosen)\n\n            # un-choose\n            #print(\"S:%s, idx:%s, S[0:idx]:%s+c:%s+S[idx+1:]:%s, chosen:%s\" % (S, idx, S[0:idx], c, S[idx+1:], chosen))\n            S = preserved\n            chosen = chosen[0:-1]\n\n\ndef test():\n    x = \"MARTY\"\n    ret = sol(x, \"\")\n    stdout.write(\"%s\\n\" % str(ret))\n\n\n# 
call the main method\nif __name__ == \"__main__\":\n    import logging\n    logging.basicConfig(level=logging.DEBUG, format=\"%(message)s\")\n    #main()\n    test()\n\n\n","sub_path":"problems/cs_106b_premutation.py","file_name":"cs_106b_premutation.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"443950112","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n# In[21]:\n\n\ndf = pd.read_csv(\"/Users/ayangedleh/Desktop/datacollection/Data/Data to Clean/retailarticles YTD (new)_merged.csv\", encoding= \"ISO-8859-1\")\ndf.head()\n\n\n# In[3]:\n\n\ncol = ['category','content', 'pub_date']\ndf = df[col]\n\n\n# In[30]:\n\n\ndf = df[pd.notnull(df['content'])]\ndf.columns = ['category', 'content','pub_date']\ndf.head()\n\n\n# In[23]:\n\n\ndf['category_id'] = df['category'].factorize()[0]\ncategory_id_df = df[['category', 'category_id']].drop_duplicates().sort_values('category_id')\ncategory_to_id = dict(category_id_df.values)\nid_to_category = dict(category_id_df[['category_id', 'category']].values)\ndf.head()\n\n\n# In[26]:\n\n\nfig = plt.figure(figsize=(4,3))\ndf.groupby('category').content.count().plot.bar(ylim=0)\nplt.show()\n\n\n# In[28]:\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nvector = TfidfVectorizer(sublinear_tf=True, min_df=1, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english')\nfeatures = vector.fit_transform(df.content).toarray()\nlabels = df.category_id\nfeatures.shape\n\n\n# In[29]:\n\n\nfrom sklearn.feature_selection import chi2\nimport numpy as np\nN = 50\n\nfor category, category_id in sorted(category_to_id.items()):\n    features_chi2 = chi2(features, labels == category_id)\n    indices = np.argsort(features_chi2[0])\n    feature_names = np.array(vector.get_feature_names())[indices]\n    unigrams = [v for v in feature_names if len(v.split(' ')) == 1]\n    bigrams = [v for v in feature_names if len(v.split(' ')) == 2]\n    print(\"# '{}':\".format(category))\n    print(\" . Most correlated unigrams:\\n {}\".format('\\n '.join(unigrams[-N:])))\n    print(\" . Most correlated bigrams:\\n. {}\".format('\\n '.join(bigrams[-N:]))) \n\n","sub_path":"Scripts/FeatureSelection.py","file_name":"FeatureSelection.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"177168037","text":"#!/bin/python3\n\"\"\"Face recognition from Image/all data.\"\"\"\nimport os\n\n# import errno\nimport subprocess\n\n# import csv\n# import subprocess\nimport sys\n\nimport cv2\nimport face_recognition\nimport numpy as np\nfrom Crypto.Cipher import AES, PKCS1_OAEP\nfrom Crypto.PublicKey import RSA\n\n# This is a demo of running face recognition on live video from your webcam. It's a little more complicated than the\n# other example, but it includes some basic performance tweaks to make things run a lot faster:\n# 1. Process each video frame at 1/4 resolution (though still display it at full resolution)\n# 2. Only detect faces in every other frame of video.\n\n# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.\n# OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this\n# specific demo. 
If you have trouble installing it, try any of the other demos that don't require it instead.\n\n# Get a reference to webcam #0 (the default one)\n\n\ndef getpassword():\n file_in = open(\"encrypted_data.bin\", \"rb\")\n private_key = RSA.import_key(open(\"private.pem\").read())\n enc_session_key, nonce, tag, ciphertext = [\n file_in.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1)\n ]\n\n # Decrypt the session key with the private RSA key\n cipher_rsa = PKCS1_OAEP.new(private_key)\n session_key = cipher_rsa.decrypt(enc_session_key)\n\n # Decrypt the data with the AES session key\n cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\n data = cipher_aes.decrypt_and_verify(ciphertext, tag)\n return data.decode(\"utf-8\")\n\n\nvideo_capture = cv2.VideoCapture(0)\nimagepath = \"/home/neo/Documents/Python/Images/me.jpg\"\n\n\nimage = face_recognition.load_image_file(imagepath)\nface_encoding = face_recognition.face_encodings(image)[0]\nknown_face_encodings = [face_encoding]\nknown_face_names = [\"me\"]\n\n# Initialize some variables\nface_locations = []\nface_encodings = []\nface_names = []\nprocess_this_frame = True\n\nprint(\"Placez-vous devant la caméra\")\nwhile True:\n # Grab a single frame of video\n ret, frame = video_capture.read()\n\n # Resize frame of video to 1/4 size for faster face recognition processing\n small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)\n\n # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)\n rgb_small_frame = small_frame[:, :, ::-1]\n\n # Only process every other frame of video to save time\n if process_this_frame:\n # Find all the faces and face encodings in the current frame of video\n face_locations = face_recognition.face_locations(rgb_small_frame)\n face_encodings = face_recognition.face_encodings(\n rgb_small_frame, face_locations\n )\n\n face_names = []\n for face_encoding in face_encodings:\n # See if the face is a match for the known face(s)\n matches = face_recognition.compare_faces(\n known_face_encodings, face_encoding\n )\n name = \"Unknown\"\n\n # # If a match was found in known_face_encodings, just use the first one.\n # if True in matches:\n # first_match_index = matches.index(True)\n # name = known_face_names[first_match_index]\n\n # Or instead, use the known face with the smallest distance to the new face\n face_distances = face_recognition.face_distance(\n known_face_encodings, face_encoding\n )\n best_match_index = np.argmin(face_distances)\n if matches[best_match_index]:\n name = known_face_names[best_match_index]\n\n face_names.append(name)\n\n process_this_frame = not process_this_frame\n if \"me\" in face_names:\n video_capture.release()\n cmd = \"/home/neo/Documents/Python/execroot.sh\"\n cmdlst = [cmd]\n for i in sys.argv[1:]:\n cmd += \" \" + i\n cmdlst.append(i)\n try:\n # process = os.execvpe(\"/home/neo/Documents/Python/execroot.sh\",\n # sys.argv[0:], os.environ)\n process = subprocess.run(cmd, shell=True, check=False)\n\n except BrokenPipeError:\n # devnull = os.open(os.devnull, os.O_WRONLY)\n # os.dup2(devnull, sys.stdout.fileno())\n # sys.exit(1)\n print(\"Error\")\n\n # print(\"Code : \" + str(process.returncode))\n exit(process.returncode)\n break\n # # Display the results\n # for (top, right, bottom, left), name in zip(face_locations, face_names):\n # # Scale back up face locations since the frame we detected in was scaled to 1/4 size\n # top *= 4\n # right *= 4\n # bottom *= 4\n # left *= 4\n\n # # Draw a box around the face\n # cv2.rectangle(frame, (left, 
top), (right, bottom), (0, 0, 255), 2)\n\n # # Draw a label with a name below the face\n # cv2.rectangle(\n # frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED\n # )\n # font = cv2.FONT_HERSHEY_DUPLEX\n # cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)\n\n # # Display the resulting image\n # cv2.imshow(\"Video\", frame)\n\n # # Hit 'q' on the keyboard to quit!\n # if cv2.waitKey(1) & 0xFF == ord(\"q\"):\n # break\n\n# Release handle to the webcam\nvideo_capture.release()\n# cv2.destroyAllWindows()\n","sub_path":"face_recognitionroot.py","file_name":"face_recognitionroot.py","file_ext":"py","file_size_in_byte":5459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"338194822","text":"# -*- coding: utf-8 -*-\n\nimport re, csv\nimport jieba\n\nfrom util.spider.ip_utils.db_utils import _clawer_db, create_connection, all_adv_region, all_edu_ip\n\ndef init_name_code(region_list: []) -> {}:\n name_code_map = {}\n for row in region_list:\n if row is not None and len(row) > 2:\n code = row[0]\n name = row[1]\n if name_code_map.get(name) is not None:\n print('name:' + name + ' code1:' + name_code_map.get(name) + ' code2:' + code)\n name_code_map[name] = code\n\n return name_code_map\n\ndef init_name_father(region_list: []) -> {}:\n name_father_map = {}\n for row in region_list:\n if row is not None and len(row) > 2:\n name = row[1]\n father = row[2]\n if name_father_map.get(name) is not None:\n print('name:' + name + ' father1:' + name_father_map.get(name) + ' father2:' + father)\n name_father_map[name] = father\n\n return name_father_map\n\ndef region_to_area(region_list: []) -> []:\n name_code_map = init_name_code(region_list)\n name_father_map = init_name_father(region_list)\n area_list = []\n\n for row in region_list:\n if row is not None and len(row) > 2:\n code = row[0]\n name = row[1]\n father = row[2]\n temp_data = [name]\n while father is not None:\n if father != '全球':\n temp_data.append(father)\n name = father\n father = name_father_map.get(name)\n\n # 国家,省,市\n area_data = [code]\n for i in range(0, 3):\n name = temp_data.pop() if len(temp_data) > 0 else None\n if name is not None:\n code = name_code_map.get(name)\n area_data.append(code)\n area_data.append(name)\n area_data.append(name)\n else:\n area_data.append('')\n area_data.append('')\n area_data.append('')\n\n # 省简称\n province_brief = area_data[6]\n data = re.findall('(.*?)(省|市|自治区|壮族自治区|回族自治区|维吾尔自治区)', province_brief)\n if (len(data) > 0):\n area_data[6] = data[0][0]\n # 市简称\n city_brief = area_data[9]\n data = re.findall('(.*?)(市|地区)', city_brief)\n if (len(data) > 0):\n area_data[9] = data[0][0]\n\n area_list.append(area_data)\n\n return area_list\n\n\ndef init_area(csv_file: str):\n conn = create_connection(_clawer_db)\n with conn:\n try:\n region_list = all_adv_region(conn)\n area_list = region_to_area(region_list)\n with open(csv_file, 'w') as output:\n writer = csv.writer(output, delimiter=',', lineterminator='\\n')\n writer.writerows(area_list)\n except Exception as e:\n print(e)\n\ndef extract_desc(data_list: []) -> []:\n result_list = []\n # 载入自定义词典\n jieba.load_userdict('/home/laomie/keywords.csv')\n\n for row in data_list:\n if (len(row) > 6):\n begin_ip_desc = row[5]\n desc_list = str(begin_ip_desc).split(' ')\n if len(desc_list) > 1 :\n area = desc_list[0]\n edu = desc_list[1]\n province = ''\n city = ''\n # 结巴分词提前省份,城市\n terms = jieba.cut(area)\n seg_list = ','.join(terms).split(',')\n if (len(seg_list) > 0):\n if 
(seg_list[0] == '中国'):\n                    temp_list = seg_list[1:]\n                    if (len(temp_list) > 0):\n                        province = temp_list[0]\n                    if (len(temp_list) > 1):\n                        city = temp_list[1]\n\n                result = []\n                result += row[:5]\n                result += [province, city, edu]\n                result += row[5:]\n                result_list.append(result)\n\n    return result_list\n\n\ndef extract_edu_ip(csv_file: str):\n    conn = create_connection(_clawer_db)\n    with conn:\n        try:\n            data_list = all_edu_ip(conn)\n            result_list = extract_desc(data_list)\n            with open(csv_file, 'w') as output:\n                writer = csv.writer(output, delimiter=',', lineterminator='\\n')\n                writer.writerows(result_list)\n        except Exception as e:\n            print(e)\n\n\nif __name__ == '__main__':\n    #init_area('/home/laomie/out.csv')\n    extract_edu_ip('/home/laomie/out.csv')\n","sub_path":"util/spider/ip_utils/data_convertor.py","file_name":"data_convertor.py","file_ext":"py","file_size_in_byte":4696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"254810013","text":"import unittest\nfrom typing import List\n\nclass Solution(unittest.TestCase):\n    def twoSum(self, numbers: List[int], target: int) -> List[int]:\n        \"\"\"\nGiven an array of integers that is already sorted in ascending order, find two numbers such that they add up to a specific target number.\n\nThe function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2.\n\nNote:\n\nYour returned answers (both index1 and index2) are not zero-based.\nYou may assume that each input would have exactly one solution and you may not use the same element twice.\nExample:\n\nInput: numbers = [2,7,11,15], target = 9\nOutput: [1,2]\nExplanation: The sum of 2 and 7 is 9. Therefore index1 = 1, index2 = 2.\n\"\"\"\n        l, r = 0, len(numbers)-1\n\n        while l < r:\n            s = numbers[l] + numbers[r]\n            if s == target:\n                break\n            elif s < target:\n                l += 1\n            else:\n                r -= 1\n        return [l+1, r+1]\n\n    def testTwoSum(self):\n        self.assertEqual([1,2], self.twoSum([2,7,11,15], 9))\n","sub_path":"src/main/python/two_sum_ii.py","file_name":"two_sum_ii.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"445789566","text":"# /usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport wx\n\nclass Window(wx.Frame):\n    \"\"\"docstring for Window\"\"\"\n    def __init__(self, parent, title):\n        wx.Frame.__init__(self, parent, title = title, size = (300, 250))\n        #creating text field\n        self.control = wx.TextCtrl(self, style = wx.TE_MULTILINE)\n        self.Show(True)\n        #creating menu\n        menu = wx.Menu()\n        #append submenu\n        itemAbout = menu.Append(wx.ID_ABOUT, 'About', 'Push the button to see info about this app')\n        itemExit = menu.Append(wx.ID_EXIT, 'EXIT', 'Push the button to leave this app') #\n        #creating menubar\n        bar = wx.MenuBar()\n        #append item of menu\n        bar.Append(menu, 'Main')\n        #indicates that the menu is necessary to show in our form\n        self.SetMenuBar(bar)\n        # setting events of menu\n        self.Bind(wx.EVT_MENU, self.OnAbout, itemAbout)\n        self.Bind(wx.EVT_MENU, self.OnExit, itemExit)\n\n    def OnAbout(self, e):\n        dlg = wx.MessageDialog(self, \"Created 2vlysenko in the Kharkiv 26.12.2014!\", \"About app\", wx.OK)\n        dlg.ShowModal()\n\n    def OnExit(self, e):\n        self.Destroy()\n\napp = wx.App()\nwindow = Window(None, '26.12.2014 15:30 wxPython')\napp.MainLoop()\n\n","sub_path":"wxPython/wxPython - first.py","file_name":"wxPython - 
first.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"234699061","text":"# Copyright (c) 2013, Rethink Robotics\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# 3. Neither the name of the Rethink Robotics nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\nimport roslib\nroslib.load_manifest('baxter_interface')\nimport rospy\n\nimport dataflow\nimport digital_io\n\nimport baxter_msgs.msg\n\nclass Navigator(object):\n \"\"\"\n Interface class for a Navigator on the Baxter robot.\n\n Inputs:\n Button 0 - press wheel\n Button 1 - above wheel\n Button 2 - below wheel\n Scroll wheel - 0-255\n\n Outputs:\n Inner LED\n Outer LED\n\n Signals:\n button0_changed - True/False\n button1_changed - True/False\n button2_changed - True/False\n wheel_changed - New wheel value\n\n Valid identifiers:\n left, right, torso_left, torso_right\n \"\"\"\n\n __LOCATIONS = ('left', 'right', 'torso_left', 'torso_right')\n\n def __init__(self, location):\n if not location in self.__LOCATIONS:\n raise AttributeError(\"Invalid Navigator name '%s'\" % (location,))\n self._id = location\n self._state = None\n self.button0_changed = dataflow.Signal()\n self.button1_changed = dataflow.Signal()\n self.button2_changed = dataflow.Signal()\n self.wheel_changed = dataflow.Signal()\n\n self._state_sub = rospy.Subscriber(\n '/sdk/robot/itb/%s_itb/state' % (self._id,),\n baxter_msgs.msg.ITB,\n self._on_state)\n\n self._inner_led = digital_io.DigitalIO(\n '%s_itb_light_inner' % (self._id,))\n\n self._outer_led = digital_io.DigitalIO(\n '%s_itb_light_outer' % (self._id,))\n\n dataflow.wait_for(lambda: self._state != None)\n\n @property\n def wheel(self):\n \"\"\"\n Current state of the wheel\n \"\"\"\n return self._state.wheel\n\n @property\n def button0(self):\n \"\"\"\n Current state of button 0\n \"\"\"\n return self._state.buttons[0]\n\n @property\n def button1(self):\n \"\"\"\n Current state of button 1\n \"\"\"\n return self._state.buttons[1]\n\n @property\n def button2(self):\n \"\"\"\n Current state of button 2\n \"\"\"\n return self._state.buttons[2]\n\n @property\n def 
inner_led(self):\n        \"\"\"\n        Current state of the inner LED\n        \"\"\"\n        return self._state.innerLight\n\n    @inner_led.setter\n    def inner_led(self, enable):\n        \"\"\"\n        Control the inner LED.\n\n        @param enable - True to enable the light, False otherwise\n        \"\"\"\n        self._inner_led.set_output(enable)\n\n    @property\n    def outer_led(self):\n        \"\"\"\n        Current state of the outer LED.\n        \"\"\"\n        return self._state.outerLight\n\n    @outer_led.setter\n    def outer_led(self, enable):\n        \"\"\"\n        Control the outer LED.\n\n        @param enable - True to enable the light, False otherwise\n        \"\"\"\n        return self._outer_led.set_output(enable)\n\n    def _on_state(self, msg):\n        if not self._state:\n            self._state = msg\n\n        if self._state == msg:\n            return\n\n        buttons = [self.button0_changed, self.button1_changed, self.button2_changed]\n        for i, signal in enumerate(buttons):\n            if self._state.buttons[i] != msg.buttons[i]:\n                signal(msg.buttons[i])\n\n        if self._state.wheel != msg.wheel:\n            self.wheel_changed(msg.wheel)\n\n        self._state = msg\n","sub_path":"baxter/baxter_interface/src/baxter_interface/navigator.py","file_name":"navigator.py","file_ext":"py","file_size_in_byte":4868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"283682359","text":"from __future__ import absolute_import\n__all__ = ['TCCShellNub']\n\nimport CPL\nimport Hub.Reply\nfrom .ShellNub import ShellNub\n\nclass TCCShellNub(ShellNub):\n    \"\"\" A ShellNub with the plumbing required to recognize and record the TCC YourUserNum as\n    our .cid.\n    \"\"\"\n\n    def findUserNum(self, kvl):\n        \"\"\" Find YourUserNum key in list of KVs. Return the CID or None. \"\"\"\n        \n        for k, v in kvl.items():\n            if k == \"YourUserNum\":\n                cid = int(v[0])\n                return cid\n        return None\n    \n    def copeWithInput(self, s):\n        \"\"\" Override the default copeWithInput to set our .cid from the YourUserNum key. \"\"\"\n        \n        if self.debug > 5:\n            CPL.log('TCCShell.copeWithInput', \"Nub %s read: %r, with buf=%r\" % (self.name, s, self.inputBuffer))\n\n        while 1:\n            # Connections to the TCC's tccuser captive account return lines\n            # terminated by CRLF, but with the LF coming at the start of the \"next\n            # line\". Odd, and to be investigated. 
In the meanwhile, strip leading LFs\n #\n if len(self.inputBuffer) > 0 and self.inputBuffer[0] == '\\n':\n self.inputBuffer = self.inputBuffer[1:]\n \n reply, leftover = self.decoder.decode(self.inputBuffer, s)\n s = None\n if self.debug > 5:\n CPL.log('TCCShell.copeWithInput', \"decoded: %s, yielding buf=%r\" % (reply, leftover))\n\n self.inputBuffer = leftover\n if not reply:\n break\n\n if self.log:\n try:\n txt = reply['RawText']\n except:\n txt = \"UNKNOWN INPUT\"\n self.log.log(txt, note='<')\n \n # Here's the special TCC bit: search for YourUserNum, \n if self.cid == None:\n newCID = self.findUserNum(reply['KVs'])\n if newCID != None:\n self.cid = newCID\n CPL.log('TCCShell.copeWithInput', \"setting CID=%s\" % (self.cid))\n self.connected()\n \n cmd = self.getCmdForReply(reply)\n r = Hub.Reply.Reply(cmd, reply['flag'], reply['KVs'])\n cmd.reply(r)\n \n","sub_path":"Hub/Nub/TCCShellNub.py","file_name":"TCCShellNub.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"557250491","text":"# other\nimport time\nimport numpy\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef run(serial, f_centre, f_width, signal_generator_address, lock_in_address, step_count, reading_count, file_prefix,\n check_progress, check_debug_info, check_timing_info):\n\n f_start = f_centre - (f_width / 2)\n f_stop = f_centre + (f_width / 2)\n\n fs = numpy.linspace(f_start, f_stop, step_count)\n\n if check_debug_info and check_timing_info:\n absolute_time_start = time.time()\n\n for j in range(0, reading_count):\n xs = numpy.linspace(0, 0, step_count)\n ys = numpy.linspace(0, 0, step_count)\n rs = numpy.linspace(0, 0, step_count)\n ts = numpy.linspace(0, 0, step_count)\n\n for i in range(0, step_count):\n if check_debug_info:\n print('%d/%d' % (i, step_count))\n # address signal generator\n serial.write('++addr %d' % signal_generator_address)\n # set signal generator frequency\n serial.write('frequency %.2f hz' % fs[i])\n # address lock-in amplifier\n serial.write('++addr %d' % lock_in_address)\n # get x\n serial.write('X.')\n xs[i] = float(serial.read_line())\n serial.write('Y.')\n # get y\n ys[i] = float(serial.read_line())\n rs[i] = numpy.sqrt((xs[i] * xs[i]) + (ys[i] * ys[i]))\n ts[i] = math.degrees(math.atan(ys[i] / xs[i]))\n if check_debug_info:\n print('%.2f, %.2f, %.2f, %.2f, %.2f' % (fs[i], xs[i], ys[i], rs[i], ts[i]))\n if check_progress:\n plt.plot(fs, xs)\n plt.show()\n pass\n\n if check_debug_info and check_timing_info:\n time_elapsed = time.time() - absolute_time_start\n print('%.2f s' % time_elapsed)\n\n temp = numpy.column_stack((fs, xs, ys, rs, ts))\n numpy.savetxt('%s_%d.csv' % (file_prefix, (j + 1)), temp, delimiter=\",\")\n plt.plot(fs, xs)\n plt.show()\n input()\n","sub_path":"control/EGGLockin.py","file_name":"EGGLockin.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"576296234","text":"\"\"\"\n\t** Aamod Kore **\n\tComputer Science and Engineering,\n\tIndian Institute of Technology - Bombay.\n\twww.cse.iitb.ac.in/~aamod\n\taamod[at]cse.iitb.ac.in\n\"\"\"\n\nimport random\nimport sys\n\nimport VirtualDoubleCoverage\nfrom VirtualDoubleCoverage import *\n\nperim = 20\ntests = 20\n\n\ndef c_metric(a, b):\n # a = 0 if not a else a % perim\n # b = 0 if not b else b % perim\n d = abs(b-a) #% perim\n if d > perim//2:\n return perim-d\n return d\n\n\ndef c_metric_mid(a, b):\n a = 0 if not a else a % 
perim\n b = 0 if not b else b % perim\n if a == b:\n return (a+perim//2) % perim\n d = (b-a) % perim\n return a+d//2\n\n\ndef generate(conf):\n n = len(conf)\n config = sorted(conf)\n maximum, ans = 0, 0\n for i in range(n):\n j = 0 if i == n-1 else i+1\n mid = c_metric_mid(config[i], config[j])\n dist = c_metric(config[i], mid)\n # print(config[i], config[j], mid, dist)\n if dist > maximum:\n maximum, ans = dist, mid\n return ans\n\n\nif __name__ == \"__main__\":\n \"\"\"Test case. Duh!\"\"\"\n ns = 5\n perim=20\n tests=150\n \n for t in range(10):\n request_sequence=[]\n vCost = 0\n pCost = 0\n print(\"TEST CASE: \",t)\n initial = random.sample(range(1,perim+1), ns)\n # initConfig = (0,0,0)\n initConfig = tuple(initial)\n initial_configuration = list(initial)\n print(\"Configuration \", initConfig)\n test = VirtualDoubleCoverage(perim,ns,list(initConfig))\n\n # print(\"Initial config:\", end=\" \")\n # for q in range(len(initial)) :\n # \tprint(initial[q], end=\" \")\n\n onlineCost = 0\n for i in range(tests):\n mid = generate(test.configuration)\n # wf.add_request(mid)\n request_sequence.append(mid)\n print(\"-----------------------------------------------\")\n o=(i+1)%perim\n p, v = test.processRequest(o)\n\n print(\"Physical configurations: \", test.configuration)\n print(\"Virtual configurations: \", test.vPosition)\n print(\"Virtual distance : \", test.vDistance)\n print(\"Virtual cost: \", v, \" Physical cost: \",p)\n vCost += v\n pCost += p\n print(\"-----------------------------------------------\\n\")\n\n print(request_sequence, \"\\n\")\n print(len(request_sequence))\n # print(initial_configuration)\n # opt = ServerSpace(c_metric)\n # print(\"Second,\", initial_configuration)\n # opt.add_servers(initial_configuration)\n # optimal_cost = opt.process_requests(request_sequence)[0]\n # # optimal_cost=1 if optimal_cost==0 else optimal_cost\n # print(\"Total physical cost: \", pCost)\n # print(\"Total virtual cost: \", vCost)\n # print(\"Optimal cost: \", optimal_cost)\n # pCost = 1 if pCost == 0 else pCost\n # vCost = 1 if vCost == 0 else vCost\n # optimal_cost = 1 if optimal_cost == 0 else optimal_cost\n # print(\"(Virtual ) Competitive ratio: \", vCost/optimal_cost)\n # print(\"(Physical ) Competitive ratio: \", pCost/optimal_cost)\n # print()\n li = [perim, ns,len(request_sequence),request_sequence]\n with open('Longest-Arc.csv','a') as csvfile:\n csvwriter=writer(csvfile)\n # csvwriter.writerow(fields)\n csvwriter.writerow(li)\n csvfile.close()\n","sub_path":"Longest-Arc-input-sequence.py","file_name":"Longest-Arc-input-sequence.py","file_ext":"py","file_size_in_byte":3400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"378810679","text":"from app import db, api, cache\nfrom flask_restx import Resource, abort\nfrom util.models import auth_details\nfrom util.request_handling import *\nfrom util.helpers import *\nfrom configs.collection_names import *\nfrom random import randint\n\nfeed = api.namespace('feeds', description='Feeds service')\n\n\n@feed.route('/', endpoint='feeds', strict_slashes=False)\nclass Feeds(Resource):\n @feed.response(200, 'Success')\n @feed.response(400, 'invalid token')\n @feed.expect(auth_details)\n # @feed.param('n','Number of posts to fetch, 10 by default')\n # @feed.param('p','What post to start at, 0 by default')\n @cache.memoize(timeout=5)\n @feed.doc(description='''\n To get the feed of all recipes of all authors a user is following. 
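The c_metric/c_metric_mid pair in Longest-Arc-input-sequence.py measure distances and midpoints on a ring of circumference perim, and generate() uses them to drop each request at the midpoint of the widest gap between servers — an adversarial input sequence for the double-coverage k-server algorithm. A quick worked check of the wrap-around cases, with perim = 20 as in the script:

perim = 20

def c_metric(a, b):
    d = abs(b - a)
    return perim - d if d > perim // 2 else d    # never more than half the ring

def c_metric_mid(a, b):
    a, b = a % perim, b % perim
    if a == b:
        return (a + perim // 2) % perim           # antipode of a single point
    return a + ((b - a) % perim) // 2             # walk halfway clockwise from a to b

assert c_metric(2, 18) == 4       # wrapping through 0 beats going across the middle
assert c_metric(2, 10) == 8
assert c_metric_mid(18, 2) == 20  # i.e. position 0; like the original, the result is not reduced mod perim

Note that the script's final block calls writer(csvfile) without ever importing it; it needs `from csv import writer` at the top before it can run.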
\n ''')\n def get(self):\n user = authorize(request)\n # n = get_request_arg('n', int, default=10)\n # p = get_request_arg('p', int, default=0)\n followings = db.get_one_from_collection(\n FOLLOWINGS, 'user_id', user)['followings']\n res = []\n for f in followings:\n res.extend(db.get_one_from_collection(\n USER_RECIPES, 'user_id', f)['recipe_list'])\n \n return {'msg': res}\n\n\n@feed.route('/hot', strict_slashes=False)\nclass FeedHot(Resource):\n @feed.response(200, 'Success')\n @feed.response(400, 'invalid token')\n @feed.doc(description='''\n To return some hot recipes. \n ''')\n def get(self):\n cursor = db.search_by_range(RECIPES, 1, None, 100, {'_id': 1, 'liked_num': 1})\n pool = []\n pool.extend(cursor)\n n = len(pool)\n res = {'res': []}\n if n < 10:\n res['res'].extend(pool)\n return res\n\n picks = []\n for _ in range(10):\n i = randint(0, n-1)\n while i in picks:\n i = randint(0, n-1)\n picks.append(i)\n\n for i in picks:\n res['res'].append(pool[i])\n return res\n\n\n@feed.route('/users', strict_slashes=False)\nclass FeedUsers(Resource):\n @feed.response(200, 'Success')\n @feed.response(400, 'invalid token')\n @feed.param('n', 'Number of users that expect to be recommended in feed page')\n @feed.expect(auth_details)\n @feed.doc(description='''\n Return users who have similar rating with me. Will not serach on all users, but find out neighbours first.\n Neighbours are the user who is following one of the same contributors as me. \n If number of neighbours is less than the expected number, will search from logged-in user's followers, \n then from popular users who have the most number of likes.\n ''')\n def get(self):\n user_id = authorize(request)\n n = int(get_request_arg('n'))\n rate_list = db.get_one_from_collection(\n RATES, 'user_id', user_id)[\"rates_list\"]\n following_list = db.get_one_from_collection(\n FOLLOWINGS, 'user_id', user_id)['followings']\n \n # if the user has not rate any reicpes or follow any contributors, give top n users who have most likes\n if (len(rate_list) == 0 or len(following_list) == 0):\n top_users = db.search_by_range(USERS, 1, None, n, {\n '_id': 1, 'username': 1, 'headshot': 1, 'liked_num': 1})\n res = {\n 'msg': 'Have not rate any recipes or have not followed anyone', \n 'res': []\n }\n res['res'].extend(top_users)\n return res\n \n # find my neighbours\n neighbours = set() # avoid duplicate users\n neighbours_similarity = []\n for following_id in following_list:\n cur_follower_list = db.get_one_from_collection(\n FOLLOWERS, 'user_id', following_id)['followers']\n if (cur_follower_list == None):\n continue\n for follower_id in cur_follower_list:\n if (follower_id != user_id):\n neighbours.add(follower_id)\n\n if (len(neighbours) < n):\n # find from my followers\n follower_list = db.get_one_from_collection(\n FOLLOWERS, 'user_id', user_id)['followers']\n neighbours.union(set(follower_list))\n\n if (len(neighbours) < n):\n # find out from popular users\n popular_users = db.search_by_range(USERS, 1, None, 2 * n, {'_id': 1})\n for user in popular_users:\n if (user['_id'] != user_id):\n neighbours.add(user['_id'])\n\n # return list(neighbours)\n # calculate similarity between neighbours\n for neighbour in neighbours:\n # calculate similarity\n rate_list_ngb = db.get_one_from_collection(\n RATES, 'user_id', neighbour)['rates_list']\n user = db.get_one_from_collection(\n USERS, '_id', neighbour)\n similarity, common = calculate_similarity(\n rate_list, rate_list_ngb)\n user_res = {\n '_id': user['_id'],\n 'username': user['username'],\n 
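FeedHot.get above draws ten distinct random indices by re-rolling randint until it finds an unused one; random.sample expresses the same intent without the retry loop. A sketch with a stand-in pool:

import random

pool = [{'_id': i, 'liked_num': i % 7} for i in range(100)]  # stand-in for the fetched recipe summaries
k = 10

picks = random.sample(range(len(pool)), k)  # k distinct indices in one call
hot = [pool[i] for i in picks]
assert len({r['_id'] for r in hot}) == k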
'similarity': similarity,\n 'no_common': common,\n }\n if ('headshot' in user.keys()): user_res['headshot'] = user['headshot']\n neighbours_similarity.append(user_res)\n\n neighbours_similarity.sort(key=lambda val: (val['similarity'], val['no_common']), reverse=True)\n neighbours_similarity = neighbours_similarity[:n]\n return {\n 'res': neighbours_similarity\n }\n","sub_path":"COMP9900/capstoneproject-comp9900-h18a-yyds/backend/apis/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"645691370","text":"import re\nfrom neigefr.models import Zipcode\n\n\nZIPCODE = re.compile('(F-)?([0-9]{5})', re.IGNORECASE)\nRANKING = re.compile('([0-9]+)/10')\n\n\nclass Flake(object):\n zipcode = None\n ranking = None\n\n\ndef find_zipcode(zipcode):\n \"Find a Zipcode object. Or None\"\n if Zipcode.objects.filter(zipcode=zipcode).exists():\n return Zipcode.objects.get(zipcode=zipcode)\n\n\ndef parse_body(body):\n \"Parse the tweet body. Return a Flake object\"\n flake = Flake()\n if '#neigefr' not in body:\n return None\n matcher = ZIPCODE.search(body)\n if matcher:\n flake.zipcode = matcher.group(2)\n matcher = RANKING.search(body)\n if matcher:\n flake.ranking = int(matcher.group(1))\n return flake\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"104269573","text":"from rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.exceptions import APIException\nfrom rest_framework.response import Response\nfrom coin.api.CoinSerializer import CoinSerializer\nfrom core.api.CoreViewSets import CoreViewSets\nfrom coin.service.CoinService import CoinService\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg import openapi\n\nclass CointViewSet(CoreViewSets):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self._coin_service = CoinService()\n\n page = openapi.Parameter('page', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, description='Pagina')\n size = openapi.Parameter('size', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, description='Quantidade')\n @swagger_auto_schema(manual_parameters=[page, size])\n @action(methods=['get'], detail=True, url_path='page')\n def list(self, request):\n try:\n res = self._coin_service.list(request)\n return res\n except Exception as erro:\n raise APIException('Erro ao listar, verificque os parametros de paginação.')\n\n @swagger_auto_schema(responses={200: CoinSerializer(many=True)})\n @action(methods=['post'], detail=True)\n def create(self, request):\n try:\n res = self._coin_service.create(request)\n return Response({'msg': 'Sucesso'}, status=status.HTTP_200_OK)\n except Exception as error:\n raise APIException('Erro ao criar')\n\n @action(methods=['put'], detail=True)\n def update(self, request, pk=None):\n try:\n self._coin_service.update_for_pk(request, pk)\n return Response({'msg': 'Sucesso'}, status=status.HTTP_200_OK)\n except Exception as e:\n raise APIException('Erro ao atualizar.')\n\n @action(methods=['put'], detail=True)\n def update_all_coin(self, request):\n try:\n res = self._coin_service.update_all_coin(request)\n return Response(res)\n except Exception as e:\n raise APIException('Erro ao atualizar')\n\n @action(methods=['delete'], detail=True)\n def delete(self, request, pk=None):\n res = self._coin_service.delete(pk)\n 
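helpers.py above is self-contained enough to exercise at the REPL; here is a quick usage check of its two patterns, with the Django-backed Zipcode lookup skipped since it needs a database. The sample tweet is made up.

import re

ZIPCODE = re.compile('(F-)?([0-9]{5})', re.IGNORECASE)
RANKING = re.compile('([0-9]+)/10')

body = 'Il neige fort a F-75001, je dirais 7/10 #neigefr'
assert '#neigefr' in body
zip_match = ZIPCODE.search(body)
rank_match = RANKING.search(body)
print(zip_match.group(2))        # '75001' -- group 2 is the 5-digit code, group 1 the optional 'F-' prefix
print(int(rank_match.group(1)))  # 7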
return Response({'Sucesso'}, status=status.HTTP_200_OK)","sub_path":"coin/api/CoinViewSet.py","file_name":"CoinViewSet.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"46301196","text":"import unittest\nimport pandas as pd\nimport numpy as np\nimport sys\nsys.path.append('..')\nfrom sensor.sensor import Sensor, SensorArray\n\nclass TestSensorMarkdown(unittest.TestCase):\n\n sensor_file = 'input_files\\\\test_sensor.yml'\n check_md_output_file = 'check_files\\\\test_sensor.md'\n dummy_md_writing_file = 'check_files\\\\test_sensor_dumy.md'\n check_md_heaterintlet1_output_file = 'check_files\\\\test_sensor_HeaterInlet1.md'\n\n def test_basic_writing_to_md(self):\n\n #check that the to_md output returns previously verified results\n sensor_array = SensorArray.from_file(self.sensor_file)\n with open(self.check_md_output_file,'r') as file:\n check_txt = file.read()\n self.assertEqual(check_txt,sensor_array.to_md(self.check_md_output_file))\n\n #check that the write function to a file works as epxected\n sensor_array.to_md(self.check_md_output_file)\n\n def test_single_sensor_md_function(self):\n sensor_array = SensorArray.from_file(self.sensor_file)\n md = sensor_array.HeaterInlet1.to_md()\n #check previously verified results\n with open(self.check_md_heaterintlet1_output_file,'r') as file:\n self.assertEqual(md,file.read())\n\nclass TestSensorTables(unittest.TestCase):\n \"\"\"\n At the moment this pretty much just makes sure that no errors are thrown using diffreent kwargs and such\n and the actual testing of the desired output is pretty limited.\n \"\"\"\n sensor_file = 'input_files\\\\test_table.yaml'\n basic_tbl_check = 'check_files\\\\basic_table.txt'\n\n def test_basic_to_table(self):\n\n sensor_array = SensorArray.from_file(self.sensor_file)\n table = sensor_array.to_table()\n \n def test_include_kwarg(self):\n \n sensor_array = SensorArray.from_file(self.sensor_file)\n table = sensor_array.to_table(include = ['id','part_number','name'])\n\n #test that the required items are in the table\n for name in ['id','part_number','name']:\n self.assertIn(name,table)\n \n #check a couple of the other items to ensure they are not in the table\n for name in ['date','equation','measure_units']:\n self.assertNotIn(name,table)\n \n def test_exclude_kwarg(self):\n\n sensor_array = SensorArray.from_file(self.sensor_file)\n names = ['location','measure_min','measure_max','voltage_min','voltage_max','current_max','current_min']\n table = sensor_array.to_table(exclude = names)\n\n for n in names:\n self.assertNotIn(n,table)\n \n def test_different_table_fmts(self):\n\n sensor_array = SensorArray.from_file(self.sensor_file)\n table = sensor_array.to_table(include = ['id','part_number','name'],tablefmt = 'grid')\n \n table = sensor_array.to_table(include= ['id','part_number','name'],tablefmt = 'asdgasdg')\n\n def test_header_fmt(self):\n\n sensor_array = SensorArray.from_file(self.sensor_file)\n table = sensor_array.to_table(include = ['id','part_number','name'],tablefmt = 'grid',headerfmt = 'id')\n \n def test_attribute_columns(self):\n\n sensor_array = SensorArray.from_file(self.sensor_file)\n table = sensor_array.to_table(include = ['id','part_number','name'],tablefmt = 'grid',headerfmt = 'id',columns = 
'attribute')\n\nunittest.main()","sub_path":"tests/test_reporting.py","file_name":"test_reporting.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"279395541","text":"import argparse\nimport numpy as np\nfrom pprint import pprint\nfrom attr import attrs, attrib\nfrom einops import rearrange\nimport pdb\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nfrom itertools import product\nimport csv\n\nimport torch\nimport torch.nn as nn\nimport json\nfrom training.pytorch.models.unet import Unet\nfrom training.pytorch.models.fusionnet import Fusionnet\nfrom torch.optim import lr_scheduler\nimport copy\nfrom training.pytorch.utils.eval_segm import mean_IoU\nfrom training.pytorch.utils.experiments_utils import improve_reproducibility\nfrom training.pytorch.losses import (multiclass_ce, multiclass_dice_loss, multiclass_jaccard_loss, multiclass_tversky_loss, multiclass_ce_points)\nfrom training.pytorch.data_loader import DataGenerator\nfrom torch.utils import data\nimport os\n\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--config_file', type=str, default=\"/mnt/blobfuse/train-output/conditioning/models/backup_unet_gn_isotropic_nn9/training/params.json\", help=\"json file containing the configuration\")\n\nparser.add_argument('--model_file', type=str,\n help=\"Checkpoint saved model\",\n default=\"/mnt/blobfuse/train-output/conditioning/models/backup_unet_gn_isotropic_nn9/training/checkpoint_best.pth.tar\")\n\n#parser.add_argument('--data_path', type=str, help=\"Path to data\", default=\"/mnt/blobfuse/cnn-minibatches/summer_2019/active_learning_splits/\")\n# parser.add_argument('--data_sub_dirs', type=str, nargs='+', help=\"Sub-directories of `data_path` to get data from\", default=['val1',]) # 'test1', 'test2', 'test3', 'test4'])\n\nparser.add_argument('--run_validation', action=\"store_true\", help=\"Whether to run validation\")\nparser.add_argument('--validation_patches_fn', type=str, help=\"Filename with list of validation patch files\", default='training/data/finetuning/val2_test_patches_500.txt')\nparser.add_argument('--training_patches_fn', type=str, help=\"Filename with list of training patch files\", default=\"training/data/finetuning/val2_train_patches.txt\")\n\nparser.add_argument('--log_fn', type=str, help=\"Where to store training results\", default=\"/mnt/blobfuse/train-output/conditioning/models/backup_unet_gn_isotropic_nn9/finetuning/val/val2/finetune_results_last_k_layers.csv\")\n\nparser.add_argument('--model_output_directory', help='Where to store fine-tuned model', default='/mnt/blobfuse/train-output/conditioning/models/backup_unet_gn_isotropic_nn9/finetuning/val/val2/')\n\n\n\nargs = parser.parse_args()\n\nclass GroupParams(nn.Module):\n\n def __init__(self, model):\n super(GroupParams, self).__init__()\n self.gammas = nn.Parameter(torch.ones((1, 32, 1, 1)))\n self.betas = nn.Parameter(torch.zeros((1, 32, 1, 1)))\n self.model = model\n\n def forward(self, x):\n x, conv1_out, conv1_dim = self.model.down_1(x)\n\n x, conv2_out, conv2_dim = self.model.down_2(x)\n\n x, conv3_out, conv3_dim = self.model.down_3(x)\n x, conv4_out, conv4_dim = self.model.down_4(x)\n\n # Bottleneck\n x = self.model.conv5_block(x)\n\n # up layers\n x = self.model.up_1(x, conv4_out, conv4_dim)\n x = self.model.up_2(x, conv3_out, conv3_dim)\n x = self.model.up_3(x, conv2_out, conv2_dim)\n x = self.model.up_4(x, conv1_out, conv1_dim)\n x = x * self.gammas + self.betas\n\n 
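GroupParams above freezes the entire U-Net and trains only the gammas/betas tensors — one scale and one shift per channel of the last feature map. The (1, 32, 1, 1) shape broadcasts across batch and spatial dimensions, so the whole adapter costs just 64 scalars. A stripped-down sketch of that modulation on its own (ChannelAffine is a name invented here):

import torch
import torch.nn as nn

class ChannelAffine(nn.Module):
    """One learnable scale (gamma) and shift (beta) per channel."""
    def __init__(self, channels):
        super().__init__()
        self.gammas = nn.Parameter(torch.ones(1, channels, 1, 1))
        self.betas = nn.Parameter(torch.zeros(1, channels, 1, 1))

    def forward(self, x):
        # (1, C, 1, 1) broadcasts against (N, C, H, W)
        return x * self.gammas + self.betas

mod = ChannelAffine(32)
feats = torch.randn(4, 32, 52, 52)               # e.g. the pre-final U-Net feature map
assert mod(feats).shape == feats.shape
print(sum(p.numel() for p in mod.parameters()))  # 64 trainable scalars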
return self.model.conv_final(x)\n\n\n@attrs\nclass FineTuneResult(object):\n best_mean_IoU = attrib(type=float)\n train_duration = attrib(type=timedelta)\ndef finetune_group_params(path_2_saved_model, loss, gen_loaders, params, hyper_parameters, log_writer, n_epochs=25):\n learning_rate = hyper_parameters['learning_rate']\n optimizer_method = hyper_parameters['optimizer_method']\n lr_schedule_step_size = hyper_parameters['lr_schedule_step_size']\n opts = params[\"model_opts\"]\n unet = Unet(opts)\n checkpoint = torch.load(path_2_saved_model)\n unet.load_state_dict(checkpoint['model'])\n unet.eval()\n for param in unet.parameters():\n param.requires_grad = False\n\n # Parameters of newly constructed modules have requires_grad=True by default\n model_2_finetune = GroupParams(unet)\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model_2_finetune = model_2_finetune.to(device)\n loss = loss().to(device)\n\n optimizer = torch.optim.SGD(model_2_finetune.parameters(), lr=learning_rate, momentum=0.9)\n if optimizer_method == torch.optim.Adam:\n optimizer = torch.optim.Adam(model_2_finetune.parameters(), lr=learning_rate, eps=1e-5)\n \n # Decay LR by a factor of 0.1 every 7 epochs\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=lr_schedule_step_size, gamma=0.1)\n\n model_2_finetune = train_model(model_2_finetune, loss, optimizer,\n exp_lr_scheduler, gen_loaders, hyper_parameters, log_writer, num_epochs=n_epochs)\n return model_2_finetune\n\ndef finetune_last_k_layers(path_2_saved_model, loss, gen_loaders, params, hyper_parameters, log_writer, n_epochs=25):\n learning_rate = hyper_parameters['learning_rate']\n optimizer_method = hyper_parameters['optimizer_method']\n lr_schedule_step_size = hyper_parameters['lr_schedule_step_size']\n last_k_layers = hyper_parameters['last_k_layers']\n \n opts = params[\"model_opts\"]\n unet = Unet(opts)\n checkpoint = torch.load(path_2_saved_model)\n unet.load_state_dict(checkpoint['model'])\n unet.eval()\n\n for layer in list(unet.children())[:-last_k_layers]:\n for param in layer.parameters():\n param.requires_grad = False\n \n # Parameters of newly constructed modules have requires_grad=True by default\n model_2_finetune = unet\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model_2_finetune = model_2_finetune.to(device)\n loss = loss().to(device)\n\n optimizer = torch.optim.SGD(model_2_finetune.parameters(), lr=learning_rate, momentum=0.9)\n if optimizer_method == torch.optim.Adam:\n optimizer = torch.optim.Adam(model_2_finetune.parameters(), lr=learning_rate, eps=1e-5)\n \n # Decay LR by a factor of 0.1 every 7 epochs\n exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=lr_schedule_step_size, gamma=0.1)\n\n model_2_finetune = train_model(model_2_finetune, loss, optimizer,\n exp_lr_scheduler, gen_loaders, hyper_parameters, log_writer, num_epochs=n_epochs)\n return model_2_finetune\n\n\ndef train_model(model, criterion, optimizer, scheduler, dataloaders, hyper_parameters, log_writer, num_epochs=5, superres=False, masking=True):\n global results_writer\n \n # mask_id indices (points per patch): [1, 2, 3, 4, 5, 10, 15, 20, 40, 60, 80, 100]\n mask_id = hyper_parameters['mask_id']\n \n # mask_id indices (points per patch): [1, 2, 3, 4, 5, 10, 15, 20, 40, 60, 80, 100]\n mask_id = hyper_parameters['mask_id']\n\n since = datetime.now()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_mean_IoU = 0.0\n best_epoch = -1\n duration_til_best_epoch = since - since\n device = 
torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n for epoch in range(-1, num_epochs):\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\n print('-' * 10)\n\n train_mean_IoU = -1\n train_loss = -1\n val_mean_IoU = -1\n val_loss = -1\n\n # Each epoch has a training and validation phase\n phases = ['train']\n if args.run_validation:\n phases += ['val']\n for phase in phases:\n if phase == 'train':\n scheduler.step()\n model.train() # Set model to training mode\n else: # phase == 'val'\n if 'val' in dataloaders:\n model.eval() # Set model to evaluate mode\n else:\n continue\n\n running_loss = 0.0\n meanIoU = 0.0\n n_iter = 0\n\n # Iterate over data.\n for entry in dataloaders[phase]:\n if superres:\n if masking:\n inputs, labels, nlcd, masks = entry\n else:\n inputs, labels, nlcd = entry\n # TODO: use nlcd for superres training, below\n else:\n if masking:\n inputs, labels, masks = entry\n else:\n inputs, labels = entry\n\n inputs = inputs[:, :, 2:240 - 2, 2:240 - 2]\n labels = labels[:, :, 94:240 - 94, 94:240 - 94]\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n if masking and phase == 'train':\n masks = masks.float()\n masks = masks.to(device)\n masks = rearrange(masks, 'batch unknown masks height width -> batch (unknown masks) height width')\n mask = masks[:, mask_id : mask_id + 1, 94:240 - 94, 94:240 - 94].to(device)\n labels = labels * mask\n\n if masking and phase == 'train':\n masks = masks.float()\n masks = masks.to(device)\n masks = rearrange(masks, 'batch unknown masks height width -> batch (unknown masks) height width')\n mask = masks[:, mask_id : mask_id + 1, 94:240 - 94, 94:240 - 94].to(device)\n labels = labels * mask\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train' and epoch > -1):\n outputs = model.forward(inputs)\n loss = criterion(torch.squeeze(labels,1).long(), outputs)\n\n # backward + optimize only if in training phase\n if phase == 'train' and epoch > -1:\n loss.backward()\n optimizer.step()\n\n # statistics\n running_loss += loss.item()\n n_iter+=1\n #if phase == 'val':\n y_hr = np.squeeze(labels.cpu().numpy(), axis=1)\n batch_size, _, _ = y_hr.shape\n # TODO: do we need this check below?\n if phase == 'train':\n y_hat = outputs.cpu().detach().numpy() * mask.cpu().detach().numpy()\n else:\n y_hat = outputs.cpu().numpy()\n y_hat = np.argmax(y_hat, axis=1)\n batch_meanIoU = 0\n if phase == 'val':\n for j in range(batch_size):\n #pdb.set_trace()\n batch_meanIoU += mean_IoU(y_hat[j], y_hr[j], ignored_classes={0})\n batch_meanIoU /= batch_size\n meanIoU += batch_meanIoU\n # print('batch_meanIoU: %f' % batch_meanIoU)\n\n if phase == 'val':\n val_loss = running_loss / n_iter\n val_mean_IoU = meanIoU / n_iter\n elif phase == 'train':\n train_loss = running_loss / n_iter\n #train_mean_IoU = meanIoU / n_iter\n\n #print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n # phase, epoch_loss, epoch_mean_IoU))\n result_row = {\n 'run_id': hyper_parameters['run_id'],\n 'hyper_parameters': hyper_parameters,\n 'epoch': epoch,\n # 'train_IoU': train_mean_IoU,\n 'train_loss': train_loss,\n 'val_IoU': val_mean_IoU,\n 'val_loss': val_loss,\n 'total_time': datetime.now() - since\n }\n print(result_row)\n results_writer.writerow(result_row)\n\n hyper_parameters['epoch'] = epoch\n hyper_parameters_str = sorted(hyper_parameters.items())\n finetuned_fn = str(Path(args.model_output_directory) / (\"finetuned_unet_gn.pth_%s.tar\" % hyper_parameters_str))\n 
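In the training phase above, the labels are multiplied by a 0/1 point mask so that only a sparse set of annotated pixels keeps its class while everything else collapses to class 0, which the point loss and mean_IoU (via ignored_classes={0}) then skip. A toy illustration of that masking; the shapes are hypothetical, though the 52x52 crop matches the 240 - 2*94 crop used in the loop:

import torch

labels = torch.randint(1, 5, (2, 1, 52, 52))  # classes 1..4; 0 is reserved for 'ignore'
mask = torch.zeros(2, 1, 52, 52)
mask[:, :, ::10, ::10] = 1.0                  # pretend only a sparse grid of points is annotated

masked_labels = labels * mask                 # unannotated pixels become class 0
print(int(mask.sum().item()), 'annotated points survive the mask')
# Gradients then flow only from the surviving points, which is the whole trick
# behind fine-tuning from a handful of labeled clicks per patch.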
torch.save(model.state_dict(), finetuned_fn)\n \n # deep copy the model\n #if phase == 'val' and epoch_mean_IoU > best_mean_IoU:\n # best_mean_IoU = epoch_mean_IoU\n # best_model_wts = copy.deepcopy(model.state_dict())\n # best_epoch = epoch\n # duration_til_best_epoch = datetime.now() - since\n print()\n\n duration = datetime.now() - since\n seconds_elapsed = duration.total_seconds()\n\n\n print('Training complete in {:.0f}m {:.0f}s'.format(\n seconds_elapsed // 60, seconds_elapsed % 60))\n print('Best val IoU: {:4f}'.format(best_mean_IoU))\n # load best model weights\n # model.load_state_dict(best_model_wts)\n return model, FineTuneResult(best_mean_IoU=best_mean_IoU, train_duration=duration)\n\ndef main(finetune_methods, validation_patches_fn=None):\n global results_writer\n results_file = open(args.log_fn, 'w+')\n results_writer = csv.DictWriter(results_file, ['run_id', 'hyper_parameters', 'epoch', 'train_IoU', 'train_loss', 'val_IoU', 'val_loss', 'total_time'])\n results_writer.writeheader()\n\n params = json.load(open(args.config_file, \"r\"))\n \n f = open(args.training_patches_fn, \"r\")\n training_patches = f.read().strip().split(\"\\n\")\n f.close()\n\n validation_patches = None\n if args.validation_patches_fn:\n f = open(args.validation_patches_fn, \"r\")\n validation_patches = f.read().strip().split(\"\\n\")\n f.close()\n\n # f = open(training_points_sample_fn, \"r\")\n # training_points = [ for line in f.read().stip().split(\"\\n\")]\n \n batch_size = params[\"loader_opts\"][\"batch_size\"]\n patch_size = params[\"patch_size\"]\n num_channels = params[\"loader_opts\"][\"num_channels\"]\n params_train = {'batch_size': params[\"loader_opts\"][\"batch_size\"],\n 'shuffle': params[\"loader_opts\"][\"shuffle\"],\n 'num_workers': params[\"loader_opts\"][\"num_workers\"]}\n \n training_set = DataGenerator(\n training_patches, batch_size, patch_size, num_channels, superres=params[\"train_opts\"][\"superres\"], masking=True\n )\n\n validation_set = None\n if validation_patches:\n validation_set = DataGenerator(\n validation_patches, batch_size, patch_size, num_channels, superres=params[\"train_opts\"][\"superres\"], masking=True\n )\n\n model_opts = params[\"model_opts\"]\n loss = multiclass_ce_points\n path = args.model_file\n\n dataloaders = {'train': data.DataLoader(training_set, **params_train)}\n if validation_set:\n dataloaders['val'] = data.DataLoader(validation_set, **params_train)\n\n results = {}\n for run_id, (finetune_method_name, finetune_function, hyper_params) in enumerate(finetune_methods):\n hyper_params['run_id'] = run_id\n print('Fine-tune hyper-params: %s' % str(hyper_params))\n improve_reproducibility()\n model, result = finetune_function(path, loss, dataloaders, params, hyper_params, results_writer, n_epochs=10)\n results[finetune_method_name] = result\n \n savedir = args.model_output_directory\n if not os.path.exists(savedir):\n os.makedirs(savedir)\n \n if model_opts[\"model\"] == \"unet\":\n finetuned_fn = str(Path(savedir) / (\"finetuned_unet_gn.pth_%s.tar\" % str(hyper_params)))\n torch.save(model.state_dict(), finetuned_fn)\n pprint(results)\n results_file.close()\n\n \ndef product_dict(**kwargs):\n keys = kwargs.keys()\n vals = kwargs.values()\n for instance in product(*vals):\n yield dict(zip(keys, instance))\n\n \nif __name__ == \"__main__\":\n params_sweep_last_k = {\n 'method_name': ['last_k_layers'],\n 'optimizer_method': [torch.optim.Adam], #, torch.optim.SGD],\n 'last_k_layers': [1, 2, 4], #, 8],\n 'learning_rate': [0.01], #, 0.005, 0.001],\n 
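product_dict near the bottom of model_finetuning.py turns a grid of hyper-parameter lists into one dict per combination via itertools.product; it is what expands params_sweep_last_k and params_sweep_group_norm into the run list handed to main(). A two-knob usage example:

from itertools import product

def product_dict(**kwargs):
    keys = kwargs.keys()
    vals = kwargs.values()
    for instance in product(*vals):
        yield dict(zip(keys, instance))

grid = list(product_dict(learning_rate=[0.01, 0.001], last_k_layers=[1, 2]))
print(len(grid))  # 4 combinations
print(grid[0])    # {'learning_rate': 0.01, 'last_k_layers': 1}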
'lr_schedule_step_size': [5],\n 'mask_id': range(12),\n }\n\n params_sweep_group_norm = {\n 'method_name': ['group_params'],\n 'optimizer_method': [torch.optim.Adam], #, torch.optim.SGD],\n 'learning_rate': [0.03], # 0.03, 0.01], # 0.005, 0.001],\n 'lr_schedule_step_size': [5],\n 'mask_id': range(12),\n }\n\n params_list_last_k = list(product_dict(**params_sweep_last_k))\n params_list_group_norm = list(product_dict(**params_sweep_group_norm))\n \n main([('Group params', finetune_group_params, hypers) for hypers in params_list_group_norm] + \\\n [('Last k layers', finetune_last_k_layers, hypers) for hypers in params_list_last_k])\n\n","sub_path":"training/pytorch/model_finetuning.py","file_name":"model_finetuning.py","file_ext":"py","file_size_in_byte":16641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"141296241","text":"#!/usr/bin/env python3\nimport sys, re\nregex = re.compile('[^a-zA-Z]')\n\n\n# input comes from STDIN\nfor line in sys.stdin:\n Nom = \"1\"\n TelephoneUser = \"1\"\n TelephoneCalls = \"1\"\n Duree = 0\n City = \"Paris\"\n\n # remove leading and trailing whitespace\n line = line.strip()\n\n # parse the input we got from mapper.py\n words = line.split(\",\")\n \n if len(words) == 5:\n if words[4]== City:\n Nom = words[0]\n TelephoneUser = words[2]\n else :\n TelephoneCalls = words[0]\n Duree = words[2]\n\n print('%s;%s;%s;%s' % (Nom, TelephoneUser, TelephoneCalls, Duree))\n\n\n ","sub_path":"mapperQ4.py","file_name":"mapperQ4.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"457243696","text":"from django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.shortcuts import render\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\nfrom .models import Car, BModel, Brand\nfrom django.shortcuts import get_object_or_404, get_list_or_404\nfrom django.utils.translation import ugettext_lazy as _\n\n\ndef index(request):\n if request.method == 'POST':\n pass\n order_list = {'model__brand__name': _('brand'),\n 'model__name': _('model'),\n 'price': _('price'),\n 'production_year': _('year'),\n 'model__power': _('power'),\n }\n order = request.POST.get(\"order_by\", 'model')\n query = request.GET.get(\"q\")\n cars_list = Car.objects.all().prefetch_related('model')\n count = None\n if order:\n rate = request.POST.get(\"rate\", 'asc')\n if query:\n cars_list = cars_list.filter(\n Q(model__name__icontains=query) |\n Q(model__brand__name__icontains=query)\n ).distinct()\n count = cars_list.count()\n if rate == 'desc':\n cars_list = cars_list.order_by('-'+order)\n else:\n cars_list = cars_list.order_by(order)\n paginator = Paginator(cars_list, 6)\n page = request.GET.get('page')\n try:\n cars = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n cars = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n cars = paginator.page(paginator.num_pages)\n return render(request, 'cars/index.html', {'cars_list': cars,\n 'order_list': order_list,\n 'count': count\n })\n\n\ndef show_car_detail(request, brand_slug, model_slug, car_id, template_name='cars/details.html'):\n car = get_object_or_404(Car, id=car_id)\n return render(request, template_name, {'car': car})\n\n\nclass BrandCreate(CreateView):\n model = Brand\n template_name = 'cars/car_form.html'\n fields = ['name',\n 'country'\n ]\n\n\nclass ModelCreate(CreateView):\n model = BModel\n template_name = 'car_form.html'\n fields = ['brand',\n 'name',\n 'doors',\n 'power',\n 'coupe_type',\n 'seats',\n 'engine'\n ]\n\n\nclass CarCreate(CreateView):\n model = Car\n template_name = 'cars/car_form.html'\n fields = ['model',\n 'color',\n 'picture',\n 'production_year',\n 'quantity',\n 'price',\n ]\n\n\nclass CarUpdate(UpdateView):\n model = Car\n template_name = 'cars/car_update_form.html'\n fields = ['model',\n 'color',\n 'production_year',\n 'quantity',\n 'price',\n 'picture',\n ]\n success_url = \"/cars/\"\n\n\nclass CarDelete(DeleteView):\n model = Car\n success_url = reverse_lazy('cars:index')\n\n\ndef show_models(request, brand_slug, template_name='cars/models.html'):\n c = get_object_or_404(Brand, slug=brand_slug)\n models = c.bmodel_set.all()\n return render(request, template_name, {'models': models})\n\n\ndef show_brands(request):\n brands = Brand.objects.all()\n return render(request, 'cars/brands.html', {'brands': brands})","sub_path":"cars_market/cars/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"32660367","text":"import numpy as np\n\nx = np.arange(0, 3 * np.pi, 0.1)\n# y = np.sin(x)\n# y = np.cos(x)\ny = np.tan(x)\nz = np.cos(0)\n\nprint(y)\nprint(z)\n\ny = np. 
sin(2 * 180 / np.pi)\nprint(y)","sub_path":"4 NumPy/43.numpy_trigonometri.py","file_name":"43.numpy_trigonometri.py","file_ext":"py","file_size_in_byte":170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"185434047","text":"class Solution(object):\r\n def findLUSlength(self, a, b):\r\n \"\"\"\r\n :type a: str\r\n :type b: str\r\n :rtype: int\r\n \"\"\"\r\n return -1 if a==b else max(len(a),len(b))\r\n\r\nif __name__ == '__main__':\r\n# a = \"aba\"\r\n# b = \"cdc\"\r\n \r\n a = \"aaa\"\r\n b = \"ccc\"\r\n sol = Solution()\r\n o = sol.findLUSlength(a, b)\r\n print(o)","sub_path":"Algorithms/521-longest-uncommon-subsequence-1.py","file_name":"521-longest-uncommon-subsequence-1.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"588592630","text":"def fill(x, pstate):\n for i in range(x):\n pstate.append(0)\n return(pstate)\ndef fillend(x, pstate):\n for i in range(x):\n pstate.append(1)\n return(pstate)\ndef intp(state):\n masterstate = []\n for i in state:\n if(i == 0):\n masterstate.append(False)\n else:\n masterstate.append(True)\n return masterstate\n\ndef possibilities(possiblestates):\n masterlist = []\n begin = possiblestates\n end = fillend(len(begin), [])\n i = len(begin) - 1\n state = begin\n while(state != end):\n if(state[i] == 1):\n i = i - 1\n else:\n state[i] = 1\n print(intp(state))\n \n \ndef bn(x):\n pstates = []\n pstates = fill(x, pstates)\n possibilities(pstates)\n\n\nbn(10)\n","sub_path":"Lab 3/bn.py","file_name":"bn.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"282824205","text":"import fire\nimport praw\nfrom config import config\nfrom tqdm import tqdm\nimport time\n\n\nclass AutoPublisher(object):\n \"\"\"A simple publisher for reddit and hackernews\"\"\"\n\n def __init__(self):\n self.reddit = praw.Reddit(client_id=config['redditAuth']['client_id'],\n client_secret=config['redditAuth']['client_secret'],\n user_agent=config['redditAuth']['user_agent'],\n username=config['redditAuth']['username'],\n password=config['redditAuth']['password'])\n\n def submitUrl(self, title, url):\n assert title, 'No title given'\n assert url, 'No url given'\n\n for subreddit in tqdm(config['subreddits']):\n self.reddit.subreddit(subreddit).submit(title, url=url).mod.distinguish(sticky=True)\n time.sleep(60 * 10) # Sleep for 10 min\n\n\nif __name__ == '__main__':\n fire.Fire(AutoPublisher)\n","sub_path":"autoPublisher/autoPublisher.py","file_name":"autoPublisher.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"8154723","text":"#!/usr/bin/env python3\n\nimport sys\nimport getopt\nimport sqlite3\nimport pandas as pd\n\n\n#-- Collateral Information\n\ncollateral_dir=\"/Users/dinos/Trading/Programming/OptionsTrading/DB_backups\"\nDB_dir=\"/Users/dinos/Trading/Programming/OptionsTrading/DBs\"\nlist_of_stocks=\"stocks.list.1\"\nlist_of_etfs=\"etf.list\"\netf_db=\"etf_values.db\"\nstock_db=\"stock_values.db\"\ntimeframes=[\"daily\", \"weekly\", \"monthly\"]\n\n#-- Helper Functions\ndef process_file(fn,name_list):\n with open(fn) as f:\n for elem in f.read().splitlines():\n name_list.add(elem)\n\n\n#--Classes\nclass DataBase:\n \n def __init__(self, name):\n self._name=name\n try:\n self._conn = sqlite3.connect(self._name)\n except Error as e:\n 
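bn.py above tries to enumerate every length-x vector of 0/1 by hand, but its loop only ever flips bits from 0 to 1 and never clears them, so it prints a handful of monotone states rather than all 2**x combinations. The stock way to enumerate them is itertools.product; a sketch that keeps bn.py's conversion to booleans:

from itertools import product

def all_states(n):
    # Every length-n combination of 0/1, as lists of booleans --
    # what bn.py's hand-rolled loop appears to be aiming for.
    return [[bit == 1 for bit in bits] for bits in product((0, 1), repeat=n)]

states = all_states(3)
print(len(states))  # 8 == 2**3
print(states[0])    # [False, False, False]
print(states[-1])   # [True, True, True]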
print(e)\n self._cur=self._conn.cursor()\n\n def __del__(self):\n self._cur.close()\n self._conn.close()\n\n @property\n def cur(self):\n return self._cur\n\n @property\n def conn(self):\n return self._conn\n\n\n\nclass Instrument:\n\n def __init__(self, name, db_name):\n self._name=name\n self._DB=DataBase(db_name)\n\n def get_values(self,tf,period):\n table_name=\"\"\n name=self._name.replace(\"-\",\"_\")\n if tf == \"daily\":\n table_name=name.lower()+\"_values\"\n else:\n table_name=name.lower()+\"_values_\"+tf\n query=f\"SELECT count(*) FROM {table_name}\"\n self._DB.cur.execute(query)\n rows=self._DB.cur.fetchone()\n number=rows[0]\n offset=number-period\n query=f\"SELECT * FROM {table_name} LIMIT {period} OFFSET {offset}\"\n df=pd.read_sql_query(query, self._DB.conn)\n return df\n\n\n\n\n##--\ndef main(argv):\n list_file=\"\"\n name_list=set()\n\n los=set()\n loe=set()\n\n process_file(collateral_dir+\"/\"+list_of_stocks, los)\n process_file(collateral_dir+\"/\"+list_of_etfs, loe)\n\n try:\n opts, args = getopt.getopt(argv,\"hl:\",[\"list_file=\"])\n except getopt.GetoptError:\n print (\"\"\"main.py -l <file containing list of stocks> \n \"\"\")\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print (\"\"\"main.py -l <file containing list of stocks> \n \"\"\") \n sys.exit()\n elif opt in(\"-l\",\"--list_file\"):\n list_file=arg\n \n if list_file != \"\":\n process_file(list_file, name_list)\n for stock in name_list:\n if stock in los:\n print (stock+\" is a stock\")\n ins=Instrument(stock, DB_dir+\"/\"+stock_db)\n df=ins.get_values(timeframes[0], 5)\n #df.set_index('index', inplace=True)\n df1=df.loc[0:, 'Date':'Adj Close']\n df1.set_index('Date', inplace=True)\n print(df1)\n elif stock in loe:\n print(stock+\" is an ETF\")\n ins=Instrument(stock, DB_dir+\"/\"+etf_db)\n else:\n print(stock+\" not recognized\") \n\nif __name__ == \"__main__\":\n main(sys.argv[1:]) ","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"653334924","text":"import os\nimport pysam\nfrom pyfasta import Fasta\nimport matplotlib\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport re\nfrom glob import glob\nfrom CMlib.showprocess import showbarprocess\nfrom PyQt5 import QtWidgets\n\ndef alnfile_filter(infofile,groupinfo, refname, output, bamdir):\n \"\"\"\n :param infofile: a description file of details of each sample, example: sample_infor.txt\n :param groupinfo: a description file of details of each group, example: group_infor.txt\n :param refname: a fasta format of the sequence in the target region, exaple:Samples_gene.fa\n :param output: folder of final result\n :param bamdir: folder of temporary files\n :return:\n \"\"\"\n fa = Fasta(refname)\n info = pd.read_csv(infofile, index_col=\"Index\")\n groupinfor = pd.read_csv(groupinfo)\n stranddict = dict()\n # outiofile = os.path.join(output,'filter_wt_reads_number.txt')\n # outio = open(outiofile, 'w')\n # print(\"Sample\\tfilter\", file=outio)\n for idy in groupinfor.index:\n stranddict[groupinfor.loc[idy].rep1] = groupinfor.loc[idy].strand\n stranddict[groupinfor.loc[idy].rep2] = groupinfor.loc[idy].strand\n stranddict[groupinfor.loc[idy].rep3] = groupinfor.loc[idy].strand\n stranddict[groupinfor.loc[idy].control] = groupinfor.loc[idy].strand\n\n for idx in info.index:\n\n note = info.loc[idx].Note\n if note not in stranddict:\n error = ' 
'.join([note, 'is not involved in group table! Please Check!'])\n showwarnings(\"Error\", error)\n continue\n\n bamname = os.path.join(bamdir, info.loc[idx].Note + '.bam')\n outfile_del = os.path.join(output, info.loc[idx].Note + '_del_aln.fa')\n outfile_snp = os.path.join(output, info.loc[idx].Note + '_snp_aln.fa')\n alnfile_del = os.path.join(output, info.loc[idx].Note + '_del_aln.txt')\n alnfile_snp = os.path.join(output, info.loc[idx].Note + '_snp_aln.txt')\n print(\"output\", info.loc[idx].Note)\n ################\n tmp = \"output \" + info.loc[idx].Note\n showbarprocess(tmp)\n ###############\n\n outfa_del = open(outfile_del, 'w')\n outfa_snp = open(outfile_snp, 'w')\n outlan_del = open(alnfile_del, 'w')\n outlan_snp = open(alnfile_snp, 'w')\n\n note = info.loc[idx].Note\n strand = stranddict[note]\n\n if (re.search(\"gRNA\", info.loc[idx].Note)):\n if strand == '+':\n start = info.loc[idx]['start'] - 10\n end = info.loc[idx]['end'] + 10\n\n else:\n start = info.loc[idx]['start'] - 10\n end = info.loc[idx]['end'] + 10\n\n elif (re.search(\"crRNA\", info.loc[idx].Note)):\n if strand == '+':\n start = info.loc[idx]['start'] - 10\n end = info.loc[idx]['end'] + 30\n\n else:\n start = info.loc[idx]['start'] - 30\n end = info.loc[idx]['end'] + 10\n\n # if (re.search(\"gRNA\", info.loc[idx].Note)):\n # start = info.loc[idx].start - 10\n # end = info.loc[idx].end + 10\n # elif (re.search(\"crRNA\", info.loc[idx].Note)):\n # start = info.loc[idx].start\n # end = info.loc[idx].end + 30\n #start = info.loc[idx].start - 10\n #end = info.loc[idx].end - 10\n gene = info.loc[idx].gene_name\n samfile = pysam.AlignmentFile(bamname, \"rb\")\n mtreads = set()\n totalcov = 0\n covage = 0\n\n replace = set()\n replace_left = set()\n replace_final = set()\n all_tmp = set()\n wt_set = set()\n replace_side = set()\n wt_side_set = set()\n wt_final_set = set()\n filter_set = set()\n\n insert = set()\n\n deletion = set()\n\n reads = dict()\n\n seq = fa[gene][start - 1:end].upper() ##reference sequence\n seqlist = list()\n for nt in seq:\n seqlist.append(nt)\n\n for pileupcolumn in samfile.pileup(gene, max_depth=50000):\n\n # print (pileupcolumn.pos, pileupcolumn.n)\n\n\n\n totalcov += pileupcolumn.n\n # print(pileupcolumn.pos, pileupcolumn.n)\n\n if end > pileupcolumn.pos >= start-1:\n\n for pileupread in pileupcolumn.pileups:\n # print(pileupcolumn.pos, pileupcolumn.n)\n\n if pileupread.alignment.query_name not in reads:\n # print(pileupread.alignment.query_name)\n reads[pileupread.alignment.query_name] = ''\n\n if not pileupread.is_del and not pileupread.is_refskip:\n refbase = fa[gene][pileupcolumn.pos].upper()\n querybase = pileupread.alignment.query_sequence[pileupread.query_position]\n all_tmp.add(pileupread.alignment.query_name)\n if querybase != refbase:\n replace.add(pileupread.alignment.query_name)\n\n reads[pileupread.alignment.query_name] += pileupread.alignment.query_sequence[\n pileupread.query_position]\n # print(reads[pileupread.alignment.query_name])\n\n # print(pileupread.query_position)\n # querybase = pileupread.alignment.query_sequence[pileupread.query_position]\n\n # # refbase = pileupread.alignment.get_reference_sequence()[pileupread.query_position]\n # refbase = fa[gene][pileupcolumn.pos].upper()\n # if querybase !=refbase :\n # # replace += 1\n # mtreads.add(pileupread.alignment.query_name)\n # replace.add(pileupread.alignment.query_name)\n\n # if pileupread.indel > 0:\n\n # # insert += 1\n # mtreads.add(pileupread.alignment.query_name)\n # insert.add(pileupread.alignment.query_name)\n 
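The nested pileup loops above reconstruct, per read name, the base string that each read contributes across the target window, padding deletions with '-' so all strings stay column-aligned. Condensed to its core (the BAM path and contig name are placeholders; the pysam calls are the same ones used above):

import pysam

bam = pysam.AlignmentFile('sample.bam', 'rb')   # placeholder path
reads = {}
for col in bam.pileup('contig_of_interest', max_depth=50000):
    for pr in col.pileups:
        name = pr.alignment.query_name
        reads.setdefault(name, '')
        if not pr.is_del and not pr.is_refskip:
            # the base this read shows at the current reference column
            reads[name] += pr.alignment.query_sequence[pr.query_position]
        if pr.indel < 0:
            reads[name] += '-' * abs(pr.indel)  # pad deletions to keep columns aligned
bam.close()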
# print()\n\n if pileupread.indel < 0:\n reads[pileupread.alignment.query_name] += '-' * abs(pileupread.indel)\n deletion.add(pileupread.alignment.query_name)\n # print(reads[pileupread.alignment.query_name])\n # print(reads)\n # # deletion += 1\n # mtreads.add(pileupread.alignment.query_name)\n # deletion.add(pileupread.alignment.query_name)\n\n wt_set = all_tmp - replace\n for pileupcolumn_filter in samfile.pileup(gene, max_depth=50000): ###两边也无突变\n\n if start > pileupcolumn_filter.pos >= 0 or pileupcolumn_filter.pos > end:\n for pileupread_filter in pileupcolumn_filter.pileups:\n # for replace_filter in replace_all:\n\n # if replace_filter in str(pileupread_filter) :\n # replace_side.add(pileupread_filter.alignment.query_name)\n\n if pileupread_filter.alignment.query_name not in replace_left:\n\n if not pileupread_filter.is_del and not pileupread_filter.is_refskip:\n querybase_filter = pileupread_filter.alignment.query_sequence[pileupread_filter.query_position]\n\n # refbase = pileupread.alignment.get_reference_sequence()[pileupread_filter.query_position]\n\n refbase_filter = fa[gene][pileupcolumn_filter.pos].upper()\n replace_side.add(pileupread_filter.alignment.query_name) # 两边无突变\n if querybase_filter != refbase_filter:\n # replace += 1\n # mtreads.add(pileupread.alignment.query_name)\n\n # replace.add(pileupread.alignment.query_name)\n replace_left.add(pileupread_filter.alignment.query_name) # 两边无突变,有错配\n # break\n\n\n wt_side_set = replace_side - replace_left\n wt_final_set = wt_side_set & wt_set\n filter_set = wt_set - wt_side_set\n replace_final = replace - deletion\n\n\n lt = end - start + 1\n # print(lt)\n typdict = dict()\n typdict_snp = dict()\n typdict_del = dict()\n for i in reads:\n if i in filter_set:\n continue\n if len(reads[i]) == lt:\n # print(reads[i])\n if i in replace_final:\n if reads[i] in typdict_snp:\n typdict_snp[reads[i]] += 1\n else:\n typdict_snp[reads[i]] = 1\n continue\n if i in deletion:\n if reads[i] in typdict_del:\n typdict_del[reads[i]] += 1\n else:\n typdict_del[reads[i]] = 1\n continue\n\n if reads[i] in typdict:\n typdict[reads[i]] += 1\n else:\n typdict[reads[i]] = 1\n for mutype in typdict:\n print('>', typdict[mutype], sep='', file=outfa_snp)\n print(mutype, file=outfa_snp)\n print(typdict[mutype], '\\t'.join(mutype), sep='\\t', file=outlan_snp)\n print('>', typdict[mutype], sep='', file=outfa_del)\n print(mutype, file=outfa_del)\n print(typdict[mutype], '\\t'.join(mutype), sep='\\t', file=outlan_del)\n for mutype_snp in typdict_snp:\n print('>', typdict_snp[mutype_snp], sep='', file=outfa_snp)\n print(mutype_snp, file=outfa_snp)\n print(typdict_snp[mutype_snp], '\\t'.join(mutype_snp), sep='\\t', file=outlan_snp)\n for mutype_del in typdict_del:\n print('>', typdict_del[mutype_del], sep='', file=outfa_del)\n print(mutype_del, file=outfa_del)\n print(typdict_del[mutype_del], '\\t'.join(mutype_del), sep='\\t', file=outlan_del)\n print(\"Refseq\",'\\t'.join(seqlist), sep='\\t',file=outlan_snp)\n print(\"Refseq\", '\\t'.join(seqlist), sep='\\t', file=outlan_del)\n\n\n # print(info.loc[idx].Note, end='\\t', file=outio)\n # print(len(filter_set), end='\\n', file=outio)\n outfa_snp.close()\n outlan_snp.close()\n outfa_del.close()\n outlan_del.close()\n #outio.close()\n\n# ############## warning message #########\ndef showwarnings(title, message):\n wBox = QtWidgets.QMessageBox()\n wBox.setIcon(QtWidgets.QMessageBox.Warning)\n wBox.setWindowTitle(title)\n wBox.setText(message)\n wBox.setStandardButtons(QtWidgets.QMessageBox.Ok)\n 
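The typdict/typdict_snp/typdict_del dictionaries above count identical reconstructed read strings by hand; collections.Counter does the same bookkeeping in one pass. A sketch with made-up read data:

from collections import Counter

reads = {'r1': 'ACG-T', 'r2': 'ACG-T', 'r3': 'ACGTT'}  # name -> reconstructed window string
deletion = {'r1', 'r2'}  # reads flagged as carrying a deletion
expected_len = 5         # window length, end - start + 1 above

by_type = Counter(seq for name, seq in reads.items()
                  if name in deletion and len(seq) == expected_len)
for seq, n in by_type.items():
    print('>%d' % n)     # same FASTA-style records as outfa_del above
    print(seq)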
wBox.exec_()\n##################################################","sub_path":"CMlib/output_aln_fa_filter.py","file_name":"output_aln_fa_filter.py","file_ext":"py","file_size_in_byte":11344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"234161882","text":"import nltk\nimport pandas as pd\nimport ahocorasick as ac\nimport numpy as np\n\n_punkt_sentence_splitter_eng = nltk.load('tokenizers/punkt/english.pickle')\n_treebank_word_tokenize = nltk.tokenize.TreebankWordTokenizer().tokenize\n# initialize a set of English stop-words\n_stop_words = set(nltk.corpus.stopwords.words('english'))\n_stop_words |= {\"'s\", \"'ll\", \"n't\", \"'re\", \"'ve\", \"'d\"}\n_stop_words = frozenset(_stop_words)\n\n\ndef sentence_split(txt):\n if pd.isnull(txt):\n return None\n return _punkt_sentence_splitter_eng.tokenize(txt)\n\n\ndef _default_token_processor(s):\n if not next((ch for ch in s if ch.isalnum()), None):\n return None\n result = s.lower()\n return result if result not in _stop_words else None\n\n\ndef tokenize(txt, token_processor=_default_token_processor):\n if pd.isnull(txt):\n return None\n if not isinstance(txt, str):\n raise ValueError(\"Can't tokenize non-str value: %s\" % txt)\n result_gen = (token_processor(token) for sent in sentence_split(txt)\n for token in _treebank_word_tokenize(sent))\n return [t for t in result_gen if t]\n\n\nclass DictionaryMatcher(object):\n def __init__(self, *txt_file_paths, preprocessor=tokenize):\n #\n self.preprocessor = preprocessor\n # read dictionary sources\n raw_entry_list = []\n for txt_file_path in txt_file_paths:\n with open(txt_file_path) as f:\n raw_entry_list += [l.rstrip() for l in f.readlines()]\n #\n self.automaton = ac.Automaton()\n for raw_e in raw_entry_list:\n entry_norm_tokens = self._analyze(raw_e)\n # TODO optimization point\n entry_norm_key = self._to_automaton_string(entry_norm_tokens)\n self.automaton.add_word(entry_norm_key, raw_e)\n self.automaton.make_automaton()\n\n def contained_in(self, txt):\n if pd.notnull(txt):\n return True if next(self._get_match_iter(txt), None) else False\n else:\n return False\n\n def count_matches(self, txt):\n if pd.notnull(txt):\n return sum(1 for m in self._get_match_iter(txt))\n else:\n return np.nan\n\n # testing method\n def print_matches(self, txt):\n if pd.notnull(txt):\n tokens = self._analyze(txt)\n automaton_input = self._to_automaton_string(tokens)\n print(automaton_input)\n match_iter = self.automaton.iter(automaton_input)\n for end, matched_span in match_iter:\n print(\"Matched '%s' with end at %s\" % (matched_span, end))\n else:\n print(\"! 
NULL input!\")\n\n def get_matches(self, txt):\n if pd.notnull(txt):\n tokens = self._analyze(txt)\n automaton_input = self._to_automaton_string(tokens)\n match_iter = self.automaton.iter(automaton_input)\n return [matched_span for end, matched_span in match_iter]\n else:\n return []\n\n def replace_matches(self, txt, repl):\n if pd.notnull(txt):\n tokens = self._analyze(txt)\n automaton_input = self._to_automaton_string(tokens)\n match_iter = self.automaton.iter(automaton_input)\n\n def find_token_begin(token_end):\n if automaton_input[token_end] != ']':\n raise ValueError('Token end does not point to \"]\"')\n for i in range(token_end, -1, -1):\n if automaton_input[i] == '[':\n return i\n raise ValueError(\"Can't find token begin\")\n\n # an index of char after last consumed one\n last_consumed = 0\n result = ''\n for end, matched_span in match_iter:\n begin = find_token_begin(end)\n result += automaton_input[last_consumed:begin]\n result += repl\n last_consumed = end + 1\n result += automaton_input[last_consumed:]\n return result\n else:\n return txt\n\n def _to_automaton_string(self, tokens):\n return ' '.join(map(lambda t: '[' + t + ']', tokens))\n\n def _get_match_iter(self, txt):\n tokens = self._analyze(txt)\n automaton_input = self._to_automaton_string(tokens)\n return self.automaton.iter(automaton_input)\n\n def _analyze(self, txt):\n return self.preprocessor(txt)\n","sub_path":"runtimes/custom_scikit/src/cfepm/nlp/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"16349507","text":"# -*- coding:utf-8 -*-\n#!~/anaconda3/bin/python\nimport os\nimport re\nimport time\nimport urllib.request\nfrom io import BytesIO\n\nimport bson.binary\nimport chardet\nimport requests\nimport xlwt\nfrom bs4 import BeautifulSoup as beautiful\nfrom selenium import webdriver\nimport conf\nimport datetime\nfrom pymongo import ASCENDING, DESCENDING\nimport random\n\n#国家发展改革委员会\n\n\nheaders = {\n 'User-Agent': random.choice(conf.user_agent)\n}\n# file_ad = '../政府政策公告信息/国家超链接/'\n\n# 插入数据库\ndef insertFile(source1,source2,source3,ctitle,date,complete_href,ProgramStarttime,html_name,file_names,img_names,css_names,file_ad):\n coll = conf.coll\n dit = {'department': source1, \"column\": source2, \"category\": source3, \"title\": ctitle, \"PublishedDate\": date,\n \"Crawllink\": complete_href, \"ProgramStarttime\": ProgramStarttime}\n # article,file,file_name\n with open(html_name, 'rb') as file:\n article = BytesIO(file.read())\n dit.setdefault(\"article\", bson.binary.Binary(article.getvalue()))\n i = 0\n for downfile in file_names:\n i = i + 1\n filesave = file_ad + downfile\n with open(filesave, 'rb') as file:\n file_one = BytesIO(file.read())\n key1 = \"file\" + str(i)\n key2 = \"file_name\" + str(i)\n if len(bson.binary.Binary(file_one.getvalue())) > 16793598:\n print(complete_href+\" 附件过大 \"+filesave)\n if filesave.split('.')[-1] == 'pdf':\n with open(\"/home/260199/爬虫/爬虫数据/政府公告/long_attention.pdf\", 'rb') as file:\n file_one = BytesIO(file.read())\n else:\n file_one = BytesIO(b\"Attachment is too large to download\")\n dit.setdefault(key1, bson.binary.Binary(file_one.getvalue()))\n dit.setdefault(key2, downfile)\n img_list = []\n for img_name in img_names:\n imgsave = file_ad + img_name\n with open(imgsave,'rb') as img:\n img_one = BytesIO(img.read())\n img_list.append(bson.binary.Binary(img_one.getvalue()))\n dit.setdefault('imges', img_list)\n css_list = []\n 
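DictionaryMatcher wraps every normalized token in square brackets before indexing it, so a dictionary entry can only match at whole-token boundaries — 'york' cannot fire inside '[yorkshire]'. The pyahocorasick calls it relies on, in isolation (the phrases are made up):

import ahocorasick as ac

automaton = ac.Automaton()
for phrase in ('[new] [york]', '[york]'):
    automaton.add_word(phrase, phrase)  # key and payload are the same here
automaton.make_automaton()

haystack = '[i] [love] [new] [york]'
for end_index, payload in automaton.iter(haystack):
    print(end_index, payload)
# Both '[york]' and '[new] [york]' are reported, ending at the same index;
# nothing would fire inside '[yorkshire]' because the closing bracket never matches.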
for css_name in css_names:\n csssave = file_ad + css_name\n with open(csssave,'rb') as css:\n css_one = BytesIO(css.read())\n css_list.append(bson.binary.Binary(css_one.getvalue()))\n dit.setdefault('css',css_list)\n coll.save(dit)\n # coll.create_index([(\"PublishedDate\", ASCENDING)])\n\n#获取动态网页源码,参数为分页面url\ndef getHtml_move(url):\n options = webdriver.ChromeOptions()\n options.add_argument('--headless')\n options.add_argument('--disable-gpu')\n options.add_argument(\"window-size=1024,768\")\n options.add_argument(\"--no-sandbox\")\n # options.add_argument('disable-infobars')\n driver = webdriver.Chrome('/home/260199/chrome/chromedriver', chrome_options=options)\n driver.maximize_window()\n driver.get(url)\n js = \"var q=document.documentElement.scrollTop=10000\"\n driver.execute_script(js)\n time.sleep(3)\n html_str = driver.page_source\n driver.quit()\n # html = urllib.request.urlopen(url).read()\n html = bytes(html_str, encoding=\"utf8\") #转码\n return html,html_str\n\n#获取静态网页源码,参数为分页面url\ndef getHtml_quiet(url):\n time.sleep(random.randint(5,10))\n req = urllib.request.Request(url,headers = headers)\n html = urllib.request.urlopen(req).read()\n chardit1 = chardet.detect(html)\n chard = chardit1['encoding']\n # html_req = requests.get(url)\n # html_req.encoding = chard\n # html_str = html_req.text\n html_str = html.decode(chard,'ignore')\n return html,chard,html_str\n\n#获取正文标题、附件信息,并下载附件,参数为分页面url,网页编码格式 不用\ndef get_ctitle(html_str,href,file_ad): #无\n bsObj = beautiful(html_str, \"html.parser\")\n #获取正文标题\n try:\n ctitle = bsObj.find('h1', {'id': 'con_title'}).text\n except:\n try:\n ctitle = bsObj.find('span', {'class': 'titleFont'}).text\n except:\n ctitle = None\n #获取附件信息,并下载\n file_infos = bsObj.find_all(\"a\", {\"href\": re.compile(r'.doc$|.docx$|.pdf$|.xls$|.xlsx$')})\n # print(file_infos)\n f1 = re.compile('href=\"(.*?)\"')\n f2 = re.compile('\">(.*?)</a>')\n file_names = []\n for each in file_infos:\n # file_href = each['href']\n file_href = re.findall(f1, str(each))[0]\n file_name = re.findall(f2, str(each))[0]\n # print(file_href,file_name)\n if file_name == '':\n continue\n if re.findall('http',file_href):\n pass\n else:\n file_href ='http://www.miit.gov.cn/' + file_href.split('../')[-1]\n # print(file_href)\n file_loc = file_ad + file_name\n try:\n download_file(file_href, file_loc)\n except Exception as e:\n print(\"下载附件出现问题:\", e)\n continue\n file_names.append(file_name)\n file_diff = sorted(set(file_names), key=file_names.index)\n # # 获取图片信息,并下载\n # img_infos = bsObj.find_all(\"img\", {\"src\": re.compile(r'.jpg$|.png$')})\n # img_names = []\n # for each in img_infos:\n # img_href = each['src']\n # # 附件后缀\n # img_adds = img_href.split('.')[-1]\n # img_name = img_href.split('/')[-1]\n # if re.findall(img_adds, img_name):\n # pass\n # else:\n # img_name = img_name + '.' 
+ img_adds\n # if re.findall('http', img_href):\n # pass\n # elif re.findall('/.*/', img_href):\n # img_href = 'http://www.miit.gov.cn' + img_href\n # else:\n # href_add = href.replace(href.split('/')[-1], '')\n # img_href = href_add + img_href[2:]\n # print(img_href)\n # img_loc = file_ad + img_name\n # try:\n # download_file(img_href, img_loc)\n # except Exception as e:\n # print(\"下载图片出现问题:\", e)\n # continue\n # img_names.append(img_name)\n # 获取css文件信息,并下载\n css_infos = bsObj.find_all(\"link\", {\"type\": \"text/css\", \"href\": re.compile(r'.css$')})\n css_names = []\n for each in css_infos:\n css_href = each['href'].replace('../', '')\n # 附件后缀\n css_adds = css_href.split('.')[-1]\n css_name = css_href.replace('..', '').replace('/', '_')\n if re.findall(css_adds, css_name):\n pass\n else:\n css_name = css_name + '.' + css_adds\n if re.findall('http', css_href):\n pass\n elif re.findall('/.*/', css_href):\n css_href = 'http://www.miit.gov.cn/' + css_href\n else:\n href_add = href.replace(href.split('/')[-1], '')\n css_href = href_add + css_href\n css_loc = file_ad + css_name\n try:\n download_file(css_href, css_loc)\n except Exception as e:\n print(\"下载css文件出现问题:\", e)\n continue\n css_names.append(css_name)\n img_names = []\n return ctitle,file_diff, img_names, css_names\n\n\n#获取附件信息\ndef get_file(html_str,href,file_ad):\n # print(href)\n bsObj = beautiful(html_str, \"html.parser\")\n #获取附件信息,并下载\n file_infos = bsObj.find_all(\"a\", {\"href\": re.compile(r'.doc$|.docx$|.pdf$|.xls$|.xlsx$')})\n file_names = []\n for each in file_infos:\n file_href = each['href']\n file_adds = file_href.split('.')[-1]\n file_name = each.text\n href_add = href.replace(href.split('/')[-1], '')\n # print(file_href)\n if file_name == '':\n continue\n if re.findall(file_adds,file_name):\n pass\n else:\n file_name = file_name + '.' + file_adds\n if re.findall('http',file_href):\n newfile_href = file_href\n # print('1:',newfile_href)\n elif '/u/' in file_href:\n newfile_href = 'http://service.most.gov.cn' + file_href\n # print('2:',newfile_href)\n elif re.findall('/.*/',file_href):\n newfile_href = 'http://www.most.gov.cn/' + file_href.replace('../','')\n # print('3:',newfile_href)\n elif './' in file_href:\n newfile_href =href_add + file_href.replace('./','')\n # print('4:',newfile_href)\n else:\n newfile_href = file_href\n # print('5:',newfile_href)\n # print(newfile_href,file_name)\n file_name = file_name.replace('/','或')\n while file_name in os.listdir(file_ad):\n file_name = file_name.rstrip('.'+file_adds)+'~.'+file_adds\n file_loc = file_ad + file_name\n try:\n download_file(newfile_href, file_loc)\n except Exception as e:\n print(\"下载附件出现问题:\", e)\n continue\n file_names.append(file_name)\n file_diff = sorted(set(file_names), key=file_names.index)\n # # 获取图片信息,并下载\n # img_infos = bsObj.find_all(\"img\", {\"src\": re.compile(r'.jpg$|.png$')})\n # img_names = []\n # for each in img_infos:\n # img_href = each['src']\n # # 附件后缀\n # img_adds = img_href.split('.')[-1]\n # img_name = img_href.split('/')[-1]\n # if re.findall(img_adds, img_name):\n # pass\n # else:\n # img_name = img_name + '.' 
+ img_adds\n # if re.findall('http', img_href):\n # pass\n # elif re.findall('/.*/', img_href):\n # img_href = 'http://www.mof.gov.cn' + img_href.replace('../','')\n # else:\n # href_add = href.replace(href.split('/')[-1], '')\n # img_href = href_add + img_href[2:]\n # print(img_href)\n # img_loc = file_ad + img_name\n # try:\n # download_file(img_href, img_loc)\n # except Exception as e:\n # print(\"Problem downloading image:\", e)\n # continue\n # img_names.append(img_name)\n # get the css file info and download it\n css_infos = bsObj.find_all(\"link\", {\"type\": \"text/css\", \"href\": re.compile(r'.css$')})\n css_names = []\n for each in css_infos:\n css_href = each['href']\n if '../' in css_href:\n css_href = '/'+css_href.replace('../','')\n # attachment extension\n css_adds = css_href.split('.')[-1]\n css_name = css_href.replace('..', '').replace('/', '_')\n if re.findall(css_adds, css_name):\n pass\n else:\n css_name = css_name + '.' + css_adds\n if re.findall('http', css_href):\n pass\n elif re.findall('/.*/', css_href):\n css_href = 'http://www.mof.gov.cn'+css_href\n else:\n href_add = href.replace(href.split('/')[-1], '')\n css_href = href_add + css_href\n css_loc = file_ad + css_name\n try:\n download_file(css_href, css_loc)\n except Exception as e:\n print(\"Problem downloading css file:\", e)\n continue\n css_names.append(css_name)\n img_names = []\n return file_diff, img_names, css_names\n\n\n# Save as an html file and return the full name of the saved file (**.html)\ndef saveHtml(html_save, html_content,file_ad):\n # Note the characters Windows forbids in file names, e.g. /\n try:\n html_name = file_ad+html_save.replace('/', '_') + \".html\"\n with open(html_name, \"wb\") as f:\n # the file is written as bytes rather than str, so encode first\n f.write(html_content)\n except:\n html_name = file_ad+html_save.replace('/', '_')[:20] + \".html\"\n with open(html_name, \"wb\") as f:\n # the file is written as bytes rather than str, so encode first\n f.write(html_content)\n return html_name\n\n\n# Save an attachment\ndef download_file(file_href,file_loc):\n time.sleep(random.randint(3,5))\n r = requests.get(file_href, stream=True, headers=headers)\n # download started\n with open(file_loc, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024 * 1024):\n if chunk:\n f.write(chunk)\n\n# Save to the excel sheet\ndef save_excel(worksheet, row, title,ctitle, html_name, source1,source2,source3, date,ProgramStarttime, complete_href, file_names,img_names,css_names,file_ad):\n # write one row\n i = 0\n content = [ctitle, \"\", source1, source2,source3,date,ProgramStarttime, complete_href, \"\"]\n for each_header in content:\n worksheet.write(row, i, each_header)\n i += 1\n # insert a hyperlink to the html file into the excel sheet\n link = 'HYPERLINK(\"%s\";\"%s\")' % (html_name, str(title))\n worksheet.write(row, 1, xlwt.Formula(link))\n # insert hyperlinks to the attachments into the excel sheet\n x = 8\n for down_name in file_names:\n # print(down_name)\n file_loc = file_ad + down_name\n link = 'HYPERLINK(\"%s\";\"%s\")' % (file_loc, down_name)\n worksheet.write(row, x, xlwt.Formula(link))\n x = x + 1\n # worksheet.write(row, 1, xlwt.Formula('HYPERLINK(\"xx.html\";title)')) # Outputs the text \"Google\" linking to http://www.google.com\n for img_name in img_names:\n img_loc = file_ad +img_name\n link = 'HYPERLINK(\"%s\";\"%s\")' % (img_loc, img_name)\n worksheet.write(row, x, xlwt.Formula(link))\n x = x + 1\n for css_name in css_names:\n css_loc = file_ad +css_name\n link = 'HYPERLINK(\"%s\";\"%s\")' % (css_loc, css_name)\n worksheet.write(row, x, xlwt.Formula(link))\n x = x + 1\n\n# National Development and Reform Commission, static pages\ndef tztg_url(row,worksheet,url,source1,source2,source3,href_bloom,file_ad,ProgramStarttime):\n print(\"Site:\", source1+' '+source2 + ' ' +url)\n # detect the page encoding\n time.sleep(random.randint(10,20))\n reqt = 
urllib.request.Request(url,headers = headers)\n response = urllib.request.urlopen(reqt).read()\n chardit1 = chardet.detect(response)\n chardit = chardit1['encoding']\n print(\"Encoding: \" + chardit)\n # get the detail-page urls\n req = response.decode(chardit,'ignore')\n # req.encoding = chardit1['encoding']\n href_list = re.findall('<font class=\"date\">(.*?)</font><a href=\"\\./(.*?)\" target=\"_blank\">(.*?)</a><span class=\"new\">', req)\n # print(item_list)\n for i in range(len(href_list)):\n date = href_list[i][0]\n date = date.replace('.','-').replace('年','-').replace('月','-').replace('日','').replace('/','-')\n date = datetime.datetime.strptime(date, '%Y-%m-%d')\n complete_href = url+href_list[i][1]\n # down_href = url + href_list[i][1].split('/')[0]\n title = href_list[i][2]\n if complete_href in href_bloom:\n continue\n elif re.search(r'.doc$|.docx$|.pdf$|.xls$|.xlsx$', complete_href):\n print(\"Crawling:\",complete_href)\n href_adds = complete_href.split('.')[-1] # href extension: doc, pdf, etc.\n title = title + '.' + href_adds\n title = title.replace('/', '或')\n html_name = file_ad + title\n download_file(complete_href, html_name)\n file_names = []\n img_names = []\n css_names = []\n # insert into the database\n insertFile(source1, source2, source3, title, date, complete_href, ProgramStarttime, html_name, file_names,img_names,css_names,\n file_ad)\n href_bloom.update([complete_href])\n # save to the excel sheet\n save_excel(worksheet, row, title, title, html_name, source1, source2, source3, date, ProgramStarttime,\n complete_href, file_names, img_names,css_names,file_ad)\n else:\n print(\"Crawling:\", complete_href)\n # fetch the static page source\n html,chard,html_str = getHtml_quiet(complete_href)\n # save as an html file\n html_name = saveHtml(title, html,file_ad)\n # get the attachments (collected from the detail page)\n file_names,img_names,css_names = get_file(html_str,complete_href,file_ad)\n # insert into the database\n insertFile(source1, source2, source3, title, date, complete_href, ProgramStarttime, html_name, file_names,img_names,css_names,file_ad)\n href_bloom.update([complete_href])\n # save to the excel sheet\n save_excel(worksheet, row, title, title, html_name, source1, source2, source3, date, ProgramStarttime,complete_href, file_names,img_names,css_names, file_ad)\n row = row + 1\n return row\n\n\ndef main(row ,worksheet,href_bloom,file_ad1,ProgramStarttime):\n href_list = []\n source1 = '国家发改委'\n source2_list = ['通知','公告','解读']\n source3_list = ['通知公告','通知公告','政策解读']\n url_list = ['http://www.ndrc.gov.cn/zcfb/zcfbtz/','http://www.ndrc.gov.cn/zcfb/zcfbgg/','http://www.ndrc.gov.cn/zcfb/jd/']\n for i in range(len(source2_list)):\n source3 = source3_list[i]\n source2 = source2_list[i]\n url = url_list[i]\n row= tztg_url(row, worksheet, url,source1,source2,source3,href_bloom,file_ad1,ProgramStarttime)\n print(row)\n return row,href_list\n\n\n\n\n","sub_path":"country/fagaiwei.py","file_name":"fagaiwei.py","file_ext":"py","file_size_in_byte":17072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"171587299","text":"import re\nimport functools\nfrom collections import defaultdict\nimport git\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\n\n\ndef count_lines(file):\n try:\n with open(file, 'rt', encoding='utf-8') as f:\n return len(f.readlines())\n except Exception:\n return 0\n\n\nclass ProblemEntriesMeta(type):\n def __new__(mcs, name, bases, namespace):\n def add_plot_attr(obj):\n class NewClass(obj.__class__):\n plot = None\n return NewClass(obj)\n\n def make_plotable(f, plot_type_):\n @functools.wraps(f)\n def g(self):\n res = f(self)\n\n res = add_plot_attr(res)\n
plot_func = getattr(self, 'plot_' + plot_type_)\n res.plot = lambda: plot_func(res)\n\n return res\n\n return g\n\n for k, v in namespace.items():\n plot_type_ = getattr(v, '_plot_type', None)\n if plot_type_:\n v = make_plotable(v, plot_type_)\n namespace[k] = v\n\n namespace['plots'][k] = None\n\n klass = super().__new__(mcs, name, bases, namespace)\n return klass\n\n\ndef plot_type(type_):\n def decorator(f):\n f._plot_type = type_\n return f\n\n return decorator\n\n\nclass ProblemEntries(defaultdict, metaclass=ProblemEntriesMeta):\n def __init__(self):\n super().__init__(dict)\n\n for plot_name in self.plots.keys():\n # bind plot_name at definition time; a bare lambda would capture the loop variable late\n self.plots[plot_name] = lambda plot_name=plot_name: getattr(self, plot_name)().plot()\n\n @classmethod\n def from_repo(cls, repo):\n probs = cls()\n\n for commit in repo.iter_commits():\n matched = re.match(r'^\\[(\\w+)\\]', commit.message)\n if matched:\n code_name = matched.group(1)\n entry = probs[code_name]\n entry.setdefault('commits', [])\n entry['commits'].append((commit.authored_datetime, commit.message))\n\n for code_name, entry in probs.items():\n file = 'src/{}.cpp'.format(code_name)\n entry['count_lines'] = count_lines(file)\n\n return probs\n\n plots = dict()\n\n @plot_type('prob')\n def lines_per_prob(self):\n pairs = [ (code_name, entry['count_lines']) for code_name, entry in self.items() ]\n pairs.sort(key=lambda pair: pair[1], reverse=True)\n return pairs\n\n def probs_by_first_date(self):\n ans = defaultdict(list)\n for code_name, entry in self.items():\n first_commit_date, _ = min(entry['commits'])\n ans[first_commit_date.date()].append(code_name)\n\n return ans\n\n @plot_type('prob')\n def commits_per_prob(self):\n pairs = [ (code_name, len(entry['commits'])) for code_name, entry in self.items() ]\n pairs.sort(key=lambda pair: pair[1], reverse=True)\n return pairs\n\n @plot_type('date')\n def prob_starts_per_day(self):\n return {\n date: len(codes)\n for date, codes in self.probs_by_first_date().items()\n }\n\n def commits_by_day(self):\n ans = defaultdict(list)\n for code_name, entry in self.items():\n for date, commit in entry['commits']:\n ans[date.date()].append(commit)\n\n return ans\n\n @plot_type('date')\n def commits_per_day(self):\n return {\n date: len(commits)\n for date, commits in self.commits_by_day().items()\n }\n\n @classmethod\n def plot_date(cls, dct):\n x = list(dct.keys())\n y = list(dct.values())\n\n fig, ax = plt.subplots()\n ax.barh(x, y, align='center')\n\n ax.yaxis.set_major_locator(mdates.DayLocator())\n ax.yaxis.set_major_formatter(mdates.DateFormatter('%m.%d'))\n for tk in ax.yaxis.get_major_ticks():\n tk.label.set_fontsize(8)\n\n ax.grid(axis='x', which='both')\n\n fig.set_size_inches(8, 16)\n\n return fig, ax\n\n @classmethod\n def plot_prob(cls, pairs):\n x = list(range(len(pairs)))\n y = [ lines for code_name, lines in pairs ]\n labels = [ code_name for code_name, lines in pairs ]\n\n fig, ax = plt.subplots()\n ax.barh(x, y, align='center')\n\n ax.set_ylim((0, len(pairs)))\n ax.set_yticks(x)\n ax.set_yticklabels(labels)\n\n xmax = int(ax.get_xbound()[1])\n if xmax > 100:\n xticks = list(range(0, xmax + 50, 50))\n xticks_minor = list(range(0, xmax + 50, 10))\n ax.set_xticks(xticks)\n ax.set_xticks(xticks_minor, minor=True)\n\n ax.grid(axis='x', which='both')\n\n fig.set_size_inches(8, 16)\n\n return fig, ax\n","sub_path":"scripts/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":4663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"372733087","text":"from 
django.contrib import admin, messages\nfrom .models import Links, Manual, Noticia, Categoria, Notificacio\n\ndef despublicar(modeladmin, request, queryset ):\n queryset.update(publicat=False)\n messages.success(request,'News items unpublished successfully')\ndespublicar.short_description='Unpublish news items'\n\nclass LinksAdmin(admin.ModelAdmin):\n fields = ['titol','url','tipus','areas']\nclass ManualAdmin(admin.ModelAdmin):\n list_display = ('titol','creat_en','publicat','categoria')\n fields = ['titol', 'publicat','document','categoria']\nclass NoticiaAdmin(admin.ModelAdmin):\n list_display = ('titol','creat_en','publicat','destacada')\n fields = ['titol','text','publicat','document','destacada'] \n actions = [despublicar]\n\nclass CategoriaAdmin(admin.ModelAdmin):\n list_display = ('titol','descripcio')\n fields = ['titol', 'descripcio']\nclass NotificacioAdmin(admin.ModelAdmin):\n list_display = ('get_treballador_username','titol','text','llegida')\n fields = ['treballador','titol','text','llegida']\n def get_treballador_username(self, obj):\n return obj.treballador.username\n\n# Register your models here.\nadmin.site.register(Links,LinksAdmin)\nadmin.site.register(Manual,ManualAdmin)\nadmin.site.register(Noticia,NoticiaAdmin)\nadmin.site.register(Notificacio,NotificacioAdmin)\nadmin.site.register(Categoria,CategoriaAdmin)\n","sub_path":"permivac/intranet/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"323329733","text":"#!/usr/bin/env python3\n\nimport threading\nimport sys\nimport cmd_pkg as cp\nfrom getCmds import getCmds\nimport cmd_pkg.fDict\nimport shlex\nimport re\nimport os\n\nclass CommandHelper(object):\n def __init__(self):\n\n # Automatically generates the commands dictionary\n self.commands = getCmds() \n self.flagList=cp.fDict.flags()\n self.directsList=cp.fDict.direct()\n self.flagsDict = cp.fDict.fDict()\n \n # Makes the string value from self.commands a callable object\n for key in self.commands:\n self.commands[key] = eval(self.commands[key]) \n\n \n def exists(self, cmd): # Checks that a command exists\n return cmd in self.commands\n\n\n def parseit(self,cmd_in): \n flags=[]\n params=[]\n directs=[]\n cmd = cmd_in[0]\n\n if cmd_in[1:]:\n args = cmd_in[1:] \n \n for item, i in zip(args, range(0, len(args))):\n if item in self.flagsDict[cmd]:\n flags.append(item)\n \n elif item in self.directsList: # ex ['c:/user/2.txt', '<', 'c:/user/1.txt']\n\n if item == '>':\n item = \"w+\"\n elif item == '>>':\n item = \"a+\"\n elif item == '<':\n item = \"r+\"\n if i > 0 and not (str.isdigit(args[i-1])): #i >0 and the previous argument isn't a number\n directs.append(args[i-1]) # typically the read file (this won't always be used)\n directs.append(item) # the mode to open the writefile in\n directs.append(args[i+1]) # the writefile\n else:\n params.append(item)\n else: \n args = None\n\n self.runit(cmd=cmd, flags=flags, params=params, directs=directs )\n\n\n def runit(self, **kwargs):\n \n if 'cmd' in kwargs:\n cmd = kwargs['cmd']\n\n else:\n print(\"How did you mess this up?\")\n\n if kwargs['flags']:\n flags = kwargs['flags'] \n else:\n flags = []\n\n if kwargs['params']:\n params = kwargs['params'] \n else:\n params = []\n\n if kwargs['directs']:\n for item in kwargs['directs']:\n if item == '>':\n item = \"w+\" #write\n elif item == '>>':\n item = \"a+\" #append\n elif item == '<':\n item = \"r+\" #read (but it will make a file if one does not 
exist)\n directs = kwargs['directs'] \n else:\n directs = []\n\n self.commands[cmd](flags=flags, params=params, directs=directs)\n\n\nif __name__ == \"__main__\":\n ch = CommandHelper()\n \n basepath = os.path.dirname(__file__)\n filepath = os.path.abspath(os.path.join(basepath, \"history.log\"))\n\n while True:\n skip = False\n ch.runit(cmd='pwd', flags=None, params=None, directs=None) #Print working dir above the cmd prompt ##DISABLE WHEN DEBUGGING\n cmd_in = input(\"% \")\n\n while len(cmd_in.rstrip()) < 1: # Check for empty lines\n print(\"Error: no command entered\") \n cmd_in = input(\"% \")\n \n cmd = cmd_in.split()[0] # First word is the command\n\n if cmd == \"exit\": # This should be right after we pass input to history.py\n sys.exit(0)\n \n # Help\n if cmd == \"help\":\n cmd = cmd_in.split()[1]\n if cmd == \"!\":\n cmd = \"hist\"\n if ch.exists(cmd):\n print(cp.help(cmd))\n skip = True\n else:\n print(\"Invalid command\")\n \n # History \n num = 0 \n if cmd[0] == \"!\":\n if len(cmd)>1 and (str.isdigit(cmd[1:])):\n num =cmd[1:]\n cmd = 'hist'\n cmd_in = cp.hist(num)\n else:\n print(\"Invalid entry\") \n \n with open(filepath,\"a+\") as log: \n log.write(f\"{cmd_in}\\n\")\n\n if skip == False:\n if ch.exists(cmd):\n cmd_in = shlex.split(cmd_in,posix=True) # Split user input into multiple strings\n ch.parseit(cmd_in) # Parses the user input\n else:\n print(\"Invalid command\")\n else:\n pass\n\n","sub_path":"Assignments/P01-Shell/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":4524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"430146050","text":"\n# Functions\ndef intcheck(question):\n valid = False\n # Error message\n\n error = \"please enter a whole number greater than 0\"\n\n while not valid:\n try:\n # Gets user input\n response = int(input(question))\n # Checks number is greater than zero\n if response <= 0:\n print(error)\n print()\n else:\n return response\n\n\n\n # If input is not a number or is a decimal then display error\n except ValueError:\n print(error)\n print()\n\n\n\n# Main\n\n\nname = \"\"\n\nticket_count = 0\nmax_tickets = 5\n\ntotal_sale = 0\nwhile ticket_count < max_tickets:\n\n if(max_tickets - ticket_count ) == 1:\n print(\"\")\n print(\"!!! 
There is only 1 ticket left !!!\")\n print(\"\")\n else:\n print(\"You have {} tickets left\".format(max_tickets - ticket_count))\n\n name = input(\"Name:\")\n if name == \"xxx\":\n break\n\n age = intcheck(\"Age: \")\n # Check if age is within range\n if age < 12:\n print(\"Error: You are too young to see this movie\")\n continue\n elif age > 130:\n print(\"Error: The age entered is too high\")\n continue\n\n price = 0\n if age < 16:\n price = 7.50\n elif age > 64:\n price = 6.50\n else:\n price = 10.50\n\n total_sale += price\n ticket_count += 1\n\n print(\"{}:${:.2f}\".format(name,price))\n\n\nif ticket_count < max_tickets:\n print(\"You have sold {}\".format(ticket_count))\n print(\"There are {} tickets left\".format(max_tickets-ticket_count))\nelse:\n print(\"All tickets have been sold\")\n\nprofit = total_sale - (5*ticket_count)\nprint(\"You have made ${:.2f} profit\".format(profit))\n\n","sub_path":"00_MegaMovieFundraiser_Base.py","file_name":"00_MegaMovieFundraiser_Base.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"9157521","text":"\"\"\"\nSMTP infra setup testing dag\n\"\"\"\nfrom datetime import datetime\nfrom airflow import DAG\nfrom airflow.operators.email import EmailOperator\n\n\ndefault_args = {\n \"owner\": \"Pin Jin\",\n # \"depends_on_past\": False,\n \"start_date\": datetime(2020, 6, 14),\n \"email\": [\"pin.jin@ga.gov.au\"],\n # \"email_on_failure\": False,\n # \"email_on_retry\": False,\n # \"retries\": 1,\n}\n\nwith DAG(\n \"test_emailoperator\",\n description=\"Simple Test DAG\",\n schedule_interval=None,\n default_args=default_args,\n catchup=False,\n) as dag:\n\n EmailOperator(\n mime_charset=\"utf-8\",\n task_id=\"send_email\",\n to=\"nikita.gandhi@ga.gov.au\",\n subject=\"Templated Subject: start_date {{ ds }}\",\n params={\"content1\": \"random\"},\n html_content=\"Templated Content: content1 - {{ params.content1 }} task_key - {{ task_instance_key_str }} \"\n \"test_mode - {{ test_mode }} task_owner - {{ task.owner}} hostname - {{ ti.hostname }}\",\n )\n","sub_path":"dags/tests/test_emailoperator.py","file_name":"test_emailoperator.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"209357764","text":"\"\"\"\nAuthor: Jeremy Cornett\nDate: 2018-08-29\nPurpose: Create a zip of the specified folder and store it in the specified location. Retain the specified number of\nrecords.\n\"\"\"\n\nimport argparse\nimport datetime\nimport glob\nimport os\nimport shutil\nimport subprocess\nimport zipfile\n\n\nif __name__ == \"__main__\":\n # Parse the command line arguments.\n parser = argparse.ArgumentParser(description=\"Create a zip of the specified folder or registry key and store it in\"\n \"the specified location. Retain the specified number of records.\")\n parser.add_argument(\"name\", help=\"The name to give the zip.\")\n parser.add_argument(\"target\", help=\"The file or directory to zip up.\")\n parser.add_argument(\"destination\", help=\"The directory to place the zip.\")\n parser.add_argument(\"retention\", help=\"The number of zips to retain. Zero means infinite (i.e. 
don't delete \"\n \"anything).\")\n args = parser.parse_args()\n\n # Give the log file some space between runs.\n print()\n print()\n\n is_target_registry = False\n for registry_root_key in [\"HKLM\", \"HKCU\", \"HKCR\", \"HKU\", \"HKCC\"]:\n if args.target.startswith(registry_root_key):\n is_target_registry = True\n break\n\n if is_target_registry:\n path_target = args.target\n else:\n path_target = os.path.abspath(os.path.normpath(args.target))\n path_destination = os.path.abspath(os.path.normpath(args.destination))\n\n # Ensure the target and destination exist.\n if not is_target_registry and not os.path.exists(path_target):\n raise ValueError(\"The target path must exist.\")\n print(\"TARGET: {}\".format(path_target))\n if not os.path.exists(path_destination):\n raise ValueError(\"The destination path must exist.\")\n else:\n print(\"DESTINATION: {}\".format(path_destination))\n\n # Ensure retention is a positive number.\n count_retain = int(float(args.retention))\n if count_retain < 0:\n raise ValueError(\"The retention number ({}) must greater than or equal to zero.\".format(args.retention))\n\n # Construct the zip file name from the current date and time.\n path_zip = os.path.join(path_destination, \"{}_{}\".format(args.name,\n datetime.datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")))\n\n # By virtue of using a timestamp as part of the name, a collision shouldn't occur, but it's good to check anyways.\n if os.path.exists(\"{}.zip\".format(path_zip)):\n raise ValueError(\"COLLISION - The calculated path for the zip already exists - {}.zip\".format(path_zip))\n\n # Zip up the target to the destination folder.\n print(\"ZIP: {}.zip\".format(path_zip))\n if is_target_registry:\n path_reg_file = os.path.join(path_destination, \"{}.reg\".format(args.name))\n # https://docs.microsoft.com/en-us/windows-server/administration/windows-commands/reg-export\n # https://docs.python.org/3/library/subprocess.html#subprocess.call\n subprocess.call([\"reg.exe\", \"export\", path_target, path_reg_file, \"/y\"])\n with zipfile.ZipFile(\"{}.zip\".format(path_zip), 'w') as file_zip:\n file_zip.write(path_reg_file)\n else:\n shutil.make_archive(path_zip, \"zip\", path_target, path_target)\n\n # Check how many zips there are. If there's more than the retention value, delete the older ones. 
Ensure\n # that the timestamp for each is taken from the filename, not the file attributes.\n list_zips = glob.glob(os.path.join(path_destination, \"{}_*.zip\".format(args.name)))\n\n if len(list_zips) > count_retain:\n # The files are named in such a way that a lexicographical sort will sort them chronologically as well.\n list_zips.sort()\n # The more recent ones we want to retain.\n for i in range(0, count_retain):\n list_zips.pop()\n\n for path_zip_delete in list_zips:\n print(\"DELETE: {}\".format(path_zip_delete))\n os.remove(path_zip_delete)\n","sub_path":"files/backup.py","file_name":"backup.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}
{"seq_id":"141137312","text":"import logging\nimport os\nimport copy,time\nimport shutil\nimport subprocess \nfrom functions import fileDir\nfrom functions import other\n\nclass deployApp(object):\n \n def __init__(self,params,cid,stdLogger):\n self.cid=cid\n self.stdLogger=stdLogger\n self.params=eval(params)\n self.customerDir='../customerDir/'+self.params['customerId']\n self.projectDir=self.customerDir+'/project/'+self.params['project']\n self.appDir=self.projectDir+'/app/'+self.params['appName']\n self.enviDir=self.projectDir+'/envi/'+self.params['environment']\n self.appWarDir=self.appDir+'/war'\n self.appLableDir=self.appWarDir+'/'+self.params['environment']+'/'+self.params['lable']\n self.appConfDir=self.appDir+'/conf'\n self.appConfDep =self.appConfDir+'/'+self.params['appName']+'.deploy.conf'\n self.appConfSt=self.appConfDir+'/'+self.params['appName']+'.st.conf'\n self.xmlDir=self.appDir+'/'+self.params['appName']+'.ant.xml'\n self.appEnvIpDir=self.appConfDir+'/'+self.params['appName']+'.'+self.params['environment']+'.ip'\n self.buildLogDir='logs/'+self.params['customerId']+'/'+self.params['project']+'/'+self.params['appName']+'/'+self.params['environment']+'/'+self.params['lable']+'.log'\n \n def run(self):\n if os.path.exists(self.customerDir):\n if os.path.exists(self.projectDir):\n if os.path.exists(self.enviDir):\n if os.path.exists(self.appDir):\n self.process()\n else:\n self.sendMess('0','App '+self.params['appName']+' does not exist')\n else:\n self.sendMess('0','Env '+self.params['environment']+' does not exist')\n else:\n self.sendMess('0','Project '+self.params['project']+' does not exist')\n else:\n self.sendMess('0','Customer '+self.params['customerId']+' does not exist')\n \n def process(self):\n self.tardir=self.appLableDir+'/'+self.params['appName']+'.tar'\n# if not os.path.exists(self.tardir):\n #self.mkXml()\n if self.getTar():\n ipports=self.getAppEnvIp() \n if len(ipports)>0:\n #self.mkLableFile()\n #self.toTar()\n self.apInitdir=self.appLableDir+'/'+self.params['appName']+'.initial'\n if os.path.exists(self.appDir+'/'+self.params['environment']+'.initial'): \n shutil.copy(self.tardir,self.apInitdir)\n fileDir.filesInZip([self.appConfDep],self.apInitdir) \n deployFile=self.apInitdir\n else:\n deployFile=self.tardir \n #ipports=self.getAppEnvIp()\n for ipport in ipports:\n count=int((int(self.getStDep('percentDep'))*0.01)*len(ipports))\n if count<1:\n count=1\n while 1:\n if count>self.getMaxSendFile():\n self.sendFiles(deployFile,ipport,'80') \n break\n time.sleep(0.1)\n if deployFile==self.apInitdir:\n os.remove(self.appDir+'/'+self.params['environment']+'.initial')\n else:\n self.sendMess('0','App '+self.params['appName']+' has no nodes to deploy to') \n else:\n self.sendMess('0','App '+self.params['appName']+' 
checkout or build failed')\n \n def getTar(self):\n if os.path.exists(self.tardir):\n return(True)\n self.mkXml() \n if self.toBuild():\n self.mkLableFile()\n self.toTar()\n return(True)\n return(False) \n \n def getMaxSendFile(self):\n temp=0 \n while 1:\n try:\n if os.path.exists('task/'+self.params['customerId']+'.sendFile.running'):\n with open('task/'+self.params['customerId']+'.sendFile.running','r') as myfile:\n for file in myfile:\n countcus=eval(file)\n if countcus['ID']==self.params['ID']:\n temp+=1\n \n if os.path.exists('task/'+self.params['customerId']+'.sendFile.temp'): \n with open('task/'+self.params['customerId']+'.sendFile.temp','r') as myfile:\n for file in myfile:\n countcus=eval(file)\n if countcus['ID']==self.params['ID']:\n temp+=1 \n if os.path.exists('task/'+self.params['customerId']+'.sendFile.queue'): \n with open('task/'+self.params['customerId']+'.sendFile.queue','r') as myfile:\n for file in myfile:\n countcus=eval(file)\n if countcus['ID']==self.params['ID']:\n temp+=1\n break \n except OSError as e:\n pass \n time.sleep(0.1)\n return(temp) \n \n def getAppEnvIp(self):\n if 'node' in self.params.keys():\n if len(self.params['node'])>0:\n return(self.params['node'])\n temp=[]\n if os.path.exists(self.appEnvIpDir):\n with open(self.appEnvIpDir,'r') as myfile:\n for ipport in myfile:\n temp.append(ipport.replace(',','').replace('\\n',''))\n return(temp)\n \n \n def toTar(self):\n if not os.path.exists(self.appWarDir+'/'+self.params['environment']):\n os.mkdir(self.appWarDir+'/'+self.params['environment'])\n if not os.path.exists(self.appLableDir):\n os.mkdir(self.appLableDir)\n warfile=fileDir.listFileName('file',self.appDir+'/source/target','war','short')\n os.rename(self.appDir+'/source/target/'+warfile[0],self.appDir+'/source/target/'+self.getStDep('appWar'))\n fileDir.files2zip([[self.appDir+'/source/target/'+self.params['appName']+'.lable',self.params['appName']+'.lable'],[self.appDir+'/source/target/'+self.getStDep('appWar'),self.getStDep('appWar')]],self.tardir)\n \n def mkBuildLogDir(self):\n\n if not os.path.exists('logs/'+self.params['customerId']):\n os.mkdir('logs/'+self.params['customerId'])\n if not os.path.exists('logs/'+self.params['customerId']+'/'+self.params['project']):\n os.mkdir('logs/'+self.params['customerId']+'/'+self.params['project'])\n if not os.path.exists('logs/'+self.params['customerId']+'/'+self.params['project']+'/'+self.params['appName']):\n os.mkdir('logs/'+self.params['customerId']+'/'+self.params['project']+'/'+self.params['appName'])\n if not os.path.exists('logs/'+self.params['customerId']+'/'+self.params['project']+'/'+self.params['appName']+'/'+self.params['environment']):\n os.mkdir('logs/'+self.params['customerId']+'/'+self.params['project']+'/'+self.params['appName']+'/'+self.params['environment'])\n\n \n def toBuild(self):\n if os.path.exists(self.appDir+'/source/'):\n shutil.rmtree(self.appDir+'/source/')\n shellStr='export ANT_HOME='+self.getSeverConf('ANT_HOME')+'\\n'+self.getSeverConf('ANT_HOME')+'/bin/ant -v -f '+self.xmlDir\n #print(shellStr) \n ant=subprocess.Popen(shellStr,shell=True,universal_newlines=True,stderr=subprocess.PIPE,stdout=subprocess.PIPE) \n stdout=ant.stdout.read()\n stderr=ant.stderr.read()\n self.mkBuildLogDir()\n myfile=open(self.buildLogDir,'w')\n myfile.write(stdout+'\\n')\n myfile.write(stderr+'\\n')\n #self.stdLogger.debug(stdout)\n #self.stdLogger.debug(stderr)\n if stderr=='':\n # unfinished\n if self.getStDep('mvnProfile')=='true':\n mvnParams='-P '+self.params['environment']\n
else:\n mvnParams=''\n shellStr='export JAVA_HOME='+self.getSeverConf('JAVA_HOME')+'\\ncd '+self.appDir+'/source\\n'+self.getSeverConf('maven_home')+'/bin/mvn clean '+mvnParams+' package'\n self.stdLogger.debug(shellStr)\n mvn=subprocess.Popen(shellStr,shell=True,universal_newlines=True,stderr=subprocess.PIPE,stdout=subprocess.PIPE) \n stdout=mvn.stdout.read()\n stderr=mvn.stderr.read()\n myfile.write(stdout+'\\n')\n myfile.write(stderr+'\\n')\n myfile.close()\n #self.stdLogger.debug(stdout)\n #self.stdLogger.debug(stderr)\n #print('***********'+self.appDir+'/source/target/'+self.params['appName']+'.war')\n if stderr=='' and os.path.exists(self.appDir+'/source/target/'+self.getStDep('appWar')):\n return(True)\n else:\n return(False)\n else:\n myfile.close()\n return(False) \n \n def mkLableFile(self):\n with open(self.appDir+'/source/target/'+self.params['appName']+'.lable','w') as myfile:\n myfile.write(self.params['lable']) \n \n def getSeverConf(self,key):\n return(fileDir.getConfFile('conf/server.conf',key))\n \n def getStDep(self,key):\n dirs=[self.appConfSt,self.appConfDep]\n for dir in dirs:\n with open(dir,'r') as myfile:\n for data in myfile:\n if data.split('=')[0]==key:\n return(data.split('=')[1][:-1]) \n \n def mkXml(self):\n with open(self.xmlDir,'w',encoding='gbk') as myfile:\n myfile.write(self.toXml(self.getStDep('stProject'),self.getStDep('stView'),self.getStDep('stFolder'),self.params['lable']))\n \n def toXml(self,stProject,stview,stfolder,lable):\n #print(self.appDir)\n #print(stProject,stview,stfolder,lable)\n string='''<?xml version = \"1.0\" encoding = \"GBK\"?>\n<project basedir=\".\" default=\"sourcecheckout\" name=\"checkout_mavn\">\n <property file=\"/home/deployuser/adcs/program/conf/st_ant_maven.conf\"/>\n <property name=\"stproject\" value=\"'''+stProject+'''\" />\n <property name=\"stview\" value=\"'''+stview+'''\" />\n <target name=\"sourcecheckout\">\n <stcheckout servername=\"${stserver}\"\n serverport=\"${stport}\"\n projectname=\"'''+stProject+'''\"\n viewname=\"'''+stview+'''\"\n username=\"${stuser}\"\n password=\"${stpassword}\"\n label=\"'''+lable+'''\"\n rootstarteamfolder=\"'''+stfolder+'''\"\n rootlocalfolder=\"'''+self.appDir+'''/source\"\n forced=\"true\"\n recursive=\"true\"\n deleteuncontrolled=\"false\" />\n </target>\n</project>'''\n return(string)\n \n def sendMess(self,status,mess):\n #print(other.toSendMessageStr(self.params['ID'],self.cid,self.params['customerId'],status,mess))\n fileDir.appendFileLine('messages/'+self.params['customerId']+'.receiveMessage.queue',other.toSendMessageStr(self.params['ID'],self.cid,self.params['customerId'],status,mess))\n\n\n def sendFiles(self,file,ip,dir):\n a=(other.toSendFileStr(self.params,self.cid,file,ip,dir))\n #print(a)\n fileDir.appendFileLine( 'task/'+self.params['customerId']+'.sendFile.queue',a)","sub_path":"functions/deployApp.py","file_name":"deployApp.py","file_ext":"py","file_size_in_byte":11333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"295956692","text":"import regex\nimport numpy as np\nimport sys\nimport subprocess as sp\n\n\n###Carly's script, but edited for python 3####s\n# PARAMETERS # \nblatPath=\"/usr2/people/mabrams/bin/blat/blat\"\n\nmapping_genome = \"/usr2/people/carlyweiss/Amended_Genomes/Amended_Genomes/Concatenated/D1373_Z1.fa\" # location of the .udb database\nmin_ID = str(95) # minimum identity cutoff for mapping reads. 
Must be a decimal proportion (0-1)\nmin_id_pooling = .95 # minimum identity for reads at the pooling stage. Can be different than at mapping. Must be (0-1)\ngff = '/usr2/people/carlyweiss/SparScerinfo/YS2+CBS432+plasmid_clean' ## GFF to use for annotations. \nmin_read_cutoff = 0 ## this is only for reporting summary statistics \n\nmin_id_pooling*=100\n\n# HELP #\n#in /usr2/people/mabrams/data/Carly_Tn_Data\n##server: niya\n##running in folder: nice -n 17 bash -c 'for file in ./*.fastq; do /usr2/people/mabrams/bin/anaconda3/bin/python /usr2/people/mabrams/scripts/map_and_pool_BLAT_1-31-19.py $file ./fastq_pooled & done'\n\n\n\nif len(sys.argv) == 1:\n print(\"USAGE: python map_PB_reads.py fastq_file out_directory\")\n exit()\n# INPUT # \n\nread_file = sys.argv[1] \nfastq_filename = read_file.split(\"/\")[-1]\n\nout_dir = sys.argv[2]\n\n# BEGIN FUNCTIONS # \n\ndef Filter_Reads(fastq_file): ## filter out reads that do not have a TN sequence \n\n wf = open(out_dir+fastq_filename+'_parsed_reads','w') # outfile for the truncated reads that will be mapped\n\n tn_pattern = regex.compile('(?e)(CAGACTATCTTTCTAGGGTTAA){e<=2}') # last 2 bps of the left arm of the transposon. searches for this pattern allowing 2 mismatches for sequencing errors\n\n # counts for summary stats #\n\n tot_reads = 0.0 \n reads_with_tn = 0\n line_count = 0\n total_Ns = 0\n total_bases = 0\n reads_too_short = 0\n too_high_Ns = 0\n f = open(fastq_file) # the file with the reads\n \n for line in f:\n\n # parses fastq file # \n\n line_count+=1\n if line_count % 4 == 1:\n header = line\n elif line_count % 4 == 2:\n read = line.strip()\n elif line_count % 4 == 0:\n qual = line\n tot_reads+=1\n\n tn_match_data = tn_pattern.search(read) # searches for the tn pattern\n if tn_match_data == None:\n continue #skips the read if it doesn't have a tn pattern\n\n total_bases+=len(read)\n total_Ns+=read.count('N') # counts the number of Ns per read\n \n reads_with_tn+=1 ## counts reads that have the tn sequence\n\n end_match = tn_match_data.end() # finds the location in the read that the last tn base matches to\n \n genome_read = read[tn_match_data.end()-4:] # gets the sequence of the read outside the transposon (still contains TTAA)\n\n if len(genome_read) < 50: # if the length of the parsed read is less than 50, throw it out. This is appropriate for 150bp reads and should be changed otherwise\n reads_too_short+=1\n continue \n \n if genome_read.count('N') / float(len(genome_read)) > .2:\n \n too_high_Ns+=1\n continue\n\n \n # writes a new fastq file that has the filtered read and only the regions TTAA--genome. 
The TN sequence is removed # \n\n wf.writelines(\">\"+header)\n wf.writelines(genome_read+\"\\n\")\n # wf.writelines(\"+\\n\")\n # wf.writelines(qual[tn_match_data.end()-4:])\n \n wf.close() \n\n prop_Ns = float(total_Ns) / total_bases # gets the proportion of Ns in the fastq file as an indicator for overall sequence quality\n\n\n wf = open(out_dir+fastq_filename+\"_mapping_stats\", 'w') # the file that will contain summary stats of the mapping and additional analysis\n\n # write mapping statistics # \n\n print(\"total_reads: \", str(tot_reads))\n wf.writelines(\"total_reads: \"+str(tot_reads)+\"\\n\")\n print(\"reads with tn: \", str(reads_with_tn), \" (\"+str(100*reads_with_tn/tot_reads)+\"%)\")\n wf.writelines(\"reads with TN: \"+str(reads_with_tn)+\" (\"+str(100*reads_with_tn/tot_reads)+\"%)\\n\")\n print(\"proportion bases in TN containing reads that are 'N': \", str(prop_Ns))\n wf.writelines(\"proportion bases in TN containing reads that are 'N': \"+ str(prop_Ns)+\"\\n\")\n print(\"TN containing reads too short that were discarded: \", str(reads_too_short), \" (\"+str(100*reads_too_short/float(reads_with_tn))+\"%)\")\n wf.writelines(\"TN containing reads too short that were discarded: \"+str(reads_too_short)+\" (\"+str(100*reads_too_short/float(reads_with_tn))+\"%)\\n\")\n print(\"TN containing reads with too many Ns: \", str(too_high_Ns), \" (\"+str(100*too_high_Ns/float(reads_with_tn))+\"%)\")\n wf.writelines(\"TN containing reads with too many Ns: \"+str(too_high_Ns)+\" (\"+str(100*too_high_Ns/float(reads_with_tn))+\"%)\\n\")\n \n\n\n wf.close()\n\n tot_parsed_reads = reads_with_tn - reads_too_short - too_high_Ns\n\n return tot_parsed_reads\n\n\ndef Map_Reads(): # uses BLAT to map the reads (the earlier usearch command is kept below, commented out)\n\n# cmd = [\"/opt/bin/usearch8.1.1756_i86linux64\", \"-threads\", nthreads, \"-usearch_local\", out_dir+fastq_filename+\"_parsed_reads\", \"-db\", db, \"-strand\", \"both\", \"-id\", min_id, \"-alnout\", out_dir+fastq_filename+\"_aligned_reads\", \"-maxhits\", \"2\", \"-notmatched\", out_dir+fastq_filename+\"_unmapped_reads\", \"-userout\", out_dir+fastq_filename+\"_mapped_reads\", \"-userfields\", \"query+target+id+alnlen+qstrand+tstrand+tlot+thit\", \"-mincols\", \"50\", \"-query_cov\", \".95\"] # the command used to call usearch\n\n cmd = [blatPath, mapping_genome, out_dir+fastq_filename+\"_parsed_reads\", out_dir+fastq_filename+\"_mapped_reads\", \"-minIdentity=95\", \"-tileSize=12\", \"-out=blast8\" ]\n \n sp.call(cmd)\n\ndef Filter_for_ID_and_multimapping_and_pool(num_parsed_reads): # filters out mapped reads that map below the ID threshold, those that map to multiple locations, then pools reads into insertion sites\n\n## min_id_pooling\n\n mapped_file = out_dir+fastq_filename+\"_mapped_reads\" # where the mapped reads are\n\n first = 'y'\n\n f = open(mapped_file)\n\n\n read_dict = {}\n total_mapped_reads = 0\n reads_above_identity_threshold = 0\n mulitmapped_reads = 0\n \n all_read_list = set()\n for line in f:\n\n \n line = line.strip().split(\"\\t\")\n\n \n read = line[0]\n \n if read not in all_read_list: ## counts all reads that map at least once\n all_read_list.add(read)\n total_mapped_reads+=1\n\n\n ID = float(line[2])\n if ID < min_id_pooling: ## filters out mapped reads below the minimum identity for pooling \n continue\n\n len_align = float(line[3])\n start_align = float(line[6]) \n\n if len_align < 50 or start_align > 3:\n continue\n\n\n\n reads_above_identity_threshold+=1 # this counts reads that are above the identity threshold and length of 
alignment threshold and in which the alignment starts within 3 bps of the read TTAA\n\n start = line[8]\n end = line[9]\n\n strand = '+'\n if start < end:\n strand = '-'\n \n\n insertion_loc = start\n \n loc = line[1]+\"__\"+strand+\"__\"+insertion_loc ## This is the identifier for the insertion. scaffold+strand+position\n\n # searches for reads that map to more than 1 location # \n\n if read in read_dict: \n read_dict[read].append([loc, ID]) ## appends the read location and %ID of mapping in case of multiple mappings of the same read\n # mulitmapped_reads+=2 # as long as the usearch multimapping parameter is set to a maximum of 2 reads, this will work. \n else:\n read_dict[read] = [[loc, ID ]]\n\n\n\n # filters out reads that map to more than one location # \n\n\n print(\"done making the read_dict\")\n\n loc_dict = {} #\n read_keys=dict.fromkeys(read_dict.keys()) # snapshot of the keys so entries can be deleted while iterating\n for read in read_keys.keys():\n if len(read_dict[read]) > 2:\n del read_dict[read]\n continue\n if len(read_dict[read]) == 2:\n\n # print read_dict[read]\n\n\n if read_dict[read][0][1] == read_dict[read][1][1]:\n del read_dict[read]\n continue\n if read_dict[read][0][1] > read_dict[read][1][1]:\n read_dict[read] = [read_dict[read][0]]\n elif read_dict[read][0][1] < read_dict[read][1][1]:\n read_dict[read] = [read_dict[read][1]]\n\n # print read_dict[read]\n \n \n# print read_dict\n\n read_dict[read] = read_dict[read][0][0] ## keeps just the insertion location\n\n # print read_dict[read]\n\n\n\n if read_dict[read] in loc_dict:\n loc_dict[read_dict[read]]+=1\n else:\n loc_dict[read_dict[read]] = 1\n\n\n # for loc in loc_dict:\n # print loc, loc_dict[loc]\n\n \n\n print(\"total mapped reads: \", str(total_mapped_reads), \" (\"+str(float(100*total_mapped_reads/num_parsed_reads))+\"% of parsed reads)\")\n print(\"mapped reads passing identity cutoff: \"+str(reads_above_identity_threshold), \"(\"+str(100*reads_above_identity_threshold/float(total_mapped_reads))+\"%)\")\n print(\"remaining reads mapping to one location: \"+str(len(read_dict)), \"(\"+str(100*len(read_dict)/float(reads_above_identity_threshold))+\"%)\")\n\n wf = open(out_dir+fastq_filename+\"_mapping_stats\", 'a')\n\n wf.writelines(\"total mapped reads: \"+str(total_mapped_reads)+\" (\"+str(float(100*total_mapped_reads/num_parsed_reads))+\"% of parsed reads)\\n\")\n wf.writelines(\"mapped reads passing identity cutoff: \"+str(reads_above_identity_threshold)+\" \"+str(100*reads_above_identity_threshold/float(total_mapped_reads))+\"% of mapped reads\\n\")\n wf.writelines(\"remaining reads mapping to one location: \"+str(len(read_dict))+\" \"+str(100*len(read_dict)/float(reads_above_identity_threshold))+\"%\\n\")\n\n wf.close()\n\n return loc_dict, len(read_dict)\n\n\ndef Combine_near_mappings(loc_dict, reads_remaining): # combines insertion sites that are within 3 bases of each other. reads are assigned to the site with the initial max number of reads\n\n split_loc_dict = {} # will hold hierarchical data on each insertion site. keys for nested dictionaries are scaffold, strand, position and value is # reads mapping there. \n\n for full_location in loc_dict: ## loc dict holds the identifier for an insertion site as key and reads mapping to that site as a value\n chrom = full_location.split(\"__\")[0]\n strand = full_location.split(\"__\")[1]\n pos = int(full_location.split(\"__\")[2])\n\n # initialize the dictionary #\n\n if chrom not in split_loc_dict:\n split_loc_dict[chrom] = {'+' : {}, '-' : {}}\n\n if pos not in split_loc_dict[chrom][strand]:\n split_loc_dict[chrom][strand][pos] = loc_dict[full_location]\n\n reads_moved = 0\n \n # sorts the insertion positions, and combines reads forward, then reverses the sorting and combines forward again. #\n\n for chrom in split_loc_dict:\n for strand in split_loc_dict[chrom]:\n\n sorted_positions = sorted(split_loc_dict[chrom][strand])\n first ='y'\n for pos in sorted_positions:\n if first == 'y':\n first = 'n'\n last = pos\n continue\n\n if int(pos) - int(last) < 4:\n\n if split_loc_dict[chrom][strand][pos] >= split_loc_dict[chrom][strand][last]:\n split_loc_dict[chrom][strand][pos]+=split_loc_dict[chrom][strand][last]\n reads_moved+=split_loc_dict[chrom][strand][last]\n del split_loc_dict[chrom][strand][last]\n\n last = pos\n sorted_positions = sorted(split_loc_dict[chrom][strand])\n sorted_positions.reverse()\n\n first ='y'\n for pos in sorted_positions:\n if first == 'y':\n first = 'n'\n last = pos\n continue\n\n if abs(int(pos) - int(last)) < 4:\n if split_loc_dict[chrom][strand][pos] >= split_loc_dict[chrom][strand][last]:\n split_loc_dict[chrom][strand][pos]+=split_loc_dict[chrom][strand][last]\n reads_moved+=split_loc_dict[chrom][strand][last]\n del split_loc_dict[chrom][strand][last]\n\n last = pos\n\n print(\"remaining reads moved to higher peak: \", str(reads_moved), \"(\"+str(100*float(reads_moved)/reads_remaining)+\"%)\")\n \n\n wf = open(out_dir+fastq_filename+\"_mapping_stats\", 'a')\n wf.writelines(\"remaining reads moved to higher peak: \"+str(reads_moved)+\" \"+str(100*float(reads_moved)/reads_remaining)+\"%\\n\")\n wf.close()\n\n return split_loc_dict\n\ndef Annotate_insetions(split_loc_dict, mapped_reads):\n\n out_filename = out_dir+fastq_filename+\"_pooled_reads\" # the final output, this will hold the pooled insertion table \n wf = open(out_filename,'w')\n\n wf.writelines(\"ID\\tscaffold\\tstrand\\tlocation\\tannotation\\tn\\trel_loc\\trel_prop\\tgene_length\\n\")\n\n\n sc_insertions = 0\n sp_insertions = 0\n\n sc_genic_insertions = 0\n sp_genic_insertions = 0\n\n tot_insertions = 0\n tot_min_insertions = 0.0\n plasmid_reads = 0 # reads mapping to the plasmid\n Rtn_reads = 0 # reads mapping to the tn right border\n Ltn_reads = 0 # reads mapping to the tn left border\n\n gff_dict = {}\n f = open(gff)\n\n for line in f:\n\n line = line.split(\"\\t\")\n chrom = line[1]\n gene = line[0]\n start = int(line[4])\n end = int(line[5])\n strand = line[3]\n type = line[2]\n\n # put the annotation information in a dictionary # \n\n if chrom not in gff_dict:\n gff_dict[chrom] = {}\n\n gff_dict[chrom][gene] = [start, end, strand]\n\n\n# Search through the dictionary for insertions that fall within genes \n for chrom in split_loc_dict:\n for strand in split_loc_dict[chrom]:\n for pos in split_loc_dict[chrom][strand]:\n\n if split_loc_dict[chrom][strand][pos] > min_read_cutoff:\n tot_min_insertions+=1\n if chrom[:2] == 'sc':\n sc_insertions+=1\n elif chrom[:2] == 'sp':\n sp_insertions+=1\n tot_insertions+=1\n\n # set defaults for noncoding \n\n insertion_type = 'NC'\n gene_length = -1\n relative_insertion_site = -1\n prop_gene_insertion_in = -1\n\n for 
gff_chrom in gff_dict:\n if gff_chrom != chrom:\n continue\n for gene in gff_dict[gff_chrom]:\n if pos >= gff_dict[gff_chrom][gene][0] and pos <= gff_dict[gff_chrom][gene][1]: # if the insertion falls within a gene \n insertion_type = gene\n gene_length = gff_dict[gff_chrom][gene][1] - gff_dict[gff_chrom][gene][0]+1\n if gff_dict[gff_chrom][gene][2] == '+':\n relative_insertion_site = pos - gff_dict[gff_chrom][gene][0]\n\n else:\n relative_insertion_site = gff_dict[gff_chrom][gene][1] - pos\n\n if gene[:2] == 'sp' and split_loc_dict[chrom][strand][pos] > min_read_cutoff:\n sp_genic_insertions+=1\n elif gene[:2] == 'sc' and split_loc_dict[chrom][strand][pos] > min_read_cutoff:\n sc_genic_insertions+=1\n\n prop_gene_insertion_in = relative_insertion_site / float(gene_length)\n\n wf.writelines(chrom+\"_\"+strand+\"_\"+str(pos)+\"\\t\"+chrom+\"\\t\"+strand+\"\\t\"+str(pos)+\"\\t\"+insertion_type+\"\\t\"+str(split_loc_dict[chrom][strand][pos])+\"\\t\"+str(relative_insertion_site)+\"\\t\"+str(prop_gene_insertion_in)+\"\\t\"+str(gene_length)+\"\\n\")\n\n wf.close()\n\n\n\n \n f = open(out_filename)\n for line in f:\n if line[:2] != 'pl':\n continue\n\n line = line.split(\"\\t\")\n if line[4] == 'TN_right_arm':\n Rtn_reads+=int(line[5]) # reads mapping to the tn right border \n elif line[4] == 'TN_left_arm':\n\n Ltn_reads+=int(line[5]) # reads mapping to the tn left border \n elif line[4] == 'NC':\n plasmid_reads+=int(line[5])\n\n f.close()\n\n \n tot_genic_insertions = float(sc_genic_insertions+sp_genic_insertions)\n\n wf = open(out_dir+fastq_filename+\"_mapping_stats\", 'a')\n \n wf.writelines(\"reads mapping to NC plasmid backbone: \"+str(plasmid_reads)+\" (\"+str(100*plasmid_reads/mapped_reads)+\"%) of mapped reads\\n\")\n wf.writelines(\"reads mapping to TN right border: \"+str(Rtn_reads)+\" (\"+str(100*Rtn_reads/mapped_reads)+\"%) of mapped reads\\n\")\n wf.writelines(\"reads mapping to TN left border: \"+str(Ltn_reads)+\" (\"+str(100*Ltn_reads/mapped_reads)+\"%) of mapped reads\\n\")\n\n wf.writelines(\"total insertions: \"+str(tot_insertions)+\"\\n\")\n wf.writelines(\"total insertions with >\"+str(min_read_cutoff)+\" reads: \"+str(tot_min_insertions)+\"\\tscer: \"+str(sc_insertions)+\" (\"+str(100*sc_insertions/tot_min_insertions)+\"%)\"+\" spar: \"+str(sp_insertions)+\"(\"+str(100*sp_insertions/tot_min_insertions)+\"%)\\n\")\n\n\n if tot_genic_insertions > 0:\n \n\n\n wf.writelines(\"OF THESE:\\n\")\n wf.writelines(\"total genic insertions: \"+str(sp_genic_insertions + sc_genic_insertions)+\" (\"+str(100*(sp_genic_insertions + sc_genic_insertions)/tot_min_insertions)+\"% of insertions)\\n\")\n wf.writelines(\"Scer genic insertions: \"+str(sc_genic_insertions)+\" (\"+str(100*sc_genic_insertions/tot_genic_insertions)+\"% of genic insertions)\\n\")\n wf.writelines(\"Spar genic insertions: \"+str(sp_genic_insertions)+\" (\"+str(100*sp_genic_insertions/tot_genic_insertions)+\"% of genic insertions)\\n\")\n\n else:\n wf.writelines(\"no genic insertions\\n\")\n\n\n wf.close()\n\n\n\n \n\n#### START PROGRAM ####\n\nnum_parsed_reads = Filter_Reads(read_file) ## filters out reads that don't have tn sequence, writes the genomic portion of the remaining reads to a new file\nMap_Reads() ## maps reads\n\nprint(\"done mapping\")\n\nloc_dict, reads_remaining = Filter_for_ID_and_multimapping_and_pool(num_parsed_reads) # filters out reads below the identity threshold, that map to multiple locations and then pools insertions \n\n\nprint(\"done filtering\")\nloc_dict = Combine_near_mappings(loc_dict, 
reads_remaining) # combine insertions within 3bp of each other\n\nprint(\"done combine near mappings\")\n\nAnnotate_insetions(loc_dict, reads_remaining) # identify insertions in genes, and write the final pooled outfile\n\n\n\n\n\n\n\n\n","sub_path":"map_and_pool_BLAT_1-31-19.py","file_name":"map_and_pool_BLAT_1-31-19.py","file_ext":"py","file_size_in_byte":19398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"} +{"seq_id":"274998091","text":"# Copyright 2017 reinforce.io. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"\nImplements normalized advantage functions, largely following\n\nhttps://github.com/carpedm20/NAF-tensorflow/blob/master/src/network.py\n\nfor the update logic with different modularisation.\n\nThe core training update code is under MIT license, for more information see LICENSE-EXT.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom six.moves import xrange\nfrom tensorflow.contrib.framework import get_variables\n\nfrom tensorforce.core import Model\nfrom tensorforce.core.networks import NeuralNetwork, layers\n\n\nclass NAFModel(Model):\n\n default_config = dict(\n update_target_weight=1.0,\n clip_gradients=0.0\n )\n allows_discrete_actions = False\n allows_continuous_actions = True\n\n def __init__(self, config):\n \"\"\"\n Training logic for NAFs.\n\n :param config: Configuration parameters\n \"\"\"\n config.default(NAFModel.default_config)\n super(NAFModel, self).__init__(config)\n\n def create_tf_operations(self, config):\n super(NAFModel, self).create_tf_operations(config)\n\n # Get hidden layers from network generator, then add NAF outputs, same for target network\n with tf.variable_scope('training'):\n self.training_network = NeuralNetwork(config.network, inputs=self.state)\n self.internal_inputs.extend(self.training_network.internal_inputs)\n self.internal_outputs.extend(self.training_network.internal_outputs)\n self.internal_inits.extend(self.training_network.internal_inits)\n\n with tf.variable_scope('training_outputs'):\n num_actions = len(self.action)\n # Action outputs\n mean = layers['linear'](x=self.training_network.output, size=num_actions)\n for n, action in enumerate(sorted(self.action)):\n # mean = tf.Print(mean,[mean])\n self.action_taken[action] = mean[n]\n\n # Advantage computation\n # Network outputs entries of lower triangular matrix L\n lower_triangular_size = num_actions * (num_actions + 1) // 2\n l_entries = layers['linear'](x=self.training_network.output, size=lower_triangular_size)\n\n l_matrix = tf.exp(tf.map_fn(tf.diag, l_entries[:, :num_actions]))\n\n if num_actions > 1:\n offset = num_actions\n l_columns = list()\n for zeros, size in enumerate(xrange(num_actions - 1, 0, -1), 1):\n column = tf.pad(l_entries[:, offset: offset + size], ((0, 0), (zeros, 0)))\n l_columns.append(column)\n 
offset += size\n l_matrix += tf.stack(l_columns, 1)\n\n # P = LL^T\n p_matrix = tf.matmul(l_matrix, tf.transpose(l_matrix, (0, 2, 1)))\n # p_matrix = tf.Print(p_matrix, [p_matrix])\n\n # l_rows = []\n # offset = 0\n # for i in xrange(num_actions):\n # # Diagonal elements are exponentiated, otherwise gradient often 0\n # # Slice out lower triangular entries from flat representation through moving offset\n # diagonal = tf.exp(l_entries[:, offset]) # tf.slice(l_entries, (0, offset), (-1, 1))\n # n = config.actions - i - 1\n # # Slice out non-zero non-diagonal entries, - 1 because we already took the diagonal\n # non_diagonal = l_entries[:, offset + 1: offset + n + 1] # tf.slice(l_entries, (0, offset + 1), (-1, n))\n # # Fill up row with zeros\n # row = tf.pad(tf.concat(axis=1, values=(diagonal, non_diagonal)), ((0, 0), (i, 0)))\n # offset += (num_actions - i)\n # l_rows.append(row)\n #\n # # Stack rows to matrix\n # l_matrix = tf.transpose(tf.stack(l_rows, axis=1), (0, 2, 1))\n\n actions = tf.stack(values=[self.action[name] for name in sorted(self.action)], axis=1)\n action_diff = actions - mean\n\n # A = -0.5 (a - mean)P(a - mean)\n advantage = -tf.matmul(tf.expand_dims(action_diff, 1), tf.matmul(p_matrix, tf.expand_dims(action_diff, 2))) / 2\n advantage = tf.squeeze(advantage, 2)\n\n # Q = A + V\n # State-value function\n value = layers['linear'](x=self.training_network.output, size=1)\n q_value = tf.squeeze(value + advantage, 1)\n training_output_vars = get_variables('training_outputs')\n\n with tf.variable_scope('target'):\n self.target_network = NeuralNetwork(config.network, inputs=self.state)\n self.internal_inputs.extend(self.target_network.internal_inputs)\n self.internal_outputs.extend(self.target_network.internal_outputs)\n self.internal_inits.extend(self.target_network.internal_inits)\n target_value = dict()\n\n with tf.variable_scope('target_outputs'):\n # State-value function\n target_value_output = layers['linear'](x=self.target_network.output, size=1)\n for action in self.action:\n # Naf directly outputs V(s)\n target_value[action] = target_value_output\n\n target_output_vars = get_variables('target_outputs')\n\n with tf.name_scope(\"update\"):\n for action in self.action:\n q_target = self.reward[:-1] + (1.0 - tf.cast(self.terminal[:-1], tf.float32)) * config.discount\\\n * target_value[action][1:]\n delta = q_target - q_value[:-1]\n\n # We observe issues with numerical stability in some tests, gradient clipping can help\n if config.clip_gradients > 0.0:\n huber_loss = tf.where(tf.abs(delta) < config.clip_gradients, tf.multiply(tf.square(delta), 0.5),\n tf.abs(delta) - 0.5)\n loss = tf.reduce_mean(huber_loss)\n else:\n loss = tf.reduce_mean(tf.square(delta))\n # loss = tf.Print(loss, [loss])\n tf.losses.add_loss(loss)\n\n with tf.name_scope(\"update_target\"):\n # Combine hidden layer variables and output layer variables\n training_vars = self.training_network.variables + training_output_vars\n target_vars = self.target_network.variables + target_output_vars\n\n self.target_network_update = list()\n for v_source, v_target in zip(training_vars, target_vars):\n update = v_target.assign_sub(config.update_target_weight * (v_target - v_source))\n self.target_network_update.append(update)\n\n def update_target_network(self):\n \"\"\"\n Updates target network.\n\n :return:\n \"\"\"\n 
self.session.run(self.target_network_update)\n","sub_path":"tensorforce/models/naf_model.py","file_name":"naf_model.py","file_ext":"py","file_size_in_byte":7565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"57"}