diff --git "a/847.jsonl" "b/847.jsonl"
new file mode 100644
--- /dev/null
+++ "b/847.jsonl"
@@ -0,0 +1,631 @@
+{"seq_id":"96690481","text":"#econding=utf8\nimport os\nimport subprocess\nDIR_OF_THIS_SCRIPT = os.path.abspath( os.path.dirname( __file__ ) )\nOUTPUT_SCRIPT='''\nload(\"@hello_server//bazel-compilation-database:aspects.bzl\", \"compilation_database\")\n\ncompilation_database(\n name = \"compiledb\",\n targets = [\n \"//src:hello_server\",\n ],\n'''\n\n\ndef gen_compiledb():\n subprocess.call(\"bazel clean\", shell=True)\n f = open(DIR_OF_THIS_SCRIPT + \"/bazel-compilation-database/BUILD\", 'w+')\n print(OUTPUT_SCRIPT, file=f)\n exec_root = subprocess.check_output([\"bazel\", \"info\", \"execution_root\"]).decode('utf-8').replace('\\n', \"\")\n print(\" exec_root = \" + '\"' + exec_root + '\"' + \",\", file=f)\n print(\")\", file=f)\n f.close()\n subprocess.call(\"bazel build //src:hello_server\", shell=True)\n subprocess.call(\"bazel build //bazel-compilation-database:compiledb\", shell=True)\n subprocess.call(\"cp bazel-bin/bazel-compilation-database/compile_commands.json .\", shell=True)\n\ngen_compiledb()\n","sub_path":".gen_compiledb.py","file_name":".gen_compiledb.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"592937657","text":"from collections import deque\n\nn, m, t = map(int, input().split())\n\ncircles = [0]\n\nfor _ in range(n):\n circles.append(deque(list(map(int, input().split()))))\n\ncommands = []\n\nfor _ in range(t):\n x, d, k = map(int, input().split())\n commands.append((x, d, k))\n\nwhile (commands):\n x, d, k = commands.pop(0)\n for i in range(1, n + 1):\n # x의 배수인 원판만\n if (i % x == 0):\n if (d == 0):\n circles[i].rotate(k)\n else:\n circles[i].rotate(-k)\n\n # 인접수\n zeros = []\n # 1번 원판\n for i in range(m):\n if (i + 1 > m - 1):\n temp = 0\n else:\n temp = i + 1\n\n if (circles[1][i] == circles[1][i - 1] and circles[1][i] != 0):\n zeros.append((1, i))\n if (i - 1 < 0):\n zeros.append((1, m - 1))\n else:\n zeros.append((1, i - 1))\n if (circles[1][i] == circles[1][temp] and circles[1][i] != 0):\n zeros.append((1, i))\n zeros.append((1, temp))\n\n if (circles[1][i] == circles[2][i] and circles[1][i] != 0):\n zeros.append((1, i))\n zeros.append((2, i))\n\n # n번 원판\n for i in range(m):\n if (i + 1 > m - 1):\n temp = 0\n else:\n temp = i + 1\n\n if (circles[n][i] == circles[n][i - 1] and circles[n][i] != 0):\n zeros.append((n, i))\n if (i - 1 < 0):\n zeros.append((n, m - 1))\n else:\n zeros.append((n, i - 1))\n if (circles[n][i] == circles[n][temp] and circles[n][i] != 0):\n zeros.append((n, i))\n zeros.append((n, temp))\n\n if (circles[n][i] == circles[n - 1][i] and circles[n][i] != 0):\n zeros.append((n, i))\n zeros.append((n - 1, i))\n\n for i in range(2, n):\n # 2~n-1번원판\n for j in range(m):\n if (j + 1 > m - 1):\n temp = 0\n else:\n temp = j + 1\n\n if (circles[i][j] == circles[i][j - 1] and circles[i][j] != 0):\n zeros.append((i, j))\n if (j - 1 < 0):\n zeros.append((i, m - 1))\n else:\n zeros.append((i, j - 1))\n if (circles[i][j] == circles[i][temp] and circles[i][j] != 0):\n zeros.append((i, j))\n zeros.append((i, temp))\n if (circles[i][j] == circles[i - 1][j] and circles[i][j] != 0):\n zeros.append((i, j))\n zeros.append((i - 1, j))\n if (circles[i][j] == circles[i + 1][j] and circles[i][j] != 0):\n zeros.append((i, j))\n zeros.append((i + 1, j))\n\n zeros = list(set(zeros))\n\n if (zeros):\n for i in zeros:\n circles[i[0]][i[1]] = 0\n else:\n # 인접수가 없는경우\n sum_temp = 0\n cnt = 0\n for i in range(1, n + 1):\n for j in range(m):\n if (circles[i][j] != 0):\n sum_temp += circles[i][j]\n cnt += 1\n try:\n sum_temp = sum_temp / cnt\n except:\n continue\n\n for i in range(1, n + 1):\n for j in range(m):\n if (circles[i][j] > sum_temp and circles[i][j] != 0):\n circles[i][j] -= 1\n elif (circles[i][j] < sum_temp and circles[i][j] != 0):\n circles[i][j] += 1\n # 각 단계별로 원판 상태출력\n '''\n for i in range(1,n+1):\n print(circles[i])\n print()\n '''\n\nans = 0\nfor i in range(1, n + 1):\n ans += sum(circles[i])\n\nprint(ans)\n","sub_path":"17822_원판 돌리기.py","file_name":"17822_원판 돌리기.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"472945713","text":"import tables\n\n\ndef to_bits(s):\n result = []\n for c in s:\n bits = bin(ord(c))[2:]\n bits = '00000000'[len(bits):] + bits\n result.extend([int(b) for b in bits])\n return result\n\n\ndef from_bits(bits):\n chars = []\n for b in range(int(len(bits) / 8)):\n byte = bits[b * 8:(b + 1) * 8]\n chars.append(chr(int(''.join([str(bit) for bit in byte]), 2)))\n return ''.join(chars)\n\n\ndef apply_permutation(array, permutation):\n result = []\n for p in permutation:\n result.append(array[p - 1])\n return result\n\n\n# input: key = 56 bits\n# output: key = 64 bits\ndef get_extended_key(key):\n result = []\n sum = 0\n for i in range(56):\n result.append(key[i])\n sum += key[i]\n if (i + 1) in [7, 14, 21, 28, 35, 42, 49, 56]:\n result.append(int(sum % 2))\n sum = 0\n return result\n\n\n# input: key = 56 bits\n# output: keys[ {48 bits} ], len = 16 \ndef get_keys(key):\n extended_key = get_extended_key(key)\n result = []\n C = [apply_permutation(extended_key, tables.C0)]\n D = [apply_permutation(extended_key, tables.D0)]\n for i in range(16):\n C.append(C[i].copy())\n D.append(D[i].copy())\n for _ in range(tables.I[i]):\n C[i + 1].append(C[i + 1].pop(0))\n D[i + 1].append(D[i + 1].pop(0))\n new_key = apply_permutation(C[i + 1] + D[i + 1], tables.K)\n result.append(new_key)\n return result\n\n\n# input: string = 8 chars, key = 7 chars\n# output: string = 8 chars\ndef encrypt_block(string, key):\n keys = get_keys(to_bits(key))\n T0 = apply_permutation(to_bits(string), tables.IP)\n L = [T0[:32]]\n R = [T0[32:]]\n for i in range(1, 17):\n # L\n L.append(R[i - 1])\n\n # f1\n _F1 = apply_permutation(R[i - 1], tables.E)\n # f2\n _F2 = []\n for j in range(48):\n _F2.append((_F1[j] + keys[i - 1][j]) % 2)\n # f3\n _F3 = []\n for j in range(8):\n B = _F2[j * 6:(j + 1) * 6]\n a = B[0] * 2 + B[5]\n b = B[1] * 2 ** 3 + B[2] * 2 ** 2 + B[3] * 2 + B[4]\n _F3 += tables.BIN[tables.S[j][a][b]]\n # f4\n _F4 = apply_permutation(_F3, tables.P)\n\n # R\n R.append([])\n for j in range(32):\n R[i].append((L[i - 1][j] + _F4[j]) % 2)\n\n bits_result = apply_permutation(L[16] + R[16], tables.IP_)\n return from_bits(bits_result)\n\n\n# input: string = 8 chars, key = 7 chars\n# output: string = 8 chars\ndef decrypt_block(string, key):\n keys = get_keys(to_bits(key))\n T0 = apply_permutation(to_bits(string), tables.IP)\n L = [T0[:32]]\n R = [T0[32:]]\n for i in range(1, 17):\n # R\n R.append(L[i - 1])\n\n # f1\n _F1 = apply_permutation(L[i - 1], tables.E)\n # f2\n _F2 = []\n for j in range(48):\n _F2.append((_F1[j] + keys[15 - (i - 1)][j]) % 2)\n # f3\n _F3 = []\n for j in range(8):\n B = _F2[j * 6:(j + 1) * 6]\n a = B[0] * 2 + B[5]\n b = B[1] * 2 ** 3 + B[2] * 2 ** 2 + B[3] * 2 + B[4]\n _F3 += tables.BIN[tables.S[j][a][b]]\n # f4\n _F4 = apply_permutation(_F3, tables.P)\n\n # L\n L.append([])\n for j in range(32):\n L[i].append((R[i - 1][j] + _F4[j]) % 2)\n\n bits_result = apply_permutation(L[16] + R[16], tables.IP_)\n return from_bits(bits_result)\n\n\ndef encrypt(string, key):\n key = key[:7]\n while len(string) % 8 != 0:\n string += ' '\n result = \"\"\n for i in range(int(len(string) / 8)):\n result += encrypt_block(string[i * 8:(i + 1) * 8], key)\n return result\n\n\ndef decrypt(string, key):\n key = key[:7]\n while len(string) % 8 != 0:\n string += ' '\n result = \"\"\n for i in range(int(len(string) / 8)):\n result += decrypt_block(string[i * 8:(i + 1) * 8], key)\n return 
result\n","sub_path":"6term/isob/Lab2/DES.py","file_name":"DES.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"402731922","text":"\nimport sys\nimport time\nimport glob\n\ndef getFileString(file_path):\n file_string = ''\n with open(file_path, 'r') as file_obj:\n file_string = file_obj.read()\n return file_string\n\ndef handleControlSequence(line, i):\n replacement = ''\n\n t_blank = False\n link_text = ''\n link_href = ''\n\n i += 1\n\n # handle text\n if line[i] == '{':\n i += 1\n while line[i] != '}':\n link_text += line[i]\n i += 1\n i += 1\n\n if line[i] == '*':\n t_blank = True\n i += 1\n\n while line[i] != '}':\n link_href += line[i]\n i += 1\n\n if len(link_text) == 0:\n link_text = link_href\n\n replacement = ''\n replacement += link_text\n replacement += ''\n\n return replacement, i\n\ndef generateHTML(file_string):\n new_file_string = ''\n\n lines = [line.strip('\\r\\t').rstrip() for line in file_string.split('\\n')]\n\n new_file_string += '\\n'\n for line in lines:\n new_line = ''\n\n if len(line) > 0:\n\n if line.strip().startswith('include:'):\n attr = line.split(':')\n file_path = attr[1].strip()\n new_line += generateHTML(getFileString(file_path))\n new_file_string += new_line + '\\n'\n continue\n elif line.strip().startswith('img:'):\n attr = line.split(':')\n img_path = attr[1].strip()\n height = ''\n if len(attr) > 2:\n height = ' style=\"height: ' + attr[2] + 'px;\"'\n if len(attr) > 3 and 'l' in attr[3]:\n new_line += ''\n else:\n new_line += '
'\n else:\n indentation = ''\n while line[0] == ' ':\n indentation += ' '\n line = line[1:]\n if line[0:2] in ['- ', '+ ', '* ']:\n indentation += ' ' if line[0] == ' ' else line[0]\n line = line[1:]\n while line[0] == ' ':\n indentation += ' '\n line = line[1:]\n\n i = 0\n while i < len(line):\n if line[i:i+2] == ' ':\n new_line += ' '\n i += 1\n elif line[i] == '{':\n append, i = handleControlSequence(line, i)\n new_line += append\n else:\n new_line += line[i]\n i += 1\n\n if len(indentation) > 0:\n new_file_string += '
' + indentation + '
' + new_line + '
\\n'\n continue\n\n new_file_string += new_line + ' \\n'\n\n return new_file_string\n\ndef generateHTML_full(file_string):\n new_file_string = ''\n\n # top of html file\n with open('./include/top.html', 'r') as file_obj:\n new_file_string += file_obj.read()\n\n new_file_string += generateHTML(file_string)\n\n # bottom of html file\n with open('./include/bottom.html', 'r') as file_obj:\n new_file_string += file_obj.read()\n\n return new_file_string\n\ndef run():\n txt_files = glob.glob('./*.txt')\n\n for file_path in txt_files:\n\n file_name = file_path.split('/')[-1].split('.')[-2]\n file_string = getFileString(file_path)\n\n new_file_string = generateHTML_full(file_string)\n new_file_path = '../' + file_name + '.html'\n\n with open(new_file_path, 'w') as new_file_obj:\n new_file_obj.write(new_file_string)\n\nif '--dev' in sys.argv:\n print('Running...')\n while True:\n run()\n time.sleep(0.5)\nelse:\n run()","sub_path":"pages_source/gen_pages.py","file_name":"gen_pages.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"446188221","text":"# -*- coding: utf-8 -*-\nimport re\nfrom tutorial.items import TutorialItem\nimport scrapy\n\n\nclass StackoverflowSpider(scrapy.Spider):\n name = 'stackoverflow'\n allowed_domains = ['stackoverflow.com']\n start_urls = ['https://stackoverflow.com/questions?page={}&sort=newest'.format(i)for i in range(1000,1010)]\n\n def parse(self, response):\n for summary in response.css(\".question-summary\"):\n item=TutorialItem()\n desc=summary.css('.excerpt::text').extract_first()\n #把\\r\\n替换掉,并且去除里面的null字符串\n item['desc']=re.sub(r'[\\r\\n]]','',desc).strip()\n # print('Desc:',desc)\n\n item['title']=summary.css('.summary h3 a::text').extract_first()\n # print('Title:',title)\n\n view=summary.css('.views::text').extract_first()\n item['view']=re.sub(r'[^\\d+]','',view)\n # print('User:',view)\n\n # item['start']=summary.css('.summary .started .user-info .user-action-time span::attr(title)').extract_first()\n item['start'] = summary.xpath('/html/body/div[3]/div/div[1]/div[2]/div[1]/div[2]/div[3]/div/div[1]/span/@title').extract_first()\n\n # print('Start time:',start)\n\n item['user']=summary.css('.summary .started .user-info .user-details a::attr(href)').extract_first()\n # print('user:',user)\n\n # item['answer']=summary.css('.summary .stats .statusunanswered strong::text').extract_first()\n item['answer'] = summary.xpath('/html/body/div[3]/div/div[1]/div[2]/div[1]/div[1]/div[2]/div[2]/strong/text()').extract_first()\n # print('answer:',answer)\n\n item['vote']=summary.css('.statscontainer .stats .vote .votes .vote-count-post strong::text').extract_first()\n # print('vote:',vote)\n yield item","sub_path":"tutorial/spiders/stackoverflow.py","file_name":"stackoverflow.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"363856052","text":"#Modified by smartbuilds.io\n#Date: 27.09.20\n#Desc: This web application serves a motion JPEG stream\n# main.py\n# import the necessary packages\nfrom flask import Flask, render_template, Response, request\nfrom car import CarControls\nfrom camera import VideoCamera\nimport time\nimport threading\nimport os\n\nleft = 2\nright = 3\nforward = 4\nbackward = 17\n\nCControl = CarControls(left,right,forward, backward)\n\npi_camera = VideoCamera(flip=True) # flip pi camera if upside down.\n\n# App Globals (do not edit)\napp = Flask(__name__)\n\n@app.route('/stopt', methods=['POST'])\ndef tStop():\n CControl.tStop()\n return('',204)\n\n@app.route('/stopg', methods=['POST'])\ndef gStop():\n CControl.gStop()\n return('',204)\n\n@app.route('/movel', methods=['POST'])\ndef leftTurn():\n CControl.left()\n return('',204)\n\n@app.route('/mover', methods=['POST'])\ndef rightTurn():\n CControl.right()\n return('',204)\n\n@app.route('/movef', methods=['POST'])\ndef foward():\n CControl.forward()\n return('',204)\n\n@app.route('/moveb', methods=['POST'])\ndef backward():\n CControl.backward()\n return('',204)\n\n@app.route('/moves', methods=['POST'])\ndef stop():\n CControl.stop()\n return('',204)\n\n@app.route('/')\ndef index():\n return render_template('index.html', carC = CControl) #you can customze index.html here\n\ndef gen(camera):\n #get camera frame\n while True:\n frame = camera.get_frame()\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n\\r\\n')\n\n@app.route('/video_feed')\ndef video_feed():\n return Response(gen(pi_camera),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\nif __name__ == '__main__':\n\n app.run(host='0.0.0.0', debug=False)\n \n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"316585099","text":"import json\nimport os\nimport requests\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\n\n\"\"\"\nThis module contains the interface class used by the \nCorona Spread feature, making an API request to the\nCorona Monitor API, which is hosted at Rapid Api and\ncan be found here:\n\nhttps://rapidapi.com/astsiatsko/api/coronavirus-monitor/\n\"\"\"\n\n\nclass ApiHandle:\n\t\"\"\"\n\tCall api and parse output to JSON. Returns cache \n\tunless the data over 2 hours old by default as to not \n\toverload the api service. The object calls the api upon\n\tinstantiation, and will automatically cache the response.\n\n\t:uri:\n\t\tURI for the REST api\n\n\t:_last_api_call:\n\t\tdatetime stamp for when data was most recently fetched\n\t\tfrom the api, used to return cache within the defined\n\t\tspan upon construction, minmum 0 hours.\n\n\t:_wait_time:\n\t\tseconds calculated by the defined standby_hours parameter\n\n\t:_cached_response:\n\t\tlast response received by the API\n\n\t:_headers_:\n\t\tdictionary which can be added to with the add_header method.\n\t\tContains headers which will be used upon a request with the \n\t\tfetch() call.\n\t\"\"\"\n\n\tdef __init__(self, uri: str, standby_hours = 2):\n\t\tself.uri: str = uri\n\t\tself.last_api_call: datetime = None\n\t\tself._wait_time = (60 * 60) * standby_hours\n\t\tself._cached_response = None\n\t\tself._cached_response: dict = None\n\t\tself._headers = {}\n\n\t@property\n\tdef uri(self) -> str:\n\t\treturn self._uri\n\t\n\t@uri.setter\n\tdef uri(self, uri: str) -> None:\n\t\tif uri.startswith('https'):\n\t\t\tself._uri = uri\n\t\telse:\n\t\t\traise AttributeError('Got \"http\", expected \"https\"')\n\t\n\t@property\n\tdef last_api_call(self) -> str:\n\t\t\"\"\"\n\t\tReturn property in string format for easy readability\n\t\tfor users.\n\t\t\"\"\"\n\t\treturn self._last_api_call.strftime(\"%Y-%m-%d %H:%M\")\n\n\t@last_api_call.setter\n\tdef last_api_call(self, val: datetime) -> None:\n\t\tself._last_api_call = val\n\n\tdef add_header(self, key: str, value: str) -> None:\n\t\t\"\"\"\n\t\tAllows this object to add HTML headers for the \n\t\trequest. The method is meant to be used prior to\n\t\ta call for an API which requires headers to work.\n\n\t\t:param key:\n\t\t\tstr\n\t\t\tthe key in the header, example: 'User-Agent'\n\t\t:param vaue:\n\t\t\tstr\n\t\t\tThe value behind said key.\n\t\t:returns:\n\t\t\tNone\n\t\t\"\"\"\n\t\tself._headers[key] = value\n\n\tdef fetch(self) -> dict:\n\t\t\"\"\"\n\t\tCall the api and mutate the instance variable _cached_response\n\t\tat the same time, if either none prior were made or the time \n\t\texpired and it needs to be refreshed. 
\n\n\t\t:returns:\n\t\t\tdict\n\t\t\"\"\"\n\t\tif self._cached_response:\n\t\t\tseconds_since_last_call = (datetime.now() - self._last_api_call).seconds\n\t\t\tif seconds_since_last_call < self._wait_time: \n\t\t\t\treturn self._cached_response\n\t\ttry:\n\t\t\tresponse = requests.get(self.uri, headers = self._headers).json()\n\t\texcept Exception:\n\t\t\traise\n\t\t\n\t\tself._cached_response = response\n\t\tself.last_api_call = datetime.now()\n\t\treturn response\n\n\nclass Client:\n\t\"\"\"\n\tAct as the interface from the retreived data \n\tby an instance of the ApiHandle class.\n\n\tReturn infections by country, mortalities,\n\trecoveries based upon method call.\n\t\"\"\"\n\n\tdef __init__(self, api_handle: ApiHandle, translation_file_path: str):\n\t\tself.api_handle = api_handle\n\t\tself.translation_file_path = translation_file_path\n\n\tdef _translate(self, country: str, from_language: str) -> str:\n\t\t\"\"\"\n\t\tReturn the value behind key country parameter\n\t\twhich is the swedish translated string of given\n\t\tcountry.\n\t\t:param country:\n\t\t\tstring, country to translate\n\t\t:param from_language:\n\t\t\tstring, from which language. Either Swedish to English or vice versa.\n\t\t:returns:\n\t\t\tstring\n\t\t\"\"\"\n\t\tcountry = country.lower()\n\t\ttry:\n\t\t\twith open(self.translation_file_path, 'r', encoding = 'utf-8') as f:\n\t\t\t\ttranslation = json.loads(f.read())\n\t\texcept Exception as e:\n\t\t\traise Exception(f'Could not load translation file. {e}')\n\t\t\n\t\tif from_language == 'swedish':\n\t\t\treturn translation['swe_to_eng'][country]\n\t\treturn translation['eng_to_swe'][country]\n\n\tdef get_raw_data(self):\n\t\t\"\"\"\n\t\tReturns the raw api return without any parsing.\n\t\tfor debugging.\n\t\t\"\"\"\n\t\treturn self.api_handle.fetch()\n\n\tdef get_total_recoveries(self) -> int:\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\treturn sum([int(i['total_recovered'].replace(',','')) for i in data])\n\n\tdef get_total_infections(self) -> int:\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\treturn sum([int(i['cases'].replace(',','')) for i in data])\n\n\tdef get_total_deaths(self, sort_by_highest = True) -> str:\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\treturn sum([int(i['deaths'].replace(',','')) for i in data])\n\n\tdef get_recoveries(self, sort_by_highest = True) -> str:\n\t\tsorter = lambda i: int(i['total_recovered'].replace(',',''))\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\tdata.sort(key = sorter, reverse = sort_by_highest)\n\t\ttranslated_country = self._translate(data[0]['country_name'], 'english')\n\t\treturn f\"{translated_country}: {data[0]['total_recovered']}\"\n\n\tdef get_infections(self, sort_by_highest = True) -> str:\n\t\tsorter = lambda i: int(i['cases'].replace(',',''))\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\tdata.sort(key = sorter, reverse = sort_by_highest)\n\t\ttranslated_country = self._translate(data[0]['country_name'], 'english')\n\t\treturn f\"{translated_country}: {data[0]['cases']}\"\n\n\tdef get_deaths(self, sort_by_highest = True) -> str:\n\t\tsorter = lambda i: int(i['deaths'].replace(',',''))\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\tdata.sort(key = sorter, reverse = sort_by_highest)\n\t\ttranslated_country = self._translate(data[0]['country_name'], 'english')\n\t\treturn f\"{translated_country}: {data[0]['deaths']}\"\t\n\n\tdef get_by_query(self, query: str, country_name: str) -> str:\n\t\t\"\"\"\n\t\tGet details on a country 
depending on query.\n\t\t:param data:\n\t\t\tstring representing deaths, recoveries or cases. These are:\n\t\t\t- 'cases'\n\t\t\t- 'recovered'\n\t\t\t- 'deaths'\n\t\t:param country: \n\t\t\tstring represenging country for lookup.\n\t\t:returns:\n\t\t\tstring\n\t\t\"\"\"\n\n\t\tdata = self.api_handle.fetch()['countries_stat']\n\t\tfor country in data:\n\t\t\tif country['country_name'].lower() == self._translate(country_name, 'swedish'):\n\t\t\t\treturn country[query]\n\t\traise KeyError(f'No such key: {country_name}')\n\n\tdef get_data_timestamp(self) -> str:\n\t\t\"\"\"\n\t\tReturns the datetime string under 'statistic_taken_at' key\n\t\tin response body from the API. This indicates when the\n\t\tstatistics were taken, thus how old the data is.\n\t\t:returns:\n\t\t\tstring, datetime\n\t\t\"\"\"\n\t\treturn self.api_handle.fetch()['statistic_taken_at']","sub_path":"source/coronafeatureclient.py","file_name":"coronafeatureclient.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"435823156","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\nimport unittest\nimport operator\nimport sys\nimport os.path\nsys.path.append(os.path.abspath('..'))\nfrom mecab.writer import WordInfo\nfrom mecab import partofspeech as PoS\nfrom textproc.dataloader import getDataLoader\nfrom textproc.sentenceparser import MecabSentenceParser, PyPortSentenceParser\n\nclass SentenceParserTest(unittest.TestCase):\n def setUp(self):\n self.pyparser = PyPortSentenceParser(getDataLoader())\n self.exeparser = MecabSentenceParser()\n\n def testExeSimple(self):\n res = self.exeparser.tokenize('ですからあの人')\n expected = [WordInfo('ですから', 0, 'ですから', PoS.CONJ, 'デスカラ'),\n WordInfo('あの', 4, 'あの', PoS.FILLER, 'アノ'),\n WordInfo('人', 6, '人' ,PoS.NOUN, 'ヒト')]\n self.assertEquals(expected, res)\n\n def testPySimple(self):\n res = self.pyparser.tokenize('ですからあの人')\n expected = [WordInfo('ですから', 0, 'ですから', PoS.CONJ, 'デスカラ'),\n WordInfo('あの', 4, 'あの', PoS.FILLER, 'アノ'),\n WordInfo('人', 6, '人' ,PoS.NOUN, 'ヒト')]\n self.assertEquals(expected, res)\n\n\n def testMecabFailure(self):\n \"\"\"\n A test where Mecab fails to recognize the verb 滲み込む\n \"\"\"\n result = self.exeparser.tokenize('すべてに滲み込み')\n result = list(map(operator.attrgetter('dictionaryForm'), result))\n self.assertEquals(['すべて', 'に', '滲みる', '込み'], result)\n\n def testPyPort(self):\n result = self.pyparser.tokenize('所に着いたのは')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['所', 'に', '着い', 'た', 'の', 'は'], result)\n\n def testWhiteSpace(self):\n result = self.pyparser.tokenize('\\n所に着いたのは')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['所', 'に', '着い', 'た', 'の', 'は'], result)\n\n def testNumericKanji(self):\n result = self.pyparser.tokenize('一列縦隊')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['一', '列', '縦隊'], result)\n\n def testUnicodeErrorInString(self):\n result = self.pyparser.tokenize('ドンキ-・バー')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['ドンキ', '-', '・', 'バー'], result)\n\n\n\n def testTokenizeNum(self):\n \"\"\"\n ~\n \"\"\"\n result = self.pyparser.tokenize('九~九')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['九', '~', '九'], result)\n\n def testWhiteSpaceInside(self):\n result = self.pyparser.tokenize('\\n船が検 疫所に\\n')\n words = list(map(operator.attrgetter('word'), result))\n self.assertEquals(['船', 'が', '検', '疫所', 'に'], words)\n positions = list(map(operator.attrgetter('startPos'), result))\n self.assertEquals([1, 2, 3, 5, 7], positions)\n\n def testTokenize2(self):\n res = self.pyparser.tokenize('所に着いたのは')\n expected = [ WordInfo('所', 0, '所', PoS.NOUN, 'トコロ'),\n WordInfo('に', 1, 'に', PoS.PRT_CASE, 'ニ'),\n WordInfo('着い', 2, '着く', PoS.VERB, 'ツイ'),\n WordInfo('た', 4, 'た', PoS.VERB_AUX, 'タ'),\n WordInfo('の', 5, 'の', PoS.NOUN_NONIND, 'ノ'),\n WordInfo('は', 6, 'は', PoS.PRT_BIND, 'ハ')\n ]\n self.assertEquals(expected, res)\n\n def testUnknownWord(self):\n res = self.pyparser.tokenize('デッキに昇って行った')\n expected = [ WordInfo('デッキ', 0, 'デッキ', PoS.NOUN, 'デッキ'),\n WordInfo('に', 3, 'に', PoS.PRT_CASE, 'ニ')\n ]\n self.assertEquals(expected, res[0:2])\n\n def testComma(self):\n result = self.pyparser.tokenize('や、船客')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEqual(['や', '、', '船客'], result)\n\n def testUnkUnk(self):\n result = self.pyparser.tokenize('はっぴー・ばれん')\n result = list(map(operator.attrgetter('word'), result))\n self.assertEqual(['はっぴ', 
'ー', '・', 'ばれ','ん'], result)\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(SentenceParserTest)\n unittest.TextTestRunner(verbosity=2).run(suite)","sub_path":"tests/test_sentenceparser.py","file_name":"test_sentenceparser.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"467954910","text":"\n\n#calss header\nclass _TRIBUNE():\n\tdef __init__(self,): \n\t\tself.name = \"TRIBUNE\"\n\t\tself.definitions = [u'used in the titles of some newspapers: ', u\"in ancient Rome, an elected official whose job was to protect people's rights\"]\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_tribune.py","file_name":"_tribune.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"195027521","text":"from ckeditor.fields import RichTextField\nfrom django.db import models\nfrom blog_website.utils.models import BaseModel\nfrom user.models import User\n\n\nclass ArticleCategory(BaseModel):\n \"\"\"文章分类\"\"\"\n name = models.CharField(max_length=10, verbose_name='名称', help_text='不超过10个字')\n parent = models.ForeignKey('self', null=True, blank=True, related_name='subs',\n on_delete=models.CASCADE, verbose_name='父类别')\n describe = models.CharField(max_length=100, default='', verbose_name='类别描述', help_text='不超过100个字')\n image_url = models.CharField(max_length=1000, null=True, verbose_name='类别图片')\n\n class Meta:\n db_table = 'article_category'\n verbose_name = '文章分类'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n \"\"\"文章\"\"\"\n author = models.ForeignKey(User, null=True, blank=True,\n on_delete=models.CASCADE, verbose_name='作者')\n title = models.CharField(max_length=50, verbose_name='标题', help_text='b不超过50字')\n content = RichTextField(verbose_name='内容')\n category1 = models.ForeignKey(ArticleCategory, on_delete=models.PROTECT,\n related_name='cat1', verbose_name='一级分类')\n category2 = models.ForeignKey(ArticleCategory, on_delete=models.PROTECT,\n related_name='cat2', verbose_name='二级分类')\n read_count = models.IntegerField(default=0, verbose_name='阅读量')\n index_image = models.CharField(max_length=1000, null=True, verbose_name='文章主图')\n is_top = models.BooleanField(default=False, verbose_name='是否置顶')\n like_count = models.IntegerField(default=0, verbose_name='点赞数')\n describe = models.TextField(default='', verbose_name='文章描述', help_text='用于列表页展示文章简介')\n labels = models.ManyToManyField('Label', verbose_name='文章标签')\n\n class Meta:\n db_table = 'article'\n verbose_name = '文章'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.title\n\n\nclass Label(BaseModel):\n \"\"\"文章标签\"\"\"\n name = models.CharField(max_length=20, verbose_name='文章标签', help_text='不超过20个字')\n\n class Meta:\n db_table = 'label'\n verbose_name = '标签'\n verbose_name_plural = verbose_name\n\n def __str__(self):\n return self.name\n","sub_path":"blog_website/blog_website/apps/article/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"327699407","text":"import tensorflow as tf\n\n\"\"\"\na=tf.Variable(0)\nb=tf.Variable(1)\nc=tf.add(a,b)\n\nupdate1=tf.assign(a,tf.add(a,b))\nupdate2=tf.assign(a,tf.add(a,c))\nupdate3=tf.assign(b,tf.add(a,b))\nupdate4=tf.assign(b,tf.add(c,b))\n#update5=tf.assign(c,tf.add(a,c))\n#update6=tf.assign(c,tf.add(c,b))\nsess=tf.Session()\ninit_op=tf.initialize_all_variables()\nsess.run(init_op)\n\nsess.run(update1)\nprint(sess.run([a,b,c]))\nsess.run(update2)\nprint(sess.run([a,b,c]))\nsess.run(update3)\nprint(sess.run([a,b,c]))\nsess.run(update4)\nprint(sess.run([a,b,c]))\n\nsess.close()\n\"\"\"\nweights=tf.Variable(tf.random_normal([7,2],stddev=0.35),name=\"weights0\")\nbiases=tf.Variable(tf.zeros([200]),name=\"biases0\")\n# Create another variable with the same value as 'weights'.\nw2 = tf.Variable(weights.initialized_value(), name=\"w20\")\n# Create another variable with twice the value of 'weights'\nw_twice = tf.Variable(weights.initialized_value() * 2.0, name=\"w_twice0\")\n\ninit_op=tf.initialize_all_variables()\nsaver = tf.train.Saver()\n\nwith tf.Session() as sess:\n\tsess.run(init_op)\n\t#sess.run(tf.initialize_variables([w2,w_twice]))\n\t#sess.run(tf.initialize_variables([biases]))\n\t\t\n\tprint(sess.run(weights))\n\tprint(sess.run(biases))\n\tprint(sess.run(w2))\n\tprint(sess.run(w_twice))\n\t\n\tsave_path=saver.save(sess,\"/tmp/model.csv\")\n\tprint(\"MOdel saved in file: %s\"% save_path)\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"474394369","text":"import unittest\nfrom Ship import Ship\n\nclass TestShip(unittest.TestCase):\n\n def test_battleship_coords(self):\n ship = Ship(\"battleship\", \"H\", 1, \"A\")\n ship_body = [(0, 0), (0, 1), (0, 2), (0, 3)]\n self.assertEqual(ship_body, ship.get_body)\n\n ship = Ship(\"battleship\", \"V\", 1, \"A\")\n ship_body = [(0, 0), (1, 0), (2, 0), (3, 0)]\n self.assertEqual(ship_body, ship.get_body)\n\n def test_cruiser_coords(self):\n ship = Ship(\"cruiser\", \"H\", 1, \"A\")\n ship_body = [(0, 0), (0, 1), (0, 2)]\n self.assertEqual(ship_body, ship.get_body)\n\n ship = Ship(\"cruiser\", \"V\", 1, \"A\")\n ship_body = [(0, 0), (1, 0), (2, 0)]\n self.assertEqual(ship_body, ship.get_body)\n \n def test_destroyer_coords(self):\n ship = Ship(\"destroyer\", \"H\", 1, \"A\")\n ship_body = [(0, 0), (0, 1)]\n self.assertEqual(ship_body, ship.get_body)\n\n ship = Ship(\"destroyer\", \"V\", 1, \"A\")\n ship_body = [(0, 0), (1, 0)]\n self.assertEqual(ship_body, ship.get_body)\n \n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"TextbaseGames/Battleship/test_Ship.py","file_name":"test_Ship.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"583090172","text":"#!/usr/bin/python\nimport random\nimport os\n\n\nclass SloppyJoe(object):\n def __init__(self):\n ROOT = os.path.dirname(os.path.abspath(__file__))\n\n self.adjectives = [x.rstrip() for x in open(\"%s/%s\" % (ROOT, \"words.txt\")).readlines()]\n self.names = [x.rstrip() for x in open(\"%s/%s\" % (ROOT, \"names.txt\")).readlines()]\n\n def get_adjective(self):\n return random.sample(self.adjectives, 1)[0]\n\n def get_common_name(self, names=[]):\n if len(names) == 0:\n names = self.names\n\n return random.sample(names, 1)[0]\n\n def generate_name(self, alliterate=False):\n \"\"\"\n Returns a name that is made from a list of synonyms for sloppy and\n common names. If ``alliterate`` is True only names where the adjective\n and the common name have the same first character will be returned.\n \"\"\"\n names = self.names\n adjective = self.get_adjective()\n\n if alliterate:\n names = []\n\n while len(names) == 0:\n names = filter(lambda name: name[0] == adjective[0], self.names)\n\n # If there aren't any names, get a new adjective and try again.\n if len(names) == 0:\n adjective = self.get_adjective()\n\n name = self.get_common_name(names)\n return \"%s %s\" % (adjective, name)\n","sub_path":"chrispickett_me/projects/sloppyjoe/sloppyjoe.py","file_name":"sloppyjoe.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"460125165","text":"from flask import (\n Blueprint, flash, g, redirect, render_template, request, session, url_for, make_response, Response\n)\nimport json\nfrom datetime import datetime\nfrom lastMeal.models.user import User\nfrom lastMeal.models.ingredient import Ingredient\nfrom bson.objectid import ObjectId\n\nfrom flask_jwt_extended import get_jwt_identity\nfrom flask_jwt_extended import jwt_required\nimport datetime as dt\nimport requests\n\n# Blueprint for connection to main process\nbp = Blueprint('recipes', __name__, url_prefix='/v1/recipes')\n\napi_key = ''\n\n# Basic Recipe Request Based on Ingredients\n@bp.route('', methods=['POST'])\n#@jwt_required()\ndef fetch_recipes():\n request_data = request.json\n\n ingredientList = request_data['ingredients']\n\n if ingredientList is None:\n return ({\"error\": \"no ingredients were passed\"}, 400)\n\n try:\n body = {\n 'ignorePantry': True,\n 'ingredients': ingredientList,\n 'limitLicense': False,\n 'number': 10,\n 'ranking': 1,\n 'apiKey': api_key\n }\n\n endpoint = \"https://api.spoonacular.com/recipes/findByIngredients\"\n\n headers={\n \"X-Mashape-Key\": api_key,\n \"X-Mashape-Host\": \"mashape host\"\n }\n \n r = requests.get(endpoint, params=body, headers=headers)\n recipe_results = r.json()\n\n print(\"TESTING SPOONACULAR RECIPE API\")\n print(recipe_results)\n\n new_data = {}\n\n new_data['recipe_data'] = recipe_results\n return ({'recipe_data': new_data}, 200)\n \n except Exception as e:\n print(e)\n return ({\"error\": \"Error in recipe fetch request\"}, 400)\n\n@bp.route('/', methods=['GET'])\n# @jwt_required()\ndef fetch_recipe_info(recipe_id):\n\n if recipe_id is None:\n return ({\"error\": \"No recipe ID was passed\"}, 400)\n \n try:\n endpoint = \"https://api.spoonacular.com/recipes/\" + recipe_id + \"/information\"\n\n body = {\n 'apiKey': api_key\n }\n\n headers={\n \"X-Mashape-Key\": api_key,\n \"X-Mashape-Host\": \"mashape host\"\n }\n\n r = requests.get(endpoint, params=body, headers=headers)\n recipe_info_results = r.json()\n\n new_data = {}\n\n new_data['recipe_info'] = recipe_info_results\n return ({'recipe_id': recipe_id, 'recipe_data': new_data}, 200)\n\n except Exception as e:\n print(e)\n return ({\"error: Error in recipe info fetch request\"}, 400)\n\n@bp.route('save/', methods=['GET'])\n# @jwt_required()\ndef user_favorite_recipe(recipe_id):\n pass \n # placeholder for user-favorited recipes; make sure that data is parsed before saving, and that the recipe ID is saved\n # Upon clicking on the UI, we can redirect to the same recipeInfo page that we do for the pantry/recipe page\n\n\n# Space here for any additional parsing we want to do in the backend\n","sub_path":"lastMeal/api/v1/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"121864953","text":"# [Note] http://numba.pydata.org/\n# [Note] http://numba.pydata.org/numba-doc/0.35.0/index.html\n# [Note] conda update numba\n\n\nimport numba\nfrom numba import jit, float32, int32, void, cuda\nfrom numpy import arange\nfrom timeit import default_timer as timer\n\nprint(\"Numba Version\", numba.__version__)\n\n# jit decorator tells Numba to compile this function.\n# The argument types will be inferred by Numba when function is called.\n@jit\ndef jit_sum_1(arr):\n M, N = arr.shape\n result = 0.0\n for i in range(M):\n for j in range(N):\n result += arr[i,j]\n return result\n\n\n@jit(float32(int32[:]))\ndef jit_sum_2(arr):\n M, N = arr.shape\n result = 0.0\n for i in range(M):\n for j in range(N):\n result += arr[i,j]\n return result\n\n\ndef sum(arr):\n M, N = arr.shape\n result = 0.0\n for i in range(M):\n for j in range(N):\n result += arr[i,j]\n return result\n\n\n# @cuda.jit(void(int32[:]))\n# def cuda_jit_sum(arr):\n# M, N = arr.shape\n# result = 0.0\n# for i in range(M):\n# for j in range(N):\n# result += arr[i,j]\n# return result\n\n\na = arange(900000000).reshape(30000, 30000)\nprint(a)\n\nprint()\n\ns = timer()\nresult = jit_sum_1(a)\ne = timer()\nprint(\"JIT_SUM_1: {:7.6f} ms\".format((e - s) * 1000))\nprint(result)\n\nprint()\n\ns = timer()\nresult = jit_sum_2(a)\ne = timer()\nprint(\"JIT_SUM_2: {:7.6f} ms\".format((e - s) * 1000))\nprint(result)\n\nprint()\n\ns = timer()\nresult = sum(a)\ne = timer()\nprint(\"NORMAL_SUM: {:7.6f} ms\".format((e - s) * 1000))\nprint(result)\n\nprint()\n\n# s = timer()\n# result = cuda_jit_sum(a)\n# e = timer()\n# print(\"{:7.6f} ms\".format((e - s) * 1000))\n# print(result)","sub_path":"1731061014_jinseojeong/numba_test/numba_test.py","file_name":"numba_test.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"573460268","text":"import tensorflow as tf\nfrom tensorflow.python.platform import gfile\nimport INCEPTION_V3_demo\nimport os.path\nimport random\nimport numpy as np\nimport glob\n\nBOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'\n\n#图像输入张量所对应的名称。\nJPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'\n\n#下载的谷歌训练好的Inception-v3模型文件目录。\n\n\nMODEL_FILE = 'graph.pb'\n\n\n\ndef create_image_test_lists():\n INPUT_DATA = 'test'\n file_list = []\n extensions = ['jpg', 'jpeg', '#JPG', '#JPEG']\n for extension in extensions:\n file_glob = os.path.join(INPUT_DATA, '*.' + extension)\n file_list.extend(glob.glob(file_glob))\n return file_list \n\nimage_path = create_image_test_lists()[0]\n#获取图片内容。\nimage_data = gfile.GFile(image_path, 'rb').read()\n\ndef get_bottleneck_values():\n #load graph\n with gfile.GFile(\"path/to/model/classify_image_graph_def.pb\",'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n\n #加载读取的Inception-v3模型,并返回数据输入所对应的张量以及计算瓶颈层结果所对应\n #的张量。\n bottleneck_tensor, jpeg_data_tensor = tf.import_graph_def(graph_def,\n return_elements=[BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME])\n\n with tf.Session() as sess:\n bottleneck_values = sess.run(bottleneck_tensor, {jpeg_data_tensor: image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n\n return bottleneck_values\n \ndef main(self):\n train_bottlenecks = get_bottleneck_values()\n #load graph\n saver = tf.train.import_meta_graph(\"path/to/save/model.ckpt.meta\")\n\n with tf.Session() as sess:\n saver.restore(sess, \"path/to/save/model.ckpt\")\n train_bottlenecks = np.reshape(train_bottlenecks, (1,2048))\n \n c1 = sess.run(\"final_training_ops/Softmax:0\",feed_dict={\"BottleneckInputPlaceholder:0\":train_bottlenecks})\n print(c1)\n \n\nif __name__ == '__main__':\n tf.app.run()\n\n","sub_path":"Inception_V3/test_demo.py","file_name":"test_demo.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"643862453","text":"def metodo_diferencias_divididas():\n # Polinomio interpolación\n # Diferencias diferencia dividida\n # Tarea: Verificar tamaño de vectores,\n # verificar puntos equidistantes en x\n import numpy as np\n import sympy as sym\n import matplotlib.pyplot as plt\n\n # INGRESO , Datos de prueba\n #xi = np.array([3.2, 3.8, 4.2, 4.5])\n #fi = np.array([5.12, 6.42, 7.25, 6.85])\n xi_aux = []\n fi_aux = []\n n_1 = 0\n while(True):\n try:\n n_1 = int(input(\"Cantidad de n >> \"))\n break\n except Exception as e:\n pass\n\n for _ in range(n_1):\n aux = float(input(\"x_{} >> \".format(_)))\n xi_aux.append(aux)\n\n for _ in range(n_1):\n aux = float(input(\"f_{} >> \".format(_)))\n fi_aux.append(aux)\n\n xi = np.array(xi_aux)\n fi = np.array(fi_aux)\n # PROCEDIMIENTO\n\n # Tabla de Diferencias divididas\n titulo = ['i','xi','fi']\n n = len(xi)\n ki = np.arange(0,n,1)\n tabla = np.concatenate(([ki],[xi],[fi]),axis=0)\n tabla = np.transpose(tabla)\n # diferencias \n dfinita = np.zeros(shape=(n,n),dtype=float)\n tabla = np.concatenate((tabla,dfinita), axis=1)\n # Calcula tabla, inicia en columna 3\n [n,m] = np.shape(tabla)\n diagonal = n-1\n j = 3\n while (j < m):\n # Añade título para cada columna\n titulo.append('df'+str(j-2))\n # cada fila de columna\n paso=j-2\n i = 0\n while (i < diagonal):\n numerador = tabla[i+1,j-1]-tabla[i,j-1]\n denominador = xi[i+paso]- xi[i]\n tabla[i,j] = numerador/denominador\n i = i+1\n diagonal = diagonal - 1\n j = j+1\n\n # POLINOMIO con diferencias divididas\n # caso: puntos equidistantes en eje x\n h = xi[1] - xi[0]\n dfinita = tabla[0,3:]\n n = len(dfinita)\n # expresión del polinomio con Sympy\n x = sym.Symbol('x')\n polinomio = fi[0]\n for j in range(1,n,1):\n factor = dfinita[j-1]\n termino = 1\n for k in range(0,j,1):\n termino = termino*(x-xi[k])\n polinomio = polinomio + termino*factor\n # simplifica multiplicando entre (x-xi)\n polisimple = polinomio.expand()\n\n # polinomio para evaluacion numérica\n px = sym.lambdify(x,polisimple)\n\n # Puntos para la gráfica\n muestras = 101\n a = np.min(xi)\n b = np.max(xi)\n pxi = np.linspace(a,b,muestras)\n pfi = px(pxi)\n\n # SALIDA\n np.set_printoptions(precision=3)\n print('Tabla Diferencia dividida')\n print([titulo])\n print(tabla)\n print('dividida: ')\n print(dfinita)\n print('polinomio: ')\n print(polinomio)\n print('polinomio simplificado: ' )\n print(polisimple)\n\n # Gráfica\n plt.plot(xi,fi,'o', label = 'Puntos')\n ##for i in range(0,n,1):\n ## plt.axvline(xi[i],ls='--', color='yellow')\n plt.plot(pxi,pfi, label = 'Polinomio')\n plt.legend()\n plt.xlabel('xi')\n plt.ylabel('fi')\n plt.title('diferencia dividida por newton')\n plt.show()\n#metodo_diferencias_divididas()","sub_path":"Diferencias_divididas.py","file_name":"Diferencias_divididas.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"78571426","text":"'''\r\nPython 3.6.0\r\n\r\nJenny Tso\r\nGUI Drill\r\n\r\n'''\r\n\r\nfrom tkinter import *\r\nimport tkinter as ttk\r\n\r\nimport fileTransfer_gui\r\nimport fileTransfer_functions\r\n\r\nclass ParentFrame(Frame):\r\n def __init__(self, master):\r\n Frame.__init__(self, master)\r\n self.master = master\r\n self.master.minsize(480, 420)\r\n self.master.maxsize(480, 420)\r\n #self.master.resizeable(False, False)\r\n #fileTransfer_functions.center_window(self, 480, 480)\r\n self.master.title(\"Transfer New or Modified Files\")\r\n self.master.configure(bg=\"#F0F0F0\")\r\n #self.master.protocol(\"WM_DELETE_WINDOW\", lambda: phonebook_func.ask_quit(self))\r\n fileTransfer_gui.window(self)\r\n\r\nif __name__ == \"__main__\":\r\n root = ttk.Tk()\r\n Application = ParentFrame(root)\r\n root.mainloop()\r\n \r\n","sub_path":"fileTransfer_main.py","file_name":"fileTransfer_main.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"442241088","text":"import os\nimport glob\nimport platform\nimport shutil\nfrom conans import ConanFile, tools, AutoToolsBuildEnvironment\n\n\nclass ICUBase(ConanFile):\n version = \"64.2\"\n homepage = \"http://site.icu-project.org\"\n license = \"ICU\"\n description = \"ICU is a mature, widely used set of C/C++ and Java libraries \" \\\n \"providing Unicode and Globalization support for software applications.\"\n url = \"https://github.com/bincrafters/conan-icu\"\n topics = (\"conan\", \"icu\", \"icu4c\", \"i see you\", \"unicode\")\n exports = [\"icu_base.py\"]\n # exports_sources = [\"patches/*.patch\"]\n _source_subfolder = \"source_subfolder\"\n _build_subfolder = \"build_subfolder\"\n _env_build = None\n short_paths = True\n\n @property\n def _the_os(self):\n return self.settings.get_safe(\"os\") or self.settings.get_safe(\"os_build\")\n\n @property\n def _the_arch(self):\n return self.settings.get_safe(\"arch\") or self.settings.get_safe(\"arch_build\")\n\n @property\n def _is_msvc(self):\n return self.settings.compiler == \"Visual Studio\"\n\n @property\n def _is_mingw(self):\n return self._the_os == \"Windows\" and self.settings.compiler == \"gcc\"\n\n def build_requirements(self):\n if self._the_os == \"Windows\":\n #self.build_requires(\"cygwin_installer/2.9.0@bincrafters/stable\")\n self.build_requires(\"msys2_installer/20161025@bincrafters/stable\")\n\n def source(self):\n version = self.version.replace('.', '-')\n version_with_underscore = self.version.replace('.', '_')\n source_url = \"https://github.com/unicode-org/icu/releases/download/release-{0}/icu4c-{1}-src.tgz\".format(version, version_with_underscore)\n self.output.info(\"Downloading {0} ...\".format(source_url))\n tools.get(source_url,\n sha256=\"627d5d8478e6d96fc8c90fed4851239079a561a6a8b9e48b0892f24e82d31d6c\")\n os.rename(\"icu\", self._source_subfolder)\n\n def _replace_pythonpath(self):\n if self._is_msvc:\n srcdir = os.path.join(self.build_folder, self._source_subfolder, \"source\")\n configure = os.path.join(self._source_subfolder, \"source\", \"configure\")\n tools.replace_in_file(configure,\n 'PYTHONPATH=\"$srcdir/data\"',\n 'PYTHONPATH=\"%s\\\\data\"' % srcdir)\n tools.replace_in_file(configure,\n 'PYTHONPATH=\"$srcdir/test/testdata:$srcdir/data\"',\n 'PYTHONPATH=\"%s\\\\test\\\\testdata;%s\\\\data\"' % (srcdir, srcdir))\n\n def _workaround_icu_20545(self):\n if tools.os_info.is_windows:\n # https://unicode-org.atlassian.net/projects/ICU/issues/ICU-20545\n srcdir = os.path.join(self.build_folder, self._source_subfolder, \"source\")\n makeconv_cpp = os.path.join(srcdir, \"tools\", \"makeconv\", \"makeconv.cpp\")\n tools.replace_in_file(makeconv_cpp,\n \"pathBuf.appendPathPart(arg, localError);\",\n \"pathBuf.append('/', localError); pathBuf.append(arg, localError);\")\n\n def build(self):\n for filename in glob.glob(\"patches/*.patch\"):\n self.output.info('applying patch \"%s\"' % filename)\n tools.patch(base_path=self._source_subfolder, patch_file=filename)\n\n if self._is_msvc:\n run_configure_icu_file = os.path.join(self._source_subfolder, 'source', 'runConfigureICU')\n\n flags = \"-%s\" % self.settings.compiler.runtime\n if self.settings.get_safe(\"build_type\") == 'Debug':\n flags += \" -FS\"\n tools.replace_in_file(run_configure_icu_file, \"-MDd\", flags)\n tools.replace_in_file(run_configure_icu_file, \"-MD\", flags)\n\n self._replace_pythonpath() # ICU 64.1\n self._workaround_icu_20545()\n\n self._env_build = AutoToolsBuildEnvironment(self)\n if not self.options.get_safe(\"shared\"):\n 
self._env_build.defines.append(\"U_STATIC_IMPLEMENTATION\")\n if tools.is_apple_os(self._the_os):\n self._env_build.defines.append(\"_DARWIN_C_SOURCE\")\n if self.settings.get_safe(\"os.version\"):\n self._env_build.flags.append(tools.apple_deployment_target_flag(self._the_os,\n self.settings.os.version))\n\n build_dir = os.path.join(self.build_folder, self._source_subfolder, 'build')\n os.mkdir(build_dir)\n\n with tools.vcvars(self.settings) if self._is_msvc else tools.no_op():\n with tools.environment_append(self._env_build.vars):\n with tools.chdir(build_dir):\n # workaround for https://unicode-org.atlassian.net/browse/ICU-20531\n os.makedirs(os.path.join(\"data\", \"out\", \"tmp\"))\n\n self.run(self._build_config_cmd, win_bash=tools.os_info.is_windows)\n if self.options.get_safe(\"silent\"):\n silent = '--silent' if self.options.silent else 'VERBOSE=1'\n else:\n silent = '--silent'\n command = \"make {silent} -j {cpu_count}\".format(silent=silent,\n cpu_count=tools.cpu_count())\n self.run(command, win_bash=tools.os_info.is_windows)\n if self.options.get_safe(\"with_unit_tests\"):\n command = \"make {silent} check\".format(silent=silent)\n self.run(command, win_bash=tools.os_info.is_windows)\n command = \"make {silent} install\".format(silent=silent)\n self.run(command, win_bash=tools.os_info.is_windows)\n\n self._install_name_tool()\n\n def package(self):\n if self._is_msvc:\n for dll in glob.glob( os.path.join( self.package_folder, 'lib', '*.dll' ) ):\n shutil.move( dll, os.path.join( self.package_folder, 'bin' ) )\n\n self.copy(\"LICENSE\", dst=\"licenses\", src=os.path.join(self.source_folder, self._source_subfolder))\n\n @staticmethod\n def detected_os():\n if tools.OSInfo().is_macos:\n return \"Macos\"\n if tools.OSInfo().is_windows:\n return \"Windows\"\n return platform.system()\n\n @property\n def cross_building(self):\n if tools.cross_building(self.settings):\n if self._the_os == self.detected_os():\n if self._the_arch == \"x86\" and tools.detected_architecture() == \"x86_64\":\n return False\n return True\n return False\n\n @property\n def build_config_args(self):\n prefix = self.package_folder.replace('\\\\', '/')\n platform = {(\"Windows\", \"Visual Studio\"): \"Cygwin/MSVC\",\n (\"Windows\", \"gcc\"): \"MinGW\",\n (\"AIX\", \"gcc\"): \"AIX/GCC\",\n (\"AIX\", \"xlc\"): \"AIX\",\n (\"SunOS\", \"gcc\"): \"Solaris/GCC\",\n (\"Linux\", \"gcc\"): \"Linux/gcc\",\n (\"Linux\", \"clang\"): \"Linux\",\n (\"Macos\", \"gcc\"): \"MacOSX\",\n (\"Macos\", \"clang\"): \"MacOSX\",\n (\"Macos\", \"apple-clang\"): \"MacOSX\"}.get((str(self._the_os),\n str(self.settings.compiler)))\n arch64 = ['x86_64', 'sparcv9', 'ppc64']\n bits = \"64\" if self._the_arch in arch64 else \"32\"\n args = [platform,\n \"--prefix={0}\".format(prefix),\n \"--with-library-bits={0}\".format(bits),\n \"--disable-samples\",\n \"--disable-layout\",\n \"--disable-layoutex\"]\n\n if self.cross_building:\n if self._env_build.build:\n args.append(\"--build=%s\" % self._env_build.build)\n if self._env_build.host:\n args.append(\"--host=%s\" % self._env_build.host)\n if self._env_build.target:\n args.append(\"--target=%s\" % self._env_build.target)\n\n if self.options.get_safe(\"data_packaging\"):\n args.append(\"--with-data-packaging={0}\".format(self.options.data_packaging))\n else:\n args.append(\"--with-data-packaging=static\")\n\n if self._is_mingw:\n mingw_chost = 'i686-w64-mingw32' if self._the_arch == 'x86' else 'x86_64-w64-mingw32'\n args.extend([\"--build={0}\".format(mingw_chost),\n 
\"--host={0}\".format(mingw_chost)])\n\n if self.settings.get_safe(\"build_type\") == \"Debug\":\n args.extend([\"--disable-release\", \"--enable-debug\"])\n if self.options.get_safe(\"shared\"):\n args.extend([\"--disable-static\", \"--enable-shared\"])\n else:\n args.extend([\"--enable-static\", \"--disable-shared\"])\n if not self.options.get_safe(\"with_unit_tests\"):\n args.append('--disable-tests')\n return args\n\n @property\n def _build_config_cmd(self):\n return \"../source/runConfigureICU %s\" % \" \".join(self.build_config_args)\n\n def _install_name_tool(self):\n if tools.is_apple_os(self._the_os):\n with tools.chdir(os.path.join(self.package_folder, 'lib')):\n for dylib in glob.glob('*icu*.{0}.dylib'.format(self.version)):\n command = 'install_name_tool -id {0} {1}'.format(os.path.basename(dylib), dylib)\n self.output.info(command)\n self.run(command)\n","sub_path":"icu_base.py","file_name":"icu_base.py","file_ext":"py","file_size_in_byte":9508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"16302025","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pickle\nimport codecs\nimport glob\nimport os\n\nhome = os.getcwd()\n\ndef get_text_list(path):\n list_id = []\n os.chdir(path)\n for file in glob.glob('*.txt'):\n list_id.append(file)\n return list_id\n\ndef get_train_text():\n pos_list = get_text_list('data_train/train/pos')\n os.chdir(home)\n pos_text = []\n for item in pos_list:\n f = codecs.open(''.join(['data_train/train/pos/', item]), 'r', 'utf-8')\n pos_text.append(f.read().replace('\\n', ' '))\n f.close()\n neg_list = get_text_list('data_train/train/neg')\n os.chdir(home)\n neg_text = []\n for item in neg_list:\n f = codecs.open(''.join(['data_train/train/neg/', item]), 'r', 'utf-8')\n neg_text.append(f.read().replace('\\n', ' '))\n f.close()\n del pos_list, neg_list\n return pos_text, neg_text\n\ndef get_test_text():\n pos_list = get_text_list('data_train/test/pos')\n os.chdir(home)\n pos_text = []\n for item in pos_list:\n f = codecs.open(''.join(['data_train/test/pos/', item]), 'r', 'utf-8')\n pos_text.append(f.read().replace('\\n', ' '))\n f.close()\n neg_list = get_text_list('data_train/test/neg')\n os.chdir(home)\n neg_text = []\n for item in neg_list: \n f = codecs.open(''.join(['data_train/test/neg/', item]), 'r', 'utf-8')\n neg_text.append(f.read().replace('\\n', ' '))\n f.close()\n del pos_list, neg_list\n return pos_text, neg_text\n\npos_text, neg_text = get_train_text()\ndata_train = pos_text + neg_text\nos.chdir(home)\npos_text, neg_text = get_test_text()\ndata_test = pos_text + neg_text\nos.chdir(home)\ndel pos_text, neg_text\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\ntfidf_vectorizer = TfidfVectorizer()\nX_train = tfidf_vectorizer.fit_transform(data_train)\nX_test = tfidf_vectorizer.transform(data_test)\ndel data_train, data_test\npickle.dump(X_train, open('train','wb'))\npickle.dump(X_test, open('test','wb'))\ny_train = np.append(np.ones(15000), np.zeros(15000))\n#y_train = y_train.reshape((-1, 1))\ny_test = np.append(np.ones(5000), np.zeros(5000))\n\n#from sklearn.neural_network import MLPClassifier\n#from sklearn.metrics import accuracy_score\n#from sklearn.metrics import confusion_matrix\n#\n#clf = MLPClassifier(hidden_layer_sizes=(100,50,50), alpha=1e-5, max_iter=40,\n# verbose=10, random_state=1, tol=0.000000001)\n#clf.fit(X_train, y_train)\n#y_pred = clf.predict(X_test)\n#print(accuracy_score(y_test, y_pred))\n#cm = confusion_matrix(y_test, y_pred)\n#print(cm)\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom xgboost import XGBClassifier\n\nnames = ['Decision Tree', 'Random Forest', 'Gradient Boosting', 'XGBoost']\nmodels = [\n DecisionTreeClassifier(),\n RandomForestClassifier(n_estimators=100),\n GradientBoostingClassifier(n_estimators=100,\n validation_fraction=0.2,\n n_iter_no_change=5, tol=0.00001),\n XGBClassifier()]\naccuracy = []\nfor name, clf in zip(names, models):\n clf.fit(X_train, y_train)\n score = clf.score(X_test, y_test)\n accuracy.append(score)\n print('{} has accuracy {:.4f}'.format(name, score))\n\n#sns.heatmap(cm, center=True)\n#plt.show()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"155773239","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lilly', '0002_study_category'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='conditions',\n name='condition_term_rev',\n field=models.CharField(help_text=b'condition term words reversed and comma removed to match Study conditions', max_length=1000, null=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='conditions',\n name='condition_term',\n field=models.CharField(max_length=1000),\n preserve_default=True,\n ),\n ]\n","sub_path":"studies/lilly/migrations/0003_auto_20150110_1614.py","file_name":"0003_auto_20150110_1614.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"113321135","text":"from django.urls import path\n\nfrom .views.base import IndexView, get_data_for_chart\nfrom .views.monitoring import MonitoringLhkasnView, get_table_lhkasn, MonitoringLhkpnView, get_table_lhkpn\nfrom .views.verifikator import BebanVerifikatorView, get_table_verifikator\n\napp_name = 'dashboardApp'\n\nurlpatterns = [\n path('ajax/get_data_for_chart/', get_data_for_chart, name='getDataChartUrl'),\n path('ajax/get_table_verifikator/', get_table_verifikator, name='getTableVerifikatorUrl'),\n path('ajax/get_table_lhkasn/', get_table_lhkasn, name='getTableLhkasnUrl'),\n path('ajax/get_table_lhkpn/', get_table_lhkpn, name='getTableLhkpnUrl'),\n\n path('beban-verifikator/', BebanVerifikatorView.as_view(), name='bebanVerifikatorUrl'),\n path('monitoring-lhkasn/', MonitoringLhkasnView.as_view(), name='monitoringLhkasnUrl'),\n path('monitoring-lhkpn/', MonitoringLhkpnView.as_view(), name='monitoringLhkpnUrl'),\n path('', IndexView.as_view(), name='indexUrl'),\n]\n","sub_path":"simpanan_berharga_v2/dashboard/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"270057716","text":"import cv2\nfrom pathlib import Path\n\nfaceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\nvc = cv2.VideoCapture(0)\n\nprint(\"Enter the id and name of the person:\")\nuserId = input()\nuserName = input()\n\ncount = 1\n\ndef saveImage(img, userName, userId, imgId):\n Path(\"dataset/{}\".format(userName)).mkdir(parents=True, exist_ok=True)\n cv2.imwrite(\"dataset/{}/{}_{}.jpg\".format(userName, userId, imgId), img)\n\nwhile True:\n\n _, img = vc.read()\n\n originalImg = img\n\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(gray_img,\n scaleFactor=1.2,\n minNeighbors=5,\n minSize=(50, 50))\n\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n coords = [x, y, w, h]\n\n imS = cv2.resize(img, (960, 540))\n cv2.imshow(\"identified image\", imS)\n\n key = cv2.waitKey(1) & 0xff\n\n if key == ord('s'):\n if count <= 100:\n roi_img = originalImg[coords[1]:coords[1]+coords[3], coords[0]:coords[0]+coords[2]]\n saveImage(roi_img, userName, userId, count)\n count += 1\n else:\n break\n elif key == ord('q'):\n break\n\n if cv2.waitKey(1) & 0xff == ord('q'):\n break\n\nvc.release()\ncv2.destroyAllWindows()\n","sub_path":"generate_dataset.py","file_name":"generate_dataset.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"529224828","text":"import json\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.utils.serializer_helpers import ReturnList\n\nclass RatebumJSONRenderer(JSONRenderer):\n charset = 'utf-8'\n object_label = 'object'\n\n def render(self, data, media_type=None, renderer_context=None):\n # If the view throws an error (such as the user can't be authenticated)\n # `data` will contain an `errors` key. \n # the default JSONRenderer will handle rendering errors\n\n if type(data) is ReturnList:\n return json.dumps({\n self.pagination_object_label: data\n })\n\n if type(data) is dict:\n errors = data.get('errors', None)\n if errors is not None:\n return json.dumps({\n 'errors': data['errors']\n })\n\n return json.dumps({\n self.object_label: data\n })","sub_path":"ratebum/apps/core/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"609165137","text":"# App to get support_action records with specific action keys.\nimport argparse\nimport requests\nimport yaml\nimport json\n\n# Get the login credentials\nparser = argparse.ArgumentParser(description='Read supporters')\nparser.add_argument('--login', dest='loginFile', action='store',\n help='YAML file with login credentials')\nparser.add_argument('--input', dest='keyFile', action='store',\n\t\t help='Backup file of supporter_action_KEY records to delete')\n\nargs = parser.parse_args()\ncred = yaml.load(open(args.loginFile))\n\n# Authenticate\npayload = {\n 'email': cred['email'],\n 'password': cred['password'],\n 'json': True }\ns = requests.Session()\nu = 'https://' + cred['host'] + '/api/authenticate.sjs'\nr = s.get(u, params=payload)\nj = r.json()\nif j['status'] == 'error':\n print('Authentication failed: ', j)\n exit(1)\n\nprint('Authentication: ', j)\n\nf = open(args.keyFile, 'r')\nkeys = [ line.split('\\t')[0] for line in f ]\nf.close()\n\n# We have a backup in the file. That means that we can just whack\n# the records without the API examining them first.\nfor key in keys:\n print(f\"{key}\")\n payload = {'json': True,\n 'object': 'supporter_action',\n 'key': key }\n u = 'https://'+ cred['host'] +'/delete'\n r = s.get(u, params=payload)\n print(f\"{key}: {r.json()}\")\n","sub_path":"supporter_action_cleanup.py","file_name":"supporter_action_cleanup.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"376662421","text":"import math\n\nprint(\"\\nChoose either 'investment' or 'bond' from the menu below to proceed:\\n\\n 1. Investement - to calculate the amount of interest you'll earn on interest.\\n 2. Bond - to calculate the amount you'll have to pay on a home loan.\\n\")\nuser_choice = input(\"Please enter your selection here: \")\n#Initial input from user\nif len(user_choice) == 0:\n print(\"**Error - no selection has been made.**\\n\")\n#Error message\nif user_choice.lower() == \"investment\":\n print(\"\\nPlease answer the following questions:\")\n deposit = float(input(\"\\n\\t 1. Please enter the amount of money that you want to invest: R\"))\n int_rate = float(input(\"\\n\\t 2. Please enter the interest rate: \"))\n years = float(input(\"\\n\\t 3. How many years would like to invest for your money for: \"))\n interest = input(\"\\n\\t 4. Would you like to earn simple or compound interest? \")\n #Investment option input from user \n interest_dec = int_rate / 100\n simple_interest = round(deposit*(1+((interest_dec)*years)), 2) \n compound_interest = round((deposit)*math.pow((1+interest_dec),years), 2) \n#Formulae \n if interest.lower() == \"simple\":\n print(\"\\nThe total amount is: R\" + str(simple_interest))\n#Simple interest option\n if interest.lower() == \"compound\":\n print(\"\\nThe total amount is: R\" + str(compound_interest)) \n#Compound interest option\nif user_choice.lower() == \"bond\":\n print(\"\\nPlease answer the following questions:\")\n house_price = float(input(\"\\n\\t1. Please enter the value of the house: \"))\n house_int_rate = float(input(\"\\n\\t2. Please enter the interest rate: \"))\n months = float(input(\"\\n\\t3. Please enter the loan term in months: \"))\n#Bond option questions\n monthly_interest = house_int_rate / 100 / 12\n bond = round((monthly_interest*house_price) / (1-(1+monthly_interest)**(-months)), 2)\n#Bond option formula\n print(\"\\nYour monthly repayment will be: R\" + str(bond))\n","sub_path":"finance_calculators.py","file_name":"finance_calculators.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"496012352","text":"import os\nimport cv2\nimport numpy\n\ndir = \"C:/Users/jeffp/pytorch-CycleGAN-and-pix2pix/datasets/cervoai_pix2pix_axial\"\ndir_train = dir + \"/train\"\ndir_test = dir + \"/test\"\ndir_val = dir + \"/val\"\ndir_test_fail = dir + \"/test_fail\"\ndir_list = [dir_train, dir_test, dir_val]\nfor directory in dir_list:\n breakpoint()\n for subdir, dirs, files in os.walk(directory):\n for file in files:\n img = cv2.imread(subdir + \"/\" + file)\n if numpy.sum(img) == 0:\n print(subdir + \"/\" + file + \" does not contain an image and will be removed.\")\n os.remove(subdir + \"/\" + file)\n","sub_path":"util/remove_black_images.py","file_name":"remove_black_images.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"39712109","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\nchromeDriver = \"C:\\\\Users\\\\parksoyoung\\\\Downloads\\\\chromedriver_win32\\\\chromedriver.exe\"\ndriver = webdriver.Chrome(chromeDriver)\ndriver.get('https://mobileticket.interpark.com/Goods/GoodsInfo/info?GoodsCode=20001874&is1=ticket&is2=product')\ntime.sleep(3)\n\ndriver.find_element_by_xpath('//*[@id=\"root\"]/div[@class=\"contents\"]/div[@class=\"productsInformation\"]/div[@class=\"productsTabWrap\"]'\n '/*[@id=\"productsTab\"]/ul/li[3]').click()\nelem = driver.find_element_by_tag_name(\"body\")\n\n# Get scroll height\nlast_height = driver.execute_script(\"return document.body.scrollHeight\")\n\nwhile True:\n for i in range(10):\n # Scroll down to bottom\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n time.sleep(30)\n\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n break\n last_height = new_height\n\ntime.sleep(3)\nsource = driver.page_source\nsoup = BeautifulSoup(source, \"html.parser\")\n\ndriver.quit()\n\nsports = soup.find(\"ul\", {\"id\": \"writerInfo\"})\ncomments_li =sports.find_all(\"li\")\nprint(len(comments_li))\n\nresult = pd.DataFrame()\ntitles = []\nreviews = []\nlabels = []\nrates = []\n\nfor li in comments_li:\n #print(li.find(\"div\", {\"class\": \"userBoardTitle\"}).find(\"b\").find(text=True))\n title = li.find(\"div\", {\"class\": \"userBoardTitle\"}).find(\"b\").find(text=True)\n #print(li.find(\"div\", {\"class\": \"boardContentTxt\"}).find(text=True))\n text = li.find(\"div\", {\"class\": \"boardContentTxt\"}).find(text=True)\n #print(li.find(\"div\", {\"class\": \"shareInfo\"}).find(\"div\").get(\"class\"))\n rate = li.find(\"div\", {\"class\": \"shareInfo\"}).find(\"div\").get(\"class\")\n score = int(rate[1][5:])\n #print(score)\n if score >= 8:\n label = 1\n elif score <= 6:\n label = 0\n else:\n continue\n\n titles.append(title)\n reviews.append(text)\n labels.append(label)\n rates.append(score)\n\n\n\nresult['title'] = titles\nresult['review'] = reviews\nresult['label'] = labels\nresult['rating'] = rates\n\nresult.to_csv('sports_auc.txt', encoding=\"utf8\", sep=\"\\t\")","sub_path":"data/crawling/sports_auction.py","file_name":"sports_auction.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"529359140","text":"# -*- coding: utf-8 -*-\n\"\"\"Setup file for the Pigi1300 project.\n\"\"\"\n\nimport codecs\nimport os.path\nimport re\nimport sys\nfrom setuptools import setup, find_packages\n\nversion = None\nfor line in codecs.open(os.path.join('pigi1300', '__init__.py'), 'r', encoding='utf-8'):\n matcher = re.match(r\"\"\"^__version__\\s*=\\s*['\"](.*)['\"]\\s*$\"\"\", line)\n version = version or matcher and matcher.group(1)\n\n# get README content from README.md file\nwith codecs.open(os.path.join(os.path.dirname(__file__), 'README.md'), encoding='utf-8') as fd:\n long_description = fd.read()\n\nentry_points = {u'console_scripts': [u'pigi1300-manage = djangofloor.scripts:manage',\n u'pigi1300-celery = djangofloor.scripts:celery',\n u'pigi1300-gunicorn = djangofloor.scripts:gunicorn']}\n\nsetup(\n name='pigi1300',\n version=version,\n description='No description yet.',\n long_description=long_description,\n author='Matthieu Gallet',\n author_email='mgallet@19pouces.net',\n license='CeCILL-B',\n url='',\n entry_points=entry_points,\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n test_suite='pigi1300.tests',\n install_requires=['djangofloor', 'PyPDF2', 'pybarcode', 'pillow', 'pypng', 'PyQRCode', 'reportlab', 'WeasyPrint'],\n setup_requires=[],\n classifiers=[],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"417578904","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom ..contacts.models import Contact\nfrom .models import OpenHouse\n\n\n@login_required\ndef kiosk_enter(request, house_key):\n open_house = get_object_or_404(OpenHouse, key=house_key)\n return render(request, 'openhouses/enter.html', {'open_house': open_house})\n\n@login_required\ndef kiosk_welcome(request, house_key):\n open_house = get_object_or_404(OpenHouse, key=house_key)\n return render(request, 'openhouses/welcome.html', {'open_house': open_house, 'associate': request.user})\n\n@login_required\ndef kiosk_form(request, house_key, kind):\n open_house = get_object_or_404(OpenHouse, key=house_key)\n assert kind in ['broker', 'buyer']\n\n if 'GET' == request.method:\n return render(request, 'openhouses/form.html', {'open_house': open_house, 'kind': kind})\n elif 'POST' == request.method:\n email = request.POST.get('email_personal')\n email_matches = Contact.objects.filter(email_personal=email)\n if email and email_matches.exists():\n contact = email_matches[0]\n else:\n contact = Contact(owner=request.user, open_house_visit=open_house)\n\n for field in Contact.EDITABLE_FIELDS:\n value = request.POST.get(field)\n if value:\n setattr(contact, field, value)\n\n agent_data = {k.split('[')[1].strip(']'): v for k,v in request.POST.items() if k.startswith('agent[')}\n if 'first_name' in agent_data and 'last_name' in agent_data:\n agent_data['name'] = \"{} {}\".format(agent_data['first_name'], agent_data['last_name'])\n\n agent_data = filter_fields(agent_data, ContactTeamMember.EDITABLE_FIELDS)\n mortgage_data = {k.split('[')[1].strip(']'): v for k,v in request.POST.items() if k.startswith('mortgage[')}\n mortgage_data = filter_fields(mortgage_data, ContactTeamMember.EDITABLE_FIELDS)\n\n if len(agent_data):\n contact.team_member_set.create(**agent_data)\n\n if len(mortgage_data):\n contact.team_member_set.create(**mortgage_data)\n\n contact.mortage_qualified = request.POST.get('mortgage_qualified') == 'true'\n contact.save()\n\n\n return redirect(open_house.kiosk_url())\n\n\ndef send_customer_email(contact, open_house):\n pass\n","sub_path":"backend/hltpy/openhouses/kiosk_views.py","file_name":"kiosk_views.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"15905765","text":"import discord\nfrom discord.ext import commands\nfrom random import randint\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport datetime\n\nprint('Discord version:', discord.__version__)\n\nprefix = '!'\n\nwith open('token_test_bot.txt', 'r') as file:\n\ttoken = file.readline()\n\nbot = commands.Bot(command_prefix=prefix)\n\nstart_time = datetime.datetime.now()\n\n@bot.event\nasync def on_ready():\n\tprint('Bot is ready!')\n\n\n@bot.command(pass_context=True)\nasync def weer(ctx, *, msg=None):\n\t'''Toont het weerbericht'''\n\n\tplaatsen_dict = {\n\t\t\"haasrode\": \"http://www.meteo-info.be/nl/europa/belgie/weer-haasrode/details/N-2733974/\",\n\t\t\"leuven\": \"http://www.meteo-info.be/nl/europa/belgie/weer-leuven/details/N-2739976/\",\n\t\t\"oostende\": \"http://www.meteo-info.be/nl/europa/belgie/weer-oostende/details/N-2743704/\",\n\t}\n\n\tte_kiezen_plaatsen = [plaats for plaats in plaatsen_dict]\n\n\tif msg not in te_kiezen_plaatsen:\n\t\tplaatsen_str = ' | '.join(te_kiezen_plaatsen)\n\t\tawait bot.say('Gebruik: `!weer { ' + plaatsen_str + ' }`')\n\n\telse:\n\t\tbezig = await bot.say('Bezig...')\n\n\t\thtml = requests.get(plaatsen_dict[msg]).text[15100:17500]\n\n\t\tsoup = BeautifulSoup(html, features=\"html.parser\")\n\t\tdiv_text = soup.find(\"div\", {\"id\": \"weather-detail-summary\"}).getText()\n\n\t\tsearch_str = 'Gem. wind: (.*) km/h\\n.*Rel. luchtvochtigheid: (.*) %\\n\\n\\n(.*)'\n\t\tm = re.search(search_str, div_text)\n\n\t\twindsnelheid, luchtvochtigheid, temperatuur = m.group(1), m.group(2), m.group(3)\n\n\t\tembed = discord.Embed(title='Weerbericht',\n\t\t\t\t\t\t\t color=randint(0, 0xffffff),\n\t\t\t\t\t\t\t description='Het weer in ' + msg)\n\t\t# color=discord.Color.green()\n\n\t\tembed.set_author(name=bot.user.name, icon_url=bot.user.avatar_url)\n\n\t\tmsg_author = ctx.message.author\n\n\t\tavatar_url = msg_author.avatar_url\n\t\tif not avatar_url:\n\t\t\tavatar_url = msg_author.default_avatar_url\n\t\tembed.set_thumbnail(url=avatar_url)\n\n\t\tembed.add_field(name='Gemiddelde windsnelheid', value=windsnelheid + ' km/h', inline=False)\n\t\tembed.add_field(name='Relatieve luchtvochtigheid', value=luchtvochtigheid + ' %', inline=False)\n\t\tembed.add_field(name='Temperatuur', value=temperatuur, inline=False)\n\t\tembed.add_field(name='Tijd', value=str(datetime.datetime.now().replace(microsecond=0)), inline=False)\n\n\t\tembed.set_footer(text='Gevraagd door ' + msg_author.display_name)\n\n\t\tawait bot.delete_message(bezig)\n\t\tawait bot.say(embed=embed)\n\t\tawait bot.send_message(msg_author, embed=embed)\n\n\n@bot.command()\nasync def uptime():\n\tawait bot.say('Uptime: ' + str(datetime.datetime.now().replace(microsecond=0) - start_time.replace(microsecond=0)))\n\n\nbot.run(token)\n","sub_path":"weer_v1.py","file_name":"weer_v1.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"576107694","text":"ID_PERSISTS = 1\nID_CHANGES_AND_PERSISTS = 2\nID_RESETS = 3\n\ndef main(request, response):\n response.headers.set(\"Content-Type\", \"text/event-stream\")\n try:\n test_type = int(request.GET.first(\"type\", ID_PERSISTS))\n except:\n test_type = ID_PERSISTS\n\n if test_type == ID_PERSISTS:\n return \"id: 1\\ndata: 1\\n\\ndata:2\\n\\n\"\n\n elif test_type == ID_CHANGES_AND_PERSISTS:\n return \"id: 1\\ndata: 1\\n\\nid: 2\\ndata:2\\n\\ndata:3\\n\\n\"\n\n elif test_type == ID_RESETS:\n return \"id: 1\\ndata: 1\\n\\nid:\\ndata:2\\n\\ndata:3\\n\\n\"\n\n else:\n return \"data: invalid_test\\n\\n\"\n","sub_path":"eventsource/resources/last-event-id2.py","file_name":"last-event-id2.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"214964187","text":"#!/usr/bin/env python3\n\ndef fun1(s):\n if len(s) > 8:\n return True\n else:\n return False\n\ndef fun2(s):\n num1 = 0\n num2 = 0\n num3 = 0\n num4 = 0\n for ss in s:\n if 'a'<=ss<='z':\n num1 = 1\n elif 'A' <=ss<='Z':\n num2 = 1\n elif '0'<=ss<='9':\n num3 = 1\n else:\n num4 = 1\n\n if (num1+num2+num3+num4) >= 3:\n return True\n else:\n return False\n\ndef fun3(s):\n for i in range(len(s)-3):\n if s[i:i+3] in s[i+1:]:\n return False\n break\n return True\n\nwhile True:\n try:\n a = input()\n if fun1(a) and fun2(a) and fun3(a):\n print('OK')\n else:\n print('NG')\n\n except:\n break\n","sub_path":"huawei_jishi/20_jiandanmima.py","file_name":"20_jiandanmima.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"491840672","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\nfrom sklearn.linear_model import LogisticRegression\n\ndf = pd.read_csv('C:\\\\Users\\\\Kanishk Wadhwa\\\\MLAPP\\\\trainloan.csv')\nfor column in ['Gender','Married','Dependents','Self_Employed','Loan_Amount_Term','Credit_History']:\n df[column].fillna(df[column].mode()[0],inplace=True)\n\ndf['LoanAmount']=df['LoanAmount'].fillna(df['LoanAmount'].dropna().mean())\ndf['Dependents'] = df['Dependents'].str.rstrip('+')\ndf['Gender'] = df['Gender'].map({'Female':0,'Male':1}).astype(np.int)\ndf['Married'] = df['Married'].map({'No':0, 'Yes':1}).astype(np.int)\ndf['Education'] = df['Education'].map({'Not Graduate':0, 'Graduate':1}).astype(np.int)\ndf['Self_Employed'] = df['Self_Employed'].map({'No':0, 'Yes':1}).astype(np.int)\ndf['Loan_Status'] = df['Loan_Status'].map({'N':0, 'Y':1}).astype(np.int)\ndf['Dependents'] = df['Dependents'].astype(np.int)\n\narray =df.values\n\nX=array[:,6:10]\nX=X.astype('int')\ny=array[:,12]\ny=y.astype('int')\n#X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0)\n\nlr=LogisticRegression()\nlr.fit(X,y)\n\npickle.dump(lr, open('model.pkl','wb'))\n\nmodel = pickle.load(open('model.pkl','rb'))","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"653019951","text":"import wx\nimport sys\nimport praw\nimport csv\nimport os\nimport time\nimport json\n\nclass MyFrame(wx.Frame):\n \"\"\"A class with two boxes, a button and a statictext\"\"\"\n def __init__(self, parent):\n \"\"\"constructor\"\"\"\n wx.Frame.__init__(self, parent, -1, 'Easy Corpus Compiler 0.01', size=(350, 200))\n self.panel = wx.Panel(self)\n\n self.gtext = wx.TextCtrl(self.panel, -1, size =(200,-1), value=\"subreddit goes here\")\n button = wx.Button(self.panel,wx.ID_ANY, label=\"enter\", size=(100, 50))\n self.Bind(wx.EVT_BUTTON, self.yesitstrue, button)\n self.stext = wx.StaticText(self.panel,wx.ID_ANY, label='Ready')\n\n siz = wx.BoxSizer(wx.VERTICAL)\n\n siz.Add(self.gtext,1)\n siz.Add(button,1)\n siz.Add(self.stext,1)\n self.panel.SetSizer(siz)\n\n def yesitstrue(self, event):\n try:\n config = json.loads(open('config.json').read())\n pass\n except:\n dlg2 = wx.MessageDialog(self,'Please make sure that you have the config.json file present in the same folder',wx.OK)\n result2 = dlg2.ShowModal()\n dlg2.Destroy()\n if result2 == wx.ID_OK:\n self.Destroy()\n username = config['username']\n password = config['password'] \n useragent = config['userAgent']\n #login to reddit\n r = praw.Reddit(useragent)\n r.login(username, password)\n self.stext.SetLabel('Logging into Reddit.....')\n #Change to selected location\n #os.chdir(sav)\n #Folder to contain the files\n if not os.path.exists('corpus'):\n os.makedirs('corpus')\n \n \n #Get subreddit\n subrdt = self.gtext.GetValue()\n #raw_input('Please enter a subreddit to collect comments from: ')\n #set up lists\n sublist = []\n comlist = []\n #get hot submissions\n subreddit = r.get_subreddit(subrdt)\n posts = subreddit.get_hot()\n for submission in posts:\n sublist.append(submission.id)\n #set number of loops\n missionlist = len(sublist)\n mlst = str(missionlist)\n self.stext.SetLabel('There are '+mlst+' hot submissions in that subreddit.')\n #define starting point for using the list\n i = 0\n if missionlist > 100:\n missionlist = 100\n self.stext.SetLabel('That\\'s too many, I am going to parse 100 instead.')\n def cleanUp(text):\n alpha = text.encode('utf-8')\n return alpha\n \n while i < missionlist:\n submission = r.get_submission(submission_id=sublist[i])\n submission.replace_more_comments(limit=16, threshold=10)\n flat_comments = praw.helpers.flatten_tree(submission.comments)\n pstitle = submission.title.encode('ascii',errors='ignore')\n self.stext.SetLabel('Now processing: '+pstitle)\n stitle = (((submission.title.encode('ascii',errors='ignore')).replace(' ','_')).replace('\"','')).replace('/','_')\n ftitle = 'corpus/'+stitle[:40]\n file = open('corpus/'+stitle[:40]+'.csv','a')\n writer = csv.writer(file)\n writer.writerow(['Karma','Comment Body','Comment ID'])\n for comment in flat_comments: \n comtitle = comment.id\n if comment.id not in comlist:\n cleancomment = cleanUp(comment.body)\n comlist.append(comment.id)\n cleanscore = str(comment.score)\n writer.writerow([cleanscore,cleancomment,comtitle])\n file.close()\n i+=1\n time.sleep(10)\n \n self.stext.SetLabel('Done')\n\n\nif __name__ == '__main__':\n app = wx.App()\n frame = MyFrame(None)\n frame.Show()\n app.MainLoop()","sub_path":"EasyCorpusCompiler.Mac.py","file_name":"EasyCorpusCompiler.Mac.py","file_ext":"py","file_size_in_byte":3841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"43547126","text":"from titus_isolate import log\nfrom titus_isolate.model.processor.utils import get_workload_ids\n\n\ndef get_updates(cur_cpu, new_cpu):\n updates = {}\n for workload_id in get_workload_ids(new_cpu):\n new_thread_ids = __get_threads(new_cpu, workload_id)\n cur_thread_ids = __get_threads(cur_cpu, workload_id)\n if set(new_thread_ids) != set(cur_thread_ids):\n log.info(\"workload: '{}' updated threads from: '{}' to: '{}'\".format(workload_id, cur_thread_ids, new_thread_ids))\n updates[workload_id] = new_thread_ids\n\n return updates\n\n\ndef __get_threads(cpu, workload_id):\n return [t.get_id() for t in cpu.get_threads() if workload_id in t.get_workload_ids()]\n","sub_path":"titus_isolate/isolate/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"598288895","text":"__author__ = 'Caleytown'\n\nimport numpy as np\nfrom random import randint\nimport random\nfrom networkFolder.functionList import Map, WorldEstimatingNetwork, DigitClassificationNetwork\n\n# Create the world estimating network\nuNet = WorldEstimatingNetwork()\n\n# Create the digit classification network\nclassNet = DigitClassificationNetwork()\n\n\ndef get_goal(digit):\n \"\"\"\n Returns a tuple containing\n - the goal location based on the digit\n \"\"\"\n goals = [(0, 27), (27, 27), (27, 0)]\n if digit in range(0, 3):\n goal = goals.pop(0)\n elif digit in range(3, 6):\n goal = goals.pop(1)\n elif digit in range(6, 10):\n goal = goals.pop(2)\n else:\n raise ValueError(\"Bad digit input: \" + str(digit))\n return goal\n\n\ndef compute_distance(pos1, pos2, which_dist='manhattan'):\n if which_dist == 'manhattan':\n # Manhattan distance\n dist = abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1])\n else:\n # Euclidean distance\n squared_dist = (pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2\n dist = np.sqrt(squared_dist)\n return dist\n\n\ndef entropy(p):\n # Compute the entropy of a probability distribution p\n log_p = np.log2(p)\n return - np.dot(p, log_p)\n\n\ndef softmax(p):\n # p: probability distribution\n return np.exp(p) / sum(np.exp(p))\n\n\ndef get_adjacent_states(position, image=None):\n \"\"\"\n returns the adjacent states to the current position\n Args:\n image: map\n position: current position of the robot\n\n Returns: dictionary of coordinates (values) of adjacent states and direction leading to those states (keys)\n\n \"\"\"\n neighbors = {}\n pos_left = [position[0] - 1, position[1]]\n pos_right = [position[0] + 1, position[1]]\n pos_up = [position[0], position[1] - 1]\n pos_down = [position[0], position[1] + 1]\n for direction, coordinates in zip(['left', 'right', 'down', 'up'], [pos_left, pos_right, pos_down, pos_up]):\n if coordinates[0] < 0 or coordinates[0] > 27 or coordinates[1] < 0 or coordinates[1] > 27:\n continue\n else:\n neighbors[direction] = coordinates\n return neighbors\n\n\ndef get_neighboring_pixels0(image, position):\n \"\"\"\n retrieve the value of the surrounding pixels at location 'position' given the map\n \"\"\"\n pixel_values = -10000 * np.ones((1, 4)).ravel() # if the position is out of the map, return -1\n\n pos_left = [position[0] - 1, position[1]]\n pos_right = [position[0] + 1, position[1]]\n pos_up = [position[0], position[1] - 1]\n pos_down = [position[0], position[1] + 1]\n for i, pos in enumerate([pos_left, pos_right, pos_down, pos_up]):\n if pos[0] < 0 or pos[0] > 27 or pos[1] < 0 or pos[1] > 27:\n continue\n else:\n pixel_values[i] = image[pos[0], pos[1]]\n return pixel_values, [pos_left, pos_right, pos_down, pos_up]\n\n\ndef get_neighboring_pixels(image, neighbors_position):\n \"\"\"\n retrieve the value of the surrounding pixels at location 'position' given the map\n \"\"\"\n pixel_values = {}\n for action in neighbors_position:\n pos = neighbors_position[action]\n # print('pos:', neighbors_position)\n pixel_values[action] = image[pos[0], pos[1]]\n return pixel_values\n\n\nclass InformedNavigator:\n def __init__(self):\n # The random navigator doesn't have any data members\n # But a more complex navigator may need to keep track of things\n # so you can create data members in this constructor\n # self.my_variable = 0\n\n # initialiaze the entropy to one to signal maximum uncertainty in the beginning\n self.better_goal_loc = None\n self.visited_locations = set()\n self.visited_locations.add((0, 0))\n 
self.directions = ['left', 'right', 'down', 'up']\n self.path = []\n self.alpha = 0.6\n pass\n\n def getAction(self, robot, map):\n \"\"\" Randomly selects a valid direction for the robot to travel\n\n The RandomNavigator completely ignores the incoming map of what has been seen so far.\n Maybe a smarter agent would take this additional info into account...\n \"\"\"\n\n # This loop shows how you can create a mask, an grid of 0s and 1s\n # where 0s represent unexplored areas and 1s represent explored areas\n # This mask is used by the world estimating network\n mask = np.zeros((28, 28))\n for col in range(0, 28):\n for row in range(0, 28):\n if map[col, row] != 128:\n mask[col, row] = 1\n\n # Creates an estimate of what the world looks like\n image = uNet.runNetwork(map, mask)\n\n # Use the classification network on the estimated image\n # to get a guess of what \"world\" we are in (e.g., what the MNIST digit of the world)\n char = classNet.runNetwork(image).ravel()\n output_dist = softmax(char)\n\n robot_loc = robot.getLoc()\n neighbors = get_adjacent_states(robot_loc)\n # neighbors_pixel, neighbors_position = get_neighboring_pixels(image, robot_loc)\n neighbors_pixel = get_neighboring_pixels(image, neighbors)\n # info_gain = np.zeros((1, 4))\n\n self.path.append(robot_loc)\n\n if self.better_goal_loc is not None:\n goal_loc = self.better_goal_loc\n else:\n goal_loc = get_goal(np.argmax(output_dist))\n # print(f'predicted number: {np.argmax(output_dist)} -- goal state returned: {goal_loc}')\n\n direction = None\n\n print('max probability:', max(output_dist))\n\n if max(output_dist) >= 0.40:\n alpha = 0\n else:\n alpha = self.alpha\n\n # elif self.nbr_steps % 15 == 0:\n # self.alpha = self.alpha - self.alpha * 0.05\n\n max_total_cost = -np.inf\n for action in neighbors_pixel.keys():\n info_qual = abs(image[robot_loc[0], robot_loc[1]] - neighbors_pixel[action])\n cost = compute_distance(goal_loc, neighbors[action])\n neighbor_cost = alpha * info_qual - (1 - alpha) * cost\n if neighbor_cost >= max_total_cost:\n max_total_cost = neighbor_cost\n direction = action\n new_pos = neighbors[action]\n else:\n continue\n\n # If it is not a valid move, reset\n if not robot.checkValidLoc(new_pos[0], new_pos[1]) or tuple(new_pos) in self.visited_locations:\n direction = None\n\n if direction is None:\n potential_actions = list(neighbors.keys())\n direction = random.choice(potential_actions)\n\n self.visited_locations.add(tuple(neighbors[direction]))\n return direction\n\n def reset(self):\n self.better_goal_loc = None\n self.visited_locations = set()\n self.visited_locations.add((0, 0))\n self.current_entropy = 1 # np.ones((1, 4))\n self.directions = ['left', 'right', 'down', 'up']\n self.path = []\n","sub_path":"InformedNavigator.py","file_name":"InformedNavigator.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"257817802","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nx_rough= np.loadtxt(\"x.out\")\ny_rough= np.loadtxt(\"y.out\")\nlen_arrays= 2*len(x_rough) + 4 #(4 corners + 4 points on the edge of the fault zone )\nheight_of_domain= 40 #(km)\n\n\n # working on the node part of dynosol 2d\n\nx = np.zeros(len_arrays)\ny= np.zeros(len_arrays)\n\nx[0]= 0.0 \t\t\t\t\t\t\t\t\t\t\t \t\t# top left corner\nx[1]= x_rough[len(x_rough)-1]\t\t\t\t\t\t\t\t\t\t\t\t\t# top right corner\n\nx[2]= x_rough[0]\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# upper center left corner\nx[3:3+len(x_rough)-2]= x_rough[1: (len(x_rough)-1 )] \t\t\t\t\t\t\t\t# upper level of body of rough fault\nx[3+len(x_rough)-2]= x_rough[len(x_rough)-1]\t\t\t\t\t\t\t\t\t# upper center right corner\t\n\nx[3+len(x_rough)-2+ 1]=\tx_rough[0]\t\t\t\t\t\t\t\t\t\t\t\t# lower center left corner\nx[3+len(x_rough)-2+ 2 : 3+ 2* (len(x_rough)-2) +2 ]= x_rough[1:len(x_rough)-1] \t\t# lower level of body of rough fault\nx[3+ 2* (len(x_rough)-2) +2] = x_rough[len(x_rough)-1] \t\t\t\t\t \t\t# lower center right corner\n\nx[3+ 2* (len(x_rough)-2) +3] = 0.0 \t\t\t\t\t\t\t\t\t\t\t# lower left corner\nx[3+ 2* (len(x_rough)-2) +4] = x_rough[len(x_rough)-1] \t\t\t\t\t\t\t# lower right corner\n\n\n\ny[0]= height_of_domain \t\t\t\t\t\t\t\t\t\t# top left corner\ny[1]= height_of_domain\t\t\t\t\t\t\t\t\t\t\t\t\t\t# top right corner\n\ny[2]= 1.0 + y_rough[0] \t\t\t\t\t\t\t\t\t\t\t\t\t\t# upper center left corner\ny[3:3+len(x_rough)-2]= 1.0 + y_rough[1: (len(x_rough)-1 )] \t\t\t\t \t\t# upper level of body of rough fault\ny[3+len(x_rough)-2]= 1.0 + y_rough[len(x_rough)-1]\t\t\t\t\t\t\t\t# upper center right corner\t\n\ny[3+len(x_rough)-2+ 1]=\ty_rough[0] -1.0 \t\t\t\t\t\t\t\t\t\t\t\t\t# lower center left corner\ny[3+len(x_rough)-2+ 2 : 3+ 2* (len(x_rough)-2) +2 ]= y_rough[1:len(x_rough)-1] -1.0 \t\t\t\t\t# lower level of body of rough fault\ny[3+ 2* (len(x_rough)-2) +2] = y_rough[len(x_rough)-1] -1.0 \t\t\t\t\t\t\t\t\t# lower center right corner\n\ny[3+ 2* (len(x_rough)-2) +3] = 0.0 \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t # lower left corner\ny[3+ 2* (len(x_rough)-2) +4] = 0.0 \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# lower right corner\n\n# np.savetxt('x_values.txt', x)\n# np.savetxt('y_values.txt', y)\nx= x*1000.0 # convert to km\ny=y*1000.0\t\t\t\t \n \n\n# working on the element part of dynosol 2d\n\nelement= np.zeros(len_arrays+2)\npj0=\tnp.zeros(len_arrays+2)\npj1=\tnp.zeros(len_arrays+2)\nboundary_flag= np.zeros(len_arrays+2)\n\n\n\n\n# left boundary from top to bottom ----------------------\npj0[0]= 0 \npj1[0]=2\nboundary_flag[0]= 1\n\npj0[1]= 2 \npj1[1]=3+len(x_rough)-2+ 1\nboundary_flag[1]= 1\n\npj0[2]=3+len(x_rough)-2+ 1\npj1[2]=3+ 2* (len(x_rough)-2) +3\nboundary_flag[2]= 1\n\n# Bottom boundary ----------------------\n\npj0[3]= 3+ 2* (len(x_rough)-2) +3\npj1[3]= 3+ 2* (len(x_rough)-2) +4\nboundary_flag[3]= 16\n\n# Right boundary from bottom to top----------------------\npj0[4]= 3+ 2* (len(x_rough)-2) +4\npj1[4]= 3+ 2* (len(x_rough)-2) +2\nboundary_flag[4]= 2\n\npj0[5]= 3+ 2* (len(x_rough)-2) +2\npj1[5]= 3+len(x_rough)-2\nboundary_flag[5]= 2\n\npj0[6]= 3+len(x_rough)-2\npj1[6]= 1\nboundary_flag[6]= 2\n\n# Top boundary ----------------------\npj0[7]= 1\npj1[7]= 0\nboundary_flag[7]= 32\n\n# Not a boundary-- Body segments -------------------\n\n\n\nfor ii in range(len(x_rough)-1):\n\tnext_value= 8+ ii\n\tpj0[8+ ii]= 2+ii\n\tpj1[8+ii] = 2+ii +1 \n\tboundary_flag[8+ii] = 0\n\nfor ii in range(len(x_rough)-1):\t\n\tpj0[next_value+1+ii] = 3+len(x_rough)-2+ 1 + 
ii\n\tpj1[next_value+1+ii] = 3+len(x_rough)-2+ 1 + ii +1\n\tboundary_flag[next_value+1+ii] = 0 \n\n\n#Write output\nf = open('coupling_input.poly','w')\n\n#need to write this part\n# npoints ndims 0 0\n# 13 2 0 0\nf.write('# input file for dynosol\\n')\nf.write('# \\n')\nf.write(\"{} {} {} {}\\n\".format( '#npoints' , 'ndims', '0', '0' ) )\nf.write(\"{} {} {} {}\\n\".format( len_arrays , 2, 0, 0 ) )\nf.write(\"{} {} {} \\n\".format( '#i' , 'xi', 'yi' ) )\nfor i in range(len_arrays):\n\n\tf.write(\"{} {:E} {:E}\\n\".format(i , x[i], y[i] ) ) \n\n## nsegments 1\n# 16 1\nf.write('# segments\\n')\nf.write(\"{} {}\\n\".format( '#nsegments' , '1' ) )\nf.write(\"{:d} {:d}\\n\".format( len_arrays+2 , 1 ) )\nf.write(\"{} {} {} {}\\n\".format( '#i' , 'pj0', 'pj1', 'boundary_flag' ) )\n\n\nfor i in range(len_arrays+2):\n\tpj_0= int ( pj0[i] )\n\tpj_1 =int ( pj1[i] )\n\tboundary= int( boundary_flag[i] )\n\tf.write(\"{} {:d} {:d} {:d}\\n\".format(i , pj_0, pj_1, boundary ) ) \n#f.write('# author='+author+'\\n')\n\nf.write('# #### holes, must be 0 ####\\n')\nf.write(\"{:d}\\n\".format( 0 ) )\n\nf.write('#### regions ####\\n')\nf.write('# nregions\\n')\nf.write( \"{}\\n\".format(3) )\nf.write(\"{} {} {} {}\\n\".format( '#k' , 'xk', 'yk', 'mattype', 'size' ) )\n\n# Working on the regions part --------------------------------------\n\n\nzones_1_x = 40000.0 \nzones_1_y= 10000.0\nelement_size_1= 400000.0\nelement_type_1= 0\n\nzones_2_x = 0.0\n#zones_2_x = x_rough[1] *1000 +500\nzones_2_y= 20100.0\n#zones_2_y = y_rough[1]*1000 +500.0\nelement_size_2 = 10000.0\nelement_type_2= 1\n\nzones_3_x = 40000.0\nzones_3_y= 30000.0\nelement_size_3= 400000.0\nelement_type_3= 0\n\nf.write(\"{} {:E} {:E} {:d} {:E}\\n\".format(0 , zones_1_x, zones_1_y, element_type_1, element_size_1 ) )\nf.write(\"{} {:E} {:E} {:d} {:E}\\n\".format(1 , zones_2_x, zones_2_y, element_type_2, element_size_2 ) )\nf.write(\"{} {:E} {:E} {:d} {:E}\\n\".format(2 , zones_3_x, zones_3_y ,element_type_3, element_size_3) )\n\nf.close()\n\n\n\n\n\n\n","sub_path":"examples/input_file_script.py","file_name":"input_file_script.py","file_ext":"py","file_size_in_byte":5202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"528197482","text":"#\n# (c) UWA, The University of Western Australia\n# M468/35 Stirling Hwy\n# Perth WA 6009\n# Australia\n#\n# Copyright by UWA, 2012-2015\n# All rights reserved\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston,\n# MA 02111-1307 USA\n#\n\"\"\"\nPopulate the queue\n\"\"\"\nimport multiprocessing\nfrom helpers.logging_helper import use_multiprocessor_logging, get_logger\nfrom helpers.multiprocessing_helper import Consumer\n\nuse_multiprocessor_logging()\n\nimport argparse\nimport sys\nfrom boto.sqs.message import Message\nfrom helpers.sqs_helper import SqsHelper\n\nLOG = get_logger(__name__)\nLOG.info('PYTHONPATH = {0}'.format(sys.path))\n\nNUMBER_PROCESSES = 10\nSEGMENT = 100000 / NUMBER_PROCESSES\n\n\nclass TaskAdd(object):\n def __init__(self, range_id, args):\n self.range_id = range_id\n self.args = args\n\n def __call__(self):\n # noinspection PyBroadException\n try:\n sqs_helper = SqsHelper('us-east-1')\n queue = sqs_helper.get_queue(self.args.queue_name)\n\n for galaxy_id in range(self.range_id * SEGMENT, (self.range_id * SEGMENT) + SEGMENT):\n if self.args.verbosity >= 1 or galaxy_id % 100 == 0:\n LOG.info('Adding {0}'.format(galaxy_id))\n\n message = Message()\n message.set_body('{0}'.format(galaxy_id))\n queue.write(message)\n\n except Exception:\n LOG.exception('Task died')\n\n def __str__(self):\n return 'Adding {0}'.format(self.range_id)\n\n\ndef add_ids_to_queue(args):\n # Create the queue\n queue = multiprocessing.JoinableQueue()\n\n consumers = [Consumer(queue)\n for i in xrange(NUMBER_PROCESSES)]\n\n for consumer in consumers:\n consumer.start()\n\n if args.verbosity >= 1:\n LOG.info('Adding elements')\n\n for range_id in range(0, NUMBER_PROCESSES):\n queue.put(TaskAdd(range_id, args))\n\n # Add a poison pill for each consumer\n for i in xrange(NUMBER_PROCESSES):\n queue.put(None)\n\n # Wait for the queue to terminate\n queue.join()\n\n\ndef main():\n parser = argparse.ArgumentParser('Add galaxy ids to the queue')\n parser.add_argument(\n \"-v\",\n \"--verbosity\",\n action=\"count\",\n default=0,\n help=\"increase output verbosity\")\n parser.add_argument('queue_name', help='the queue to load')\n args = parser.parse_args()\n add_ids_to_queue(args)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/add_galaxy_ids_to_queue.py","file_name":"add_galaxy_ids_to_queue.py","file_ext":"py","file_size_in_byte":3145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"344605608","text":"import sys\nimport numpy as np\nimport PDB_tools\n\nif __name__ == '__main__':\n data = np.array([])\n reader = PDB_tools.PDBreader(sys.argv[1])\n data = reader.getdata()\n posi = PDB_tools.Calc_data(data)\n positions = np.array([])\n positions = posi.getpositions()\n\n one_position = np.array([])\n ans = np.array([])\n counter = 0\n tmp = 0\n for i in range(len(data)):\n l = data[i].split()\n if l[0] == \"ATOM\":\n if int(l[1]) - tmp < 1 or i == len(data)-2:\n print(l[1],tmp)\n one_position = np.reshape(one_position,(int(tmp),3))\n calc = PDB_tools.Calc_data(one_position)\n ans = calc.calc_centerpositions()\n print(ans)\n one_position = np.array([])\n one_position = np.append(one_position,positions[counter])\n counter += 1\n tmp = int(l[1])\n\n #print(one_position)\n","sub_path":"cal_centerposition.py","file_name":"cal_centerposition.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"446908508","text":"import cv2\nimport sys\n\n\nwebcam = cv2.VideoCapture(0) #Use camera 0\nface = cv2.CascadeClassifier('train_face.xml')\n\nwhile True:\n ret,frame = webcam.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = face.detectMultiScale(frame,1.3,5)\n padding = 10\n\n for (x, y, w, h) in faces:\n cv2.rectangle(frame,(x-padding, y-padding), (x+w+padding, y+h+padding), (255, 0, 0), 2)\n sub_face = frame[y:y+h+padding, x:x+w+padding]\n #sub_face = cv2.cvtColor(sub_face, cv2.COLOR_BGR2GRAY)\n FaceFileName = \"facesCapture/face_\" + str(y) + \".jpg\"\n cv2.imwrite(FaceFileName,sub_face)\n cv2.imshow(\"Detecting and storing face\",frame)\n if cv2.waitKey(1)== ord('q'):\n break\nwebcam.release()\ncv2.destroyAllWindows()\n\n\n\n \n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"415128550","text":"import time\nimport numpy as np\nfrom pyquaternion import Quaternion\n\n\ndef tf(position):\n pos, orn = position\n pos = np.array(pos)\n if not isinstance(orn, Quaternion):\n orn = Quaternion(orn[3], *orn[:3])\n return pos, orn\n\n\nclass KinematicConstraint(object):\n def __init__(self, base_position, child_position):\n base_pos, base_orn = tf(base_position)\n child_pos, child_orn = tf(child_position)\n pos = base_orn.inverse.rotate(child_pos - base_pos)\n orn = base_orn.inverse * child_orn\n self._constraint = pos, orn\n\n def get_child(self, base_position):\n base_pos, base_orn = tf(base_position)\n pos, orn = self._constraint\n pos = base_pos + base_orn.rotate(pos)\n orn = base_orn * orn\n return pos, orn\n\n\nclass Rate(object):\n def __init__(self, time_step):\n self._time_step = time_step\n self._next_time = time.time() + time_step\n\n def sleep(self):\n t = time.time()\n if t < self._next_time:\n time.sleep(self._next_time - t)\n else:\n time.sleep(1e-6)\n self._next_time += self._time_step","sub_path":"mime/agent/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"478720487","text":"import argparse\nimport os\nimport shutil\nimport logging\nimport time\nimport pickle\nimport random\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom models import MySTCNN, MyC3D\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom utils import AverageMeter, accuracy, Bar\n\nparser = argparse.ArgumentParser()\n\n# datasets\nparser.add_argument('-d', '--dataset', default='casme2', type=str)\n# parser.add_argument('--dataset-path', default='dataset/CASME2_224_15frames.pickle')\nparser.add_argument('--dataset-path', default='dataset/CASME2_BGR_224_15frames.pickle')\nparser.add_argument('-f', '--folds', default=10, type=int, help='k-folds cross validation')\n\n# optimization options\nparser.add_argument('--epochs', default=20, type=int, metavar='N',\n help='number of total epochs to run')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N',\n help='manual epoch number (useful on restarts)')\nparser.add_argument('--train_batch', default=2, type=int, metavar='N',\n help='train batchsize')\nparser.add_argument('--test-batch', default=2, type=int, metavar='N',\n help='test batchsize')\nparser.add_argument('--lr', '--learning-rate', default=0.0001, type=float,\n metavar='LR', help='initial learning rate')\nparser.add_argument('--drop', '--dropout', default=0, type=float,\n metavar='Dropout', help='Dropout ratio')\nparser.add_argument('--schedule', type=int, nargs='+', default=[20, 30, 40],\n help='Decrease learning rate at these epochs.')\nparser.add_argument('--gamma', type=float, default=0.90, help='LR is multiplied by gamma on schedule.')\nparser.add_argument('--momentum', default=0.8, type=float, metavar='M',\n help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-3, type=float,\n metavar='W', help='weight decay (default: 1e-4)')\n\n# checkpoints\nparser.add_argument('-c', '--checkpoint', default='checkpoints/casme2_c3d', type=str, metavar='PATH',\n help='path to save checkpoint (default:checkpoint)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH')\n\n# architecture\nparser.add_argument('--arch', '-a', metavar='ARCH', default='c3d')\n\n# miscs\nparser.add_argument('--manualSeed', type=int, help='manual seed')\n\nargs = parser.parse_args()\nstate = {k: v for k, v in args._get_kwargs()}\n\n# use CUDA\nuse_cuda = torch.cuda.is_available()\n\n# Random seed\nif args.manualSeed is None:\n args.manualSeed = random.randint(1, 10000)\nrandom.seed(args.manualSeed)\ntorch.manual_seed(args.manualSeed)\nif use_cuda:\n torch.cuda.manual_seed_all(args.manualSeed)\n\nbest_acc = 0 # best test accuracy\n\ndef main():\n global best_acc\n start_epoch = args.start_epoch\n\n # 创建 checkpoint 目录\n if not os.path.isdir(args.checkpoint):\n os.makedirs(args.checkpoint)\n\n # load data\n print('==> Preparing dataset %s' % args.dataset)\n with open(args.dataset_path, 'rb') as f:\n data = pickle.load(f)\n\n # model\n # print(\"==> creating model '{}'\".format(args.arch))\n model = MyC3D(with_classifier=True, num_classes=5)\n if use_cuda:\n model = model.cuda()\n\n # criterion\n criterion = nn.CrossEntropyLoss()\n\n # optimizer\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n # set up logging\n logging.basicConfig(level=logging.DEBUG,\n filename=os.path.join(args.checkpoint, 'log_info.log'),\n filemode='a+',\n 
format=\"%(asctime)-15s %(levelname)-8s %(message)s\")\n \n # log configuration\n logging.info('-' * 10 + 'configuration' + '*' * 10)\n for arg in vars(args):\n logging.info((arg, str(getattr(args, arg))))\n\n # 10-fold cv\n acc_fold = []\n reset_lr = state['lr']\n for f_num in range(args.folds):\n state['lr'] = reset_lr\n\n # model\n model = MyC3D(with_classifier=True, num_classes=5)\n if use_cuda:\n model = model.cuda()\n model.reset_all_weights()\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n\n average_acc = 0\n best_acc = 0\n\n # prepare input\n train_img, train_label, test_img, test_label = data[f_num]['train_img'], data[f_num]['train_label'], data[f_num]['test_img'], data[f_num]['test_label']\n\n train_img = torch.tensor(train_img, dtype=torch.float) / 255.0 # (b_s, frames, h, w)\n train_img = train_img.permute(0, 4, 1, 2, 3)\n # train_img = train_img.unsqueeze(1)\n\n test_img = torch.tensor(test_img, dtype=torch.float) / 255.0\n test_img = test_img.permute(0, 4, 1, 2, 3)\n # test_img = test_img.unsqueeze(1)\n\n train_label, test_label = torch.tensor(train_label, dtype=torch.long), torch.tensor(test_label, dtype=torch.long)\n\n train_dataset = torch.utils.data.TensorDataset(train_img, train_label)\n train_iter = torch.utils.data.DataLoader(\n dataset=train_dataset,\n batch_size=args.train_batch,\n shuffle=True\n )\n\n test_dataset = torch.utils.data.TensorDataset(test_img, test_label)\n test_iter = torch.utils.data.DataLoader(\n dataset=test_dataset,\n batch_size=args.test_batch,\n shuffle=False\n )\n # train and val\n for epoch in range(start_epoch, args.epochs):\n # 在特定的epoch 调整学习率\n adjust_learning_rate(optimizer, epoch)\n\n print('\\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, optimizer.param_groups[0]['lr']))\n \n train_loss, train_acc = train(train_iter, model, criterion, optimizer, epoch, use_cuda)\n test_loss, test_acc = test(test_iter, model, criterion, epoch, use_cuda)\n\n # logger\n\n # save model\n is_best = test_acc > best_acc\n best_acc = max(test_acc, best_acc)\n save_checkpoint({\n 'epoch': epoch + 1,\n 'state_dict': model.state_dict(),\n 'acc': test_acc,\n 'best_acc': best_acc,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, f_num, checkpoint=args.checkpoint)\n \n # compute average acc\n acc_fold.append(best_acc)\n average_acc = sum(acc_fold) / len(acc_fold)\n\n logging.info('fold: %d, best_acc: %.2f, average_acc: %.2f' % (f_num, best_acc, average_acc))\n \n\n\ndef train(train_iter, model, criterion, optimizer, epoch, user_cuda):\n # switch to train mode\n model.train()\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n end = time.time()\n\n bar = Bar('Processing', max=len(train_iter))\n for batch_idx, (inputs, targets) in enumerate(train_iter):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n \n # compute output\n per_outputs = model(inputs)\n\n per_loss = criterion(per_outputs, targets)\n\n loss = per_loss\n\n # measure accuracy and record loss\n prec = accuracy(per_outputs.data, targets.data, topk=(1,))\n losses.update(loss.item(), inputs.size(0))\n top1.update(prec[0].item(), inputs.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # plot progress\n bar.suffix = 
'({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f}'.format(\n batch=batch_idx+1,\n size=len(inputs),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n loss=losses.avg,\n top1=top1.avg,\n )\n bar.next()\n bar.finish()\n return (losses.avg, top1.avg)\n\ndef test(test_iter, model, criterion, epoch, use_cuda):\n global best_acc\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n bar = Bar('Processing', max=len(test_iter))\n for batch_idx, (inputs, targets) in enumerate(test_iter):\n # measure data loading time\n data_time.update(time.time() - end)\n\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n # inputs, targets = torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets)\n\n # compute output\n outputs = model(inputs)\n loss = criterion(outputs, targets)\n\n \"\"\"\n np_inputs = inputs.numpy()\n np_att = attention.numpy()\n for item_in, item_att in zip(np_inputs, np_att):\n print(item_in.shape, item_att.shape)\n \"\"\"\n\n # measure accuracy and record loss\n prec = accuracy(outputs.data, targets.data, topk=(1,))\n losses.update(loss.item(), inputs.size(0))\n top1.update(prec[0].item(), inputs.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # plot progress\n bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f}'.format(\n batch=batch_idx+1,\n size=len(inputs),\n data=data_time.avg,\n bt=batch_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td,\n loss=losses.avg,\n top1=top1.avg,\n )\n bar.next()\n bar.finish()\n return (losses.avg, top1.avg)\n\ndef save_checkpoint(state, is_best, f_num, checkpoint='checkpoint', filename='checkpoint.pth.tar'):\n filepath = os.path.join(checkpoint, 'fold_' + str(f_num) + '_' + filename)\n torch.save(state, filepath)\n if is_best:\n shutil.copyfile(filepath, os.path.join(checkpoint, 'fold_' + str(f_num) + '_model_best.pth.tar'))\n\n\ndef adjust_learning_rate(optimizer, epoch):\n global state\n if epoch in args.schedule:\n state['lr'] *= args.gamma\n for param_group in optimizer.param_groups:\n param_group['lr'] *= args.gamma\n\n\nif __name__ == '__main__':\n main()","sub_path":"casme2_c3d.py","file_name":"casme2_c3d.py","file_ext":"py","file_size_in_byte":10895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"152846897","text":"\n\ndef find_white(board):\n for i in range(8):\n for j in range(8):\n if board[i][j] == \"o\":\n return [i, j]\ndef check_move(oboard, white ,dir):\n board = oboard\n wcol = white[1]\n wrow = white[0]\n list_b = []\n\n #left\n if dir == 0:\n if wcol!= 0:\n done = False\n for i in range(wcol - 1, -1, -1):\n interested = board[wrow][i]\n if not done:\n if interested == 'x':\n if i != 0:\n for j in range(i - 1, -1, -1):\n if board[wrow][j] == '.':\n list_b.append([wrow, j])\n elif board[wrow][j] == 'x':\n done = True\n break\n else:\n break\n\n #top\n elif dir == 1:\n if wrow!=0:\n done = False\n for i in range(wrow - 1, -1, -1):\n interested = board[i][wcol]\n if not done:\n if interested == 'x':\n if i != 0:\n for j in range(i - 1, -1, -1):\n if board[j][wcol] == '.':\n list_b.append([j, wcol])\n elif board[j][wcol] == 'x':\n done = True\n break\n else:\n break\n #right\n elif dir == 2:\n if wcol != 7:\n done = False\n for i in range(wcol+1, 8):\n interested = board[wrow][i]\n if not done:\n if interested == 'x':\n if i != 7:\n for j in range(i + 1, 8):\n if board[wrow][j] == '.':\n list_b.append([wrow, j])\n elif board[wrow][j] == 'x':\n done = True\n break\n else:\n break\n #down\n elif dir == 3:\n if wrow != 7:\n done = False\n for i in range(wrow + 1, 8):\n interested = board[i][wcol]\n if not done:\n if interested == 'x':\n if i != 7:\n for j in range(i + 1, 8):\n if board[j][wcol] == '.':\n list_b.append([j, wcol])\n elif board[j][wcol] == 'x':\n done = True\n break\n else:\n break\n return list_b\n\ndef check_move2(board, white):\n list_b = []\n wcol = white[1]\n wrow = white[0]\n #left\n if wcol > 1:\n if board[wrow][wcol-1] == 'x':\n if board[wrow][wcol-2] == '.':\n list_b.append([wrow, wcol-2])\n #top\n if wrow > 1:\n if board[wrow - 1][wcol] == 'x':\n if board[wrow - 2][wcol] == '.':\n list_b.append([wrow - 2, wcol])\n\n #right\n if wcol < 6:\n if board[wrow][wcol + 1] == 'x':\n if board[wrow][wcol + 2] == '.':\n list_b.append([wrow, wcol + 2])\n\n return list_b\n\n\n\n\ndef validmoves(board, king, eat, last):\n pos = find_white(board)\n tmoves = []\n if king:\n if not eat:\n for i in range(4):\n moves = check_move(board,pos, i)\n for j in moves:\n tmoves.append(j)\n if eat:\n for i in range(4):\n if i != last:\n moves = check_move(board, pos, i)\n for j in moves:\n tmoves.append(j)\n else:\n tmoves = check_move2(board,pos)\n\n return tmoves\ndef which_dir(old, new):\n if new[0] - old[0] == 0:\n if new[1] - old[1] > 0:\n return 0\n elif new[1] - old[1] < 0:\n return 2\n elif new[0] - old[0] > 0:\n return 1\n elif new[0] - old[0] <0:\n return 3\n\ndef win(board):\n for i in range(8):\n for j in range(8):\n if board[i][j] == 'x':\n return False\n return True\ndef del_black(oboard, origin, target):\n board = oboard[:]\n if target[0] - origin[0] == 0:\n if target[1] - origin[1] > 0:\n for i in range(origin[1]+1, target[1]):\n if board[origin[0]][i] == 'x':\n board[origin[0]] = list(board[origin[0]])\n board[origin[0]][i] = '.'\n board[origin[0]] = ''.join(board[origin[0]])\n elif target[1] - origin[1] < 0:\n for i in range(target[1] + 1, origin[1]):\n if board[origin[0]][i] == 'x':\n board[origin[0]] = list(board[origin[0]])\n board[origin[0]][i] = '.'\n board[origin[0]] = ''.join(board[origin[0]])\n\n elif target[1] - origin[1] == 0:\n if target[0] - origin[0] > 0:\n for i in range(origin[0] + 1, target[0]):\n if board[i][origin[1]] == 'x':\n board[i] = list(board[i])\n board[i][origin[1]] = '.'\n board[i] = ''.join(board[i])\n\n elif target[0] - 
origin[0] < 0:\n\n for i in range(target[0] + 1, origin[0]):\n if board[i][origin[1]] == 'x':\n board[i] = list(board[i])\n board[i][origin[1]] = '.'\n board[i] = ''.join(board[i])\n\n return board\ndef print_board(board):\n for i in range(8):\n for j in range(8):\n print(board[i][j], end=' ')\n print()\n\ndef move_the_white(oboard, old, last, king):\n board = oboard\n if win(board):\n return 1\n\n wpos = find_white(board)\n moves = []\n #print(old, end='--')\n #print(wpos, end = ':')\n\n if wpos[0] == 0:\n king = True\n\n\n if last != None:\n moves = validmoves(board, king, True, last)\n else:\n moves = validmoves(board, king, False, last)\n\n if len(moves) == 0:\n #print(\"o\")\n return 0\n\n total_i_dim = 0\n #print(moves)\n #print()\n for i in range(len(moves)):\n new_board = board[:]\n new_move = moves[i]\n\n new_board = del_black(new_board, wpos, new_move)\n new_board[wpos[0]] = list(new_board[wpos[0]])\n new_board[wpos[0]][wpos[1]] = '.'\n new_board[wpos[0]] = ''.join(new_board[wpos[0]])\n\n new_board[new_move[0]] = list(new_board[new_move[0]])\n new_board[new_move[0]][new_move[1]] = 'o'\n new_board[new_move[0]] = ''.join(new_board[new_move[0]])\n #print_board(new_board)\n\n last = which_dir(wpos, new_move)\n total_i_dim += move_the_white(new_board, wpos, last, king)\n\n\n return total_i_dim\n\ncases = int(input())\nfor case in range(cases):\n board = []\n for i in range(8):\n row = input()\n board.append(row)\n if case+1 != cases:\n blank = input()\n wpos = find_white(board)\n a = None\n total = move_the_white(board, wpos, a, False)\n print(total)","sub_path":"draughts.py","file_name":"draughts.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"584611983","text":"import heapq\nfrom collections import deque\n\n\nclass Solution:\n def rearrangeString(self, words, k):\n \"\"\"\n Time complexity:\n The time complexity of the above algorithm is O(N*logN) where ‘N’\n is the number of characters in the input string.\n\n Space complexity:\n The space complexity will be O(N), as in the worst case, we need to\n store all the ‘N’ characters in the HashMap.\n \"\"\"\n\n if k <= 1:\n return words\n\n charFrequencyMap = {}\n for char in words:\n charFrequencyMap[char] = charFrequencyMap.get(char, 0) + 1\n\n maxHeap = []\n # add all characters to the max heap\n for char, frequency in charFrequencyMap.items():\n heappush(maxHeap, (-frequency, char))\n\n queue = deque()\n resultString = []\n while maxHeap:\n frequency, char = heappop(maxHeap)\n # append the current character to the result string and decrement its count\n resultString.append(char)\n # decrement the frequency and append to the queue\n queue.append((char, frequency+1))\n if len(queue) == k:\n char, frequency = queue.popleft()\n if -frequency > 0:\n heappush(maxHeap, (frequency, char))\n\n # if we were successful in appending all the characters to the result string, return it\n return ''.join(resultString) if len(resultString) == len(words) else \"\"\n","sub_path":"Problems/Leetcode/358_RearrangeStringkDistanceApart.py","file_name":"358_RearrangeStringkDistanceApart.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"82666637","text":"from voucherify import Client as voucherifyClient\n\nvoucherify = voucherifyClient(\n application_id=\"c70a6f00-cf91-4756-9df5-47628850002b\",\n client_secret_key=\"3266b9f8-e246-4f79-bdf0-833929b1380c\"\n)\n\ntracking_id = 'PythonTestUser'\ntestVoucher = {\n \"code\": \"PythonVoucherTest\",\n \"discount\": {\n \"type\": \"AMOUNT\",\n \"amount_off\": 12436\n },\n \"category\": \"PythonTestCategory\",\n \"start_date\": \"2016-01-01T00:00:00Z\",\n \"expiration_date\": None,\n \"redemption\": {\n \"quantity\": None,\n \"redeemed_quantity\": 0\n },\n \"active\": True\n}\n\n\ndef test_publishVoucher():\n params = {\n \"channel\": \"Email\",\n \"customer\": \"donny.roll@mail.com\"\n }\n result = voucherify.distributions.publish(params)\n assert result.get('active') is True\n assert result.get('type') == 'DISCOUNT_VOUCHER'\n","sub_path":"tests/test_distributions_e2e.py","file_name":"test_distributions_e2e.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"479014047","text":"import re\nfrom tkinter import *\n\ndef handle_x(x):\n a = re.split(r'[\\+\\-\\*\\/\\(\\)]', x)\n b = re.findall(r'[\\+\\-\\*\\/\\(\\)]', x)\n t = [rv for r in zip(a, b) for rv in r]\n t = list(filter(lambda x: x != '', t))\n if len(a) > len(b):\n t.append(a[len(a) - 1])\n else:\n t.append(b[len(b) - 1])\n return t\n\nclass Stack:\n def __init__(self):\n self.items = []\n\n def isEmpty(self):\n return self.items == []\n\n def push(self, item):\n self.items.append(item)\n\n def pop(self):\n return self.items.pop()\n\n def peek(self):\n return self.items[len(self.items) - 1]\n\n def size(self):\n return len(self.items)\n\ndef postfix(a):\n x={}\n x['*']=3;x['/']=3;x['+']=2;x['-']=2;x['(']=1\n opstack=Stack()\n poststack=[]\n tokenlist=handle_x(a)\n print(tokenlist)\n for token in tokenlist:\n if re.match(r'\\d+',token):\n poststack.append(token)\n elif token == '(':\n opstack.push(token)\n elif token == ')':\n toptoken=opstack.pop()\n while toptoken != '(':\n poststack.append(toptoken)\n toptoken = opstack.pop()\n else:\n while (not opstack.isEmpty()) and (x[opstack.peek()]>=x[token]):\n poststack.append(opstack.pop())\n opstack.push(token)\n while not opstack.isEmpty():\n poststack.append(opstack.pop())\n return poststack\n\ndef calcu(a):\n realstack=Stack()\n for i in a:\n if re.match(r'\\d+', i):\n realstack.push(i)\n elif i == '+':\n t1=realstack.pop()\n t2=realstack.pop()\n realstack.push(int(t2)+int(t1))\n elif i == '-':\n t1 = realstack.pop()\n t2 = realstack.pop()\n realstack.push(int(t2) - int(t1))\n elif i == '*':\n t1 = realstack.pop()\n t2 = realstack.pop()\n realstack.push(int(t2) *int(t1))\n elif i == '/':\n t1 = realstack.pop()\n t2 = realstack.pop()\n realstack.push(int(t2) / int(t1))\n return realstack.pop()\n\ndef fuck(x):\n x=postfix(x)\n return calcu(x)\n\ndef frame(root, side):\n w = Frame(root)\n w.pack(side=side, expand=YES, fill=BOTH)\n return w\n\n\ndef button(root, side, text, command=None):\n w = Button(root, text=text, command=command)\n w.pack(side=side, expand=YES, fill=BOTH)\n return w\n\n\nclass Calculator(Frame):\n def __init__(self):\n\n Frame.__init__(self)\n\n self.pack(expand=YES, fill=BOTH)\n self.master.title('Simple Calculater')\n\n display = StringVar()\n\n Entry(self, relief=SUNKEN,textvariable=display).pack(side=TOP, expand=YES,fill=BOTH)\n\n for key in ('123', '456', '789', '-0.'):\n keyF = frame(self, TOP)\n for char in key:\n button(keyF, LEFT, char, lambda w=display, c=char: w.set(w.get() + c))\n\n opsF = frame(self, TOP)\n for char in '+-*/=':\n if char == '=':\n btn = button(opsF, LEFT, char)\n btn.bind('', lambda e, s=self, w=display: s.calc(w), '+')\n\n else:\n btn = button(opsF, LEFT, char, lambda w=display, s='%s' % char: w.set(w.get() + s))\n\n clearF = frame(self, BOTTOM)\n button(clearF, LEFT, 'clear', lambda w=display: w.set(''))\n\n\n\n def calc(self, display):\n try:\n display.set(fuck(display.get()))\n except:\n display.set(\"ERROR\")\n\n\n\nif __name__ == '__main__':\n print('ok')\n Calculator().mainloop()\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"138951876","text":"\"\"\"\n@author: BeBlob\n\"\"\"\nimport argparse\nimport os\nimport pika\nimport S4_simple_queue_publish as ppub \nimport S4_simple_queue_read as rpub \n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--read\", help=\"read the messages\",\n action=\"store_true\")\nparser.add_argument(\"--publish\", help=\"write a message\",\n action=\"store_true\")\nargs = parser.parse_args()\n\nif args.read:\n print(\"read mode turned on\")\n rpub.read_messages()\n \nelif args.publish:\n print(\"publish mode turned on\")\n ppub.publish_message()\n\nelse :\n print('Aucun arguments valables')\n","sub_path":"s4/S4_queue_publish_read.py","file_name":"S4_queue_publish_read.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"12257630","text":"#!/usr/bin/python3\n\nimport pandas\nimport os\nimport requests\nfrom requests.auth import HTTPBasicAuth\nfrom urllib.parse import urljoin\n\nuser = 'avcourt' # your github username\nkey = os.environ.get('GH_API') # your secret access token. you can hardcode yours.\n\nrepos_url = f'https://api.github.com/users/{user}/repos'\nrepo_names = [repo[\"name\"] for repo in requests.get(repos_url).json()]\n\ninsights = []\nbase_url = f'https://api.github.com/repos/{user}/'\n\nprint(\"Getting traffic insights for repos:\")\nfor repo in repo_names:\n print(f\"\\t- github.com/{user}/{repo}/\")\n repo_url = urljoin(base_url, repo + '/')\n traffic = requests.get(urljoin(repo_url, 'traffic/views'),\n auth=HTTPBasicAuth(user, key)).json()\n\n clones = requests.get(urljoin(repo_url, 'traffic/clones'),\n auth=HTTPBasicAuth(user, key)).json()[\"count\"]\n\n insights.append({'repo': repo,\n 'views': traffic['count'],\n 'uniques': traffic['uniques'],\n 'clones': clones\n })\n\nprint(\"\\n-- INSIGHTS --------------- Views / Clones --\")\nprint(\"---------------------------------------------\")\nprint(pandas.DataFrame(insights).to_string(index=False))\n","sub_path":"traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"172593548","text":"# Last changed May 31st, 2017\n# Thresholds set at (4,2) within MPR121_edited.py\n# Always run this script with , otherwise you won't get access to the MPR121\n# Also use python 3, otherwise the datetime.timestamp and datetime.fromtimestamp methods won't work\n\n\"\"\"This function prints the ‘raw’ ADC values for all 12 electrodes from a single sensor.\nNormal untouched ADC values should range between 215-230, whereas they should decrease\nto about 50-90 when touched. Different sensors addresses can be selected by adding the\n-a or –-address option followed by either one of the following addresses:\n0x5A, 0x5B, 0x5C, 0x5D (for sensors 1-4, respectively).\nata can be saved to a .txt file through the -s or –-save option.\n\"\"\"\n\nimport sys, time, argparse\nfrom datetime import datetime\nimport Adafruit_MPR121.MPR121_edited as MPR121\n\n# Set up argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('-s', '--save', help = 'save raw ADC values to a .txt file',\naction = \"store_true\")\nparser.add_argument('-a', '--address', help = 'specify MPR121 address (values: \\\n0x5A (default), 0x5B, 0x5C, 0x5D)', type = str)\nargs = parser.parse_args()\n\nif args.address == None or args.address == '0x5A':\n address = 0x5A\nelif args.address == '0x5B':\n address = 0x5B\nelif args.address == '0x5C':\n address = 0x5C\nelif args.address == '0x5D':\n address = 0x5D \nelif not args.address in ['0x5A', '0x5B', '0x5C', '0x5D']:\n print('Error: invalid address specified (must be 0x5A, 0x5B, 0x5C, or 0x5D).')\n sys.exit(1)\n\n# Create MPR121 instance.\ncap = MPR121.MPR121()\n\n# Start communication with the MPR121 chip.\nif not cap.begin(address=address):\n print('Error initializing MPR121. Check your wiring!')\n sys.exit(1)\n\nif args.save:\n # Ask user for mouse ID and trial number to create corresponding text file.\n filename = input('Filename: ')\n if filename[-4:] != '.txt':\n filename += '.txt'\n\n# Main loop to print a message every time a pin is touched.\n# Note: this takes about 20 ms per loop iteration!\nprint('Collecting data. Press Ctrl-C to quit.')\nwhile True:\n touch_status = cap.touched()\n\n filtered = [cap.filtered_data(i) for i in range(12)]\n\n # Print touch status and raw data:\n print('Touch status: {}, Filtered: {}'.format(str(touch_status).zfill(4),filtered), end=12*' ', flush=True)\n print('\\r', end='', flush=True)\n\n if args.save:\n with open(filename, a) as f:\n f.write('{}, {}, {}\\n'.format(datetime.now().timestamp(),touch_status.zfill(4),filtered.strip('[]'))) # Need to convert touch_status and filtered to strings?\n","sub_path":"single_sensor_test.py","file_name":"single_sensor_test.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"398749570","text":"# ex27 The Truth Terms & The Truth Tables\n# ex28 Boolean Practice\n# ex29 What If\npeople = 20\ncats = 30\ndogs = 15\ndogs += 5\nif people >= dogs:\n print(\"People are greater or equal to dogs\") \nif people <= dogs:\n print(\"People are less than or equal to daogs\")\nif people == dogs:\n print(\"People are dogs.\")\n\n# ex30 Else and If\ncars = 40\ntrucks = 15\nif cars > people:\n print(\"We should take the cars\")\nelif cars < people:\n print(\"We should not take the cars\")\nelse:\n print(\"We can't decide\")","sub_path":"learn-python-the-hard-way/Ex27-30.py","file_name":"Ex27-30.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"399308703","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\"\"\"\nLoad test for the SyncStorage server\n\"\"\"\nimport os\nimport hmac\nimport random\nimport time\nfrom urllib.parse import urlparse, urlunparse\nimport base64\nimport hashlib\n\nfrom tokenlib import make_token, get_derived_secret as derive\nimport browserid.jwt\nimport browserid.tests.support\n\nfrom molotov import (json_request, global_setup, set_var, get_var, scenario,\n setup)\n\n\n# Assertions are good for one year (in seconds).\n# This avoids having to deal with clock-skew in tokenserver requests.\nASSERTION_LIFETIME = 60 * 60 * 24 * 365\n\nMOCKMYID_DOMAIN = \"mockmyid.s3-us-west-2.amazonaws.com\"\nMOCKMYID_PRIVATE_KEY = browserid.jwt.DS128Key({\n \"algorithm\": \"DS\",\n \"x\": \"385cb3509f086e110c5e24bdd395a84b335a09ae\",\n \"y\": \"738ec929b559b604a232a9b55a5295afc368063bb9c20fac4e53a74970a4db795\"\n \"6d48e4c7ed523405f629b4cc83062f13029c4d615bbacb8b97f5e56f0c7ac9bc1\"\n \"d4e23809889fa061425c984061fca1826040c399715ce7ed385c4dd0d40225691\"\n \"2451e03452d3c961614eb458f188e3e8d2782916c43dbe2e571251ce38262\",\n \"p\": \"ff600483db6abfc5b45eab78594b3533d550d9f1bf2a992a7a8daa6dc34f8045a\"\n \"d4e6e0c429d334eeeaaefd7e23d4810be00e4cc1492cba325ba81ff2d5a5b305a\"\n \"8d17eb3bf4a06a349d392e00d329744a5179380344e82a18c47933438f891e22a\"\n \"eef812d69c8f75e326cb70ea000c3f776dfdbd604638c2ef717fc26d02e17\",\n \"q\": \"e21e04f911d1ed7991008ecaab3bf775984309c3\",\n \"g\": \"c52a4a0ff3b7e61fdf1867ce84138369a6154f4afa92966e3c827e25cfa6cf508b\"\n \"90e5de419e1337e07a2e9e2a3cd5dea704d175f8ebf6af397d69e110b96afb17c7\"\n \"a03259329e4829b0d03bbc7896b15b4ade53e130858cc34d96269aa89041f40913\"\n \"6c7242a38895c9d5bccad4f389af1d7a4bd1398bd072dffa896233397a\",\n})\n\n\n_DEFAULT = \"https://token.stage.mozaws.net\"\n\n\ndef b64encode(data):\n return base64.b64encode(data).decode(\"ascii\")\n\n\nclass StorageClient(object):\n def __init__(self, server_url=_DEFAULT):\n self.timeskew = 0\n self.server_url = server_url\n self.auth_token = None\n self.auth_secret = None\n self.endpoint_url = None\n self.endpoint_scheme = None\n self.endpoint_host = None\n self.generate()\n\n def __repr__(self):\n return str(self.auth_token)\n\n def generate(self):\n \"\"\"Pick an identity, log in and generate the auth token.\"\"\"\n # If the server_url has a hash fragment, it's a storage node and\n # that's the secret. 
Otherwise it's a token server url.\n        uid = random.randint(1, 1000000)\n        url = urlparse(self.server_url)\n        if url.fragment:\n            endpoint = url._replace(fragment=\"\", path=\"/1.5/\" + str(uid))\n            self.endpoint_url = urlunparse(endpoint)\n            data = {\n                \"uid\": uid,\n                \"node\": urlunparse(url._replace(fragment=\"\")),\n                \"expires\": time.time() + ASSERTION_LIFETIME,\n            }\n            self.auth_token = make_token(data, secret=url.fragment)\n            self.auth_secret = derive(self.auth_token, secret=url.fragment)\n        else:\n            email = \"user%s@%s\" % (uid, MOCKMYID_DOMAIN)\n            exp = time.time() + ASSERTION_LIFETIME + self.timeskew\n            assertion = browserid.tests.support.make_assertion(\n                email=email,\n                audience=self.server_url,\n                issuer=MOCKMYID_DOMAIN,\n                issuer_keypair=(None, MOCKMYID_PRIVATE_KEY),\n                exp=int(exp * 1000),\n            )\n            token_url = self.server_url + \"/1.0/sync/1.5\"\n            response = json_request(token_url, headers={\n                \"Authorization\": \"BrowserID \" + assertion,\n            })\n            # Maybe timeskew between client and server?\n            if response['status'] == 401:\n                server_time = int(response['headers'][\"X-Timestamp\"])\n                self.timeskew = server_time - int(time.time())\n                exp = time.time() + ASSERTION_LIFETIME + self.timeskew\n                assertion = browserid.tests.support.make_assertion(\n                    email=email,\n                    audience=self.server_url,\n                    issuer=MOCKMYID_DOMAIN,\n                    issuer_keypair=(None, MOCKMYID_PRIVATE_KEY),\n                    exp=int(exp * 1000),\n                )\n                response = json_request(token_url, headers={\n                    \"Authorization\": \"BrowserID \" + assertion,\n                })\n\n            if response['status'] > 299:\n                raise ValueError(response['status'])\n\n            credentials = response['content']\n            self.auth_token = credentials[\"id\"].encode('ascii')\n            self.auth_secret = credentials[\"key\"].encode('ascii')\n            self.endpoint_url = credentials[\"api_endpoint\"]\n\n        url = urlparse(self.endpoint_url)\n        self.endpoint_scheme = url.scheme\n        if ':' in url.netloc:\n            self.endpoint_host, self.endpoint_port = url.netloc.rsplit(\":\", 1)\n        else:\n            self.endpoint_host = url.netloc\n            if url.scheme == \"http\":\n                self.endpoint_port = \"80\"\n            else:\n                self.endpoint_port = \"443\"\n\n    def _normalize(self, params, path_qs, meth='GET'):\n        bits = []\n        bits.append(\"hawk.1.header\")\n        bits.append(params[\"ts\"])\n        bits.append(params[\"nonce\"])\n        bits.append(meth)\n        bits.append(path_qs)\n        bits.append(self.endpoint_host.lower())\n        bits.append(self.endpoint_port)\n        bits.append(params.get(\"hash\", \"\"))\n        bits.append(params.get(\"ext\", \"\"))\n        bits.append(\"\") # to get the trailing newline\n        return \"\\n\".join(bits)\n\n    def _sign(self, params, path_qs, meth='GET'):\n        algorithm = \"sha256\"\n        sigstr = self._normalize(params, path_qs, meth)\n        sigstr = sigstr.encode(\"ascii\")\n        key = self.auth_secret\n        hashmod = hashlib.sha256\n        return b64encode(hmac.new(key, sigstr, hashmod).digest())\n\n    def _auth(self, meth, path_qs):\n        # Keep the skew-adjusted timestamp; overwriting it with the local\n        # clock would defeat the 401 retry logic in get() below.\n        params = {\"ts\": str(int(time.time()) + self.timeskew)}\n        params[\"id\"] = self.auth_token.decode('ascii')\n        params[\"nonce\"] = b64encode(os.urandom(5))\n        params[\"mac\"] = self._sign(params, path_qs, meth)\n        res = ', '.join(['%s=\"%s\"' % (k, v) for k, v in params.items()])\n        return 'Hawk ' + res\n\n    async def get(self, session, path_qs, *args, **kw):\n        url = self.endpoint_url + path_qs\n        headers = {'Authorization': self._auth('GET', path_qs),\n                   'Host': self.endpoint_host}\n\n        async with session.get(url, headers=headers) as resp:\n            if resp.status == 401:\n                server_time = int(float(resp.headers[\"X-Weave-Timestamp\"]))\n                self.timeskew = server_time - 
int(time.time())\n headers['Authorization'] = self._auth('GET', path_qs)\n async with session.get(url, headers=headers) as resp:\n return resp\n else:\n return resp\n\n\n@global_setup()\ndef set_token(args):\n set_var('client', StorageClient())\n\n\n@scenario(1)\nasync def test(session):\n storage = get_var('client')\n url = \"/info/collections\"\n\n resp = await storage.get(session, url)\n assert resp.status in (200, 404)\n","sub_path":"loadtest.py","file_name":"loadtest.py","file_ext":"py","file_size_in_byte":7461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"272049247","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Data handling\nimport pandas as pd\nimport numpy as np\n\n# Bokeh libraries\nfrom bokeh.io import output_file, output_notebook\nfrom bokeh.plotting import figure, show\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.layouts import row, column, gridplot\nfrom bokeh.models.widgets import Tabs, Panel\n\n#Plotting\nimport geopandas as gpd\n\nfig = figure()\n\n\ndf = pd.read_csv('cleanfeatures.csv', index_col=0)\ndf.rename(columns={'Election type':'Election_type'}, inplace=True)\n\n# Import reset_output (only needed once)\nfrom bokeh.plotting import reset_output\n\n# Use reset_output() between subsequent show() calls, as needed\nreset_output()\n\n\nshapefile = 'Shape_Files/ne_110m_admin_0_countries.shp'\n#Read shapefile using Geopandas\ngdf = gpd.read_file(shapefile)[['ADMIN', 'ADM0_A3', 'geometry']]\n#Rename columns.\ngdf.columns = ['country', 'country_code', 'geometry']\ngdf = gdf.drop(gdf.index[159])\n\n\n#Drop row corresponding to 'Antarctica'\ngdf = gdf.drop(gdf.index[159])\n\n\nfrom bokeh.io import curdoc, output_notebook\nfrom bokeh.models import Slider, HoverTool\nfrom bokeh.layouts import widgetbox, row, column\nfrom bokeh.models import GeoJSONDataSource, LinearColorMapper, ColorBar\nfrom bokeh.palettes import brewer\nimport json\n#Define function that returns json_data for year selected by user.\n\ndef json_data(selectedYear):\n yr = selectedYear\n df_yr = df[df['Year'] == yr]\n merged = gdf.merge(df_yr, left_on = 'country_code', right_on ='iso3', how = 'left')\n merged.fillna('No data', inplace = True)\n merged_json = json.loads(merged.to_json())\n json_data = json.dumps(merged_json)\n return json_data\n#Input GeoJSON source that contains features for plotting.\ngeosource = GeoJSONDataSource(geojson = json_data('2016'))\n#Define a sequential multi-hue color palette.\npalette = brewer['YlGnBu'][5]\n#Reverse color order so that dark blue is highest obesity.\npalette = palette[::-1]\n#Instantiate LinearColorMapper that linearly maps numbers in a range, into a sequence of colors. 
Input nan_color.\ncolor_mapper = LinearColorMapper(palette = palette, low = 0, high = 100, nan_color = '#d9d9d9')\n#Define custom tick labels for color bar.\ntick_labels = {'0': '0%', '20':'20%', '40':'40%', '60':'60%', '80': '80%', '100': '100%'}\n#Add hover tool\nhover = HoverTool(tooltips = [ ('Country/region','@Country'),('Type of election', '@Election_type'),('% of voting age population that voted', '@VAP_Turnout_Percentage{11.11}'), ('Compulsory Voting', '@Compulsory_voting')])\n#Create color bar.\ncolor_bar = ColorBar(color_mapper=color_mapper, label_standoff=8,width = 500, height = 20,\n                     border_line_color=None,location = (0,0), orientation = 'horizontal', major_label_overrides = tick_labels)\n#Create figure object; title matches the VAP turnout data and the slider callback below.\np = figure(title = 'Voting Age Population That Voted, 2016', plot_height = 600 , plot_width = 950, toolbar_location = 'below', toolbar_sticky=True, tools = [hover])\np.xgrid.grid_line_color = None\np.ygrid.grid_line_color = None\n#Add patch renderer to figure.\np.patches('xs','ys', source = geosource,fill_color = {'field' :'VAP_Turnout_Percentage', 'transform' : color_mapper},\n          line_color = 'black', line_width = 0.25, fill_alpha = 1)\n#Specify layout\np.add_layout(color_bar, 'below')\n# Define the callback function: update_plot\ndef update_plot(attr, old, new):\n    yr = slider.value\n    new_data = json_data(yr)\n    geosource.geojson = new_data\n    p.title.text = 'Voting Age Population That Voted, %d' %yr\n\n# Make a slider object: slider\nslider = Slider(title = 'Year',start = 1990, end = 2017, step = 1, value = 2016)\nslider.on_change('value', update_plot)\n# Make a column layout of widgetbox(slider) and plot, and add it to the current document\nlayout = column(p,widgetbox(slider))\ncurdoc().add_root(layout)\n#Display plot inline in Jupyter notebook\noutput_notebook()\n#Display plot\nshow(layout)\n","sub_path":"MapScript.py","file_name":"MapScript.py","file_ext":"py","file_size_in_byte":3880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"569956654","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 29 20:01:32 2017\n\n@author: Joao Marcos Costa\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import integrate\n\ndef _cos(x,func,n_,w_):\n\treturn func(x)*np.cos(x*n_*w_)\n\t\n\n\n\t\ndef _sin(x,func,n_,w_):\n\treturn func(x)*np.sin(x*n_*w_)\n\t\ndef an_bn(func,T0,n=10):\n\tbn=np.zeros(n)\n\tW = 2*np.pi/T0\n\tan=np.zeros(n)\n\n\tfor i in range(n):\n\t\tan[i]=(2/T0)*(integrate.quad(_cos,0,T0,args=(func,i,W))[0])\n\t\tbn[i]=(2/T0)*(integrate.quad(_sin,0,T0,args=(func,i,W))[0])\n\n\treturn an,bn\n\ndef rebuild(an_coefs,bn_coefs,T,x):\n\tw0 = 2*np.pi/T\n\tN = len(an_coefs)\n\tf_sum = 0\n\tfor n in range(N):\n\t\tf_sum += an_coefs[n]*np.cos(x*n*w0)\n\t\tf_sum += bn_coefs[n]*np.sin(x*n*w0)\n\treturn f_sum\n\t\nx = np.linspace(0,10,10000)\nT0 = 1/5000\n\n\n\n","sub_path":"fseries.py","file_name":"fseries.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"168916831","text":"from tensorflow.examples.tutorials.mnist import input_data\r\nimport tensorflow as tf\r\nimport tensorflow.contrib.slim as slim\r\nimport numpy as np\r\nimport argparse\r\n\r\n\r\ndef model(input, is_training):\r\n with slim.arg_scope([slim.conv2d, slim.fully_connected],\r\n activation_fn=tf.nn.crelu,\r\n normalizer_fn=slim.batch_norm,\r\n normalizer_params={'is_training':is_training, 'decay':0.9}):\r\n conv1 = slim.conv2d(input, 16, kernel_size=3, scope='conv1')\r\n pool1 = slim.max_pool2d(conv1, kernel_size=2, scope='pool1')\r\n conv2 = slim.conv2d(pool1, 32, kernel_size=3, scope='conv2')\r\n pool2 = slim.max_pool2d(conv2, kernel_size=2, scope='pool2')\r\n flatten = slim.flatten(pool2, scope='flatten')\r\n fc1 = slim.fully_connected(flatten, 500, scope='fc1')\r\n dropout = slim.dropout(fc1, is_training=is_training)\r\n fc2 = slim.fully_connected(dropout, 10, activation_fn=None, scope='out')\r\n return fc2\r\n\r\ndef train(mnist):\r\n x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x-input')\r\n y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')\r\n global_step = tf.Variable(0, trainable=False)\r\n\r\n y = model(x, True)\r\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=y, labels=y_)\r\n cross_entropy = tf.reduce_mean(cross_entropy)\r\n\r\n accuracy = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\r\n accuracy = tf.reduce_mean(tf.cast(accuracy, tf.float32))\r\n\r\n learning_rate = tf.train.exponential_decay(learning_rate=args.learning_rate, global_step=global_step,\r\n decay_steps=mnist.train.num_examples // args.batch_size,\r\n decay_rate=args.learning_rate_decay)\r\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\r\n with tf.control_dependencies(update_ops):\r\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy, global_step)\r\n\r\n saver = tf.train.Saver()\r\n init = tf.global_variables_initializer()\r\n\r\n with tf.Session() as sess:\r\n init.run()\r\n for i in range(args.max_step):\r\n xs, ys = mnist.train.next_batch(args.batch_size)\r\n xs = np.reshape(xs, [args.batch_size, 28, 28, 1])\r\n _, loss, acc, step = sess.run([train_step, cross_entropy, accuracy, global_step], feed_dict={x: xs, y_: ys})\r\n if step % 500 == 0:\r\n print('{} epoches, loss: {}, accuracy: {}'.format(step, loss, acc))\r\n\r\n if step % 1000 == 0:\r\n saver.save(sess, args.logs + 'mnist_bn_model', global_step)\r\n\r\n\r\ndef evaluate(mnist):\r\n x = tf.placeholder(tf.float32, [None, 28, 28, 1], name='x-input')\r\n y_ = tf.placeholder(tf.float32, [None, 10], name='y-input')\r\n\r\n y = model(x, False)\r\n\r\n pred = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\r\n accuracy = tf.reduce_mean(tf.cast(pred, tf.float32))\r\n\r\n feed_dict = {x: np.reshape(mnist.validation.images, [-1, 28, 28, 1]), y_: mnist.validation.labels}\r\n saver = tf.train.Saver()\r\n\r\n with tf.Session() as sess:\r\n saver.restore(sess, args.logs + 'mnist_bn_model-' + str(args.max_step))\r\n acc = sess.run(accuracy, feed_dict=feed_dict)\r\n print('test accuracy: {}'.format(acc))\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='train or evaluate mnist of using bn layer')\r\n parser.add_argument('command', metavar='', help='train or evaluate')\r\n parser.add_argument('--batch_size', default=100, help='num pictures of one batch')\r\n parser.add_argument('--learning_rate', default=0.8, help='initial learning rate of the net')\r\n parser.add_argument('--learning_rate_decay', default=0.9, help='rate decay 
after one epoch')\r\n parser.add_argument('--max_step',default=3000, help='total step for training')\r\n parser.add_argument('--logs', default='path/logs/', help='Logs and checkpoints directory')\r\n\r\n args = parser.parse_args()\r\n print('command:', args.command)\r\n print('batch_size:', args.batch_size)\r\n print('learning_rate:', args.learning_rate)\r\n print('learning_rate_decay:', args.learning_rate_decay)\r\n print('max_step:', args.max_step)\r\n\r\n mnist = input_data.read_data_sets('MNIST_data', one_hot=True)\r\n if args.command == 'train':\r\n train(mnist)\r\n else:\r\n print('loading weights...')\r\n evaluate(mnist)","sub_path":"tensorlfow bn_layer/mnist_bn.py","file_name":"mnist_bn.py","file_ext":"py","file_size_in_byte":4465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"593395100","text":"class Solution(object):\n def pivotIndex(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n l_sum = 0 \n r_sum = sum(nums)\n n = len(nums)\n \n for i in range(0,n):\n r_sum -= nums[i]\n if l_sum==r_sum:\n return i\n l_sum += nums[i]\n \n return -1","sub_path":"Pivot_index/pivot_index_sol1.py","file_name":"pivot_index_sol1.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"574885955","text":"from typing import TypeVar, Sequence, Mapping, Set, Tuple\nfrom scipy.linalg import eig\nfrom base_files import mdp\nfrom base_files import value_iteration\nimport numpy as np\nimport random\nimport copy\nimport math\n\nS = Tuple[float, float]\nA = Tuple[float, float]\n\ntrans_matrix_type = Mapping[S, Mapping[A, Mapping[S, float]]]\nreward_type = Mapping[S, Mapping[A, float]]\npolicy_type = Mapping[S, Mapping[A, float]]\n\nclass mertonPortofolio():\n def __init__(self, \n expiry : float, \n r : float, \n mu : np.ndarray,\n cov : np.ndarray,\n epsilon: float,\n gamma: float):\n self.expiry = expiry # = T\n self.r = r # = risk-free rate\n self.mu = mu # = risky rate means (1-D array of length num risky assets)\n self.cov = cov # = risky rate covariances (2-D square array of length num risky assets)\n self.epsilon = epsilon # = bequest parameter\n self.gamma = gamma # = CRRA parameter\n\n def getMertonTransition(self, \n state: Tuple[float, float], \n action: Tuple[float, float]):\n risky_return = np.random.normal(self.mu[0], self.cov[0])\n wealth = state[1]\n risky_allocation = action[0]\n wealth_consumption = action[1]\n next_wealth = (wealth - wealth_consumption) * \\\n ((1 - risky_allocation) * (1 + self.r) + risky_allocation * (1 + risky_return))\n return [state[0] + 1, next_wealth[0]]\n\n def getMertonReward(self, state: S) ->float :\n time = state[0]\n wealth = state[1]\n if time != self.expiry:\n if self.gamma == 0:\n return np.log(wealth)\n else:\n return wealth ** (1.0-self.gamma) / (1.0-self.gamma)\n else:\n return 0.0\n\n def getMertonDataAll(self,\n state: Tuple[float, float]) -> Tuple[trans_matrix_type, reward_type, policy_type]:\n transition_matrix : trans_matrix_type = {}\n reward : reward_type = {}\n policy : policy_type = {}\n t = 0\n all_merton_states = []\n all_merton_states.append(state)\n while t < self.expiry:\n state = tuple(all_merton_states.pop(0))\n # assume for each state, there are two possible actions, each are generated randomly\n # this assumption is made so that we can simplify the MDP\n action_1 = tuple([random.uniform(0, 5), random.uniform(0, 5)])\n action_2 = tuple([random.uniform(0, 5), random.uniform(0, 5)])\n next_state_1 = tuple(self.getMertonTransition(state, action_1))\n next_state_2 = tuple(self.getMertonTransition(state, action_2))\n sub_dict_1 = {action_1: {next_state_1: 1.0}}\n sub_dict_2 = {action_2: {next_state_2: 1.0}} \n if state in transition_matrix.keys():\n list1 = list(transition_matrix.items())\n list2 = list(sub_dict_1.items())\n list1[0][1][list2[0][0]] = list2[0][1]\n transition_matrix = dict(list1)\n list1 = list(transition_matrix.items())\n list2 = list(sub_dict_2.items())\n list1[0][1][list2[0][0]] = list2[0][1]\n transition_matrix = dict(list1)\n else:\n transition_matrix[state] = copy.deepcopy(sub_dict_1)\n list1 = list(transition_matrix.items())\n list2 = list(sub_dict_2.items())\n list1[0][1][list2[0][0]] = list2[0][1]\n transition_matrix = dict(list1)\n reward[state] = {action_1: self.getMertonReward(state), \n action_2: self.getMertonReward(state)}\n policy[state] = {action_1: 0.5, action_2: 0.5}\n all_merton_states.append(next_state_1)\n all_merton_states.append(next_state_2)\n t += 0.1\n return [transition_matrix, reward, policy]\n\nif __name__ == '__main__':\n expiry = 0.4\n r = 0.04\n mu = np.array([0.08])\n cov = np.array([[0.0009]])\n epsilon = 1e-8\n gamma = 0.2\n discount_rate = 0.8\n\n mp = mertonPortofolio(expiry, r, mu, cov, epsilon, gamma)\n\n initial_wealth = 10\n initial_state = [0, 
initial_wealth]\n    transition_matrix, reward, policy = mp.getMertonDataAll(initial_state)\n    # print(policy)\n\n    mdp = mdp.MDP(transition_matrix, reward, policy, discount_rate)\n    vi_dict = value_iteration.valueIteration(mdp, 100)[0]\n    new_dict = {}\n    for key, val in vi_dict.items():\n        # NaN never compares equal to itself, so use math.isnan for the check.\n        if val is not None and not math.isnan(val):\n            new_dict[key] = val\n    print(\"Value iteration: \", new_dict)\n\n","sub_path":"Assignments/2_Financial_Application/merton_MDP_value_iteration.py","file_name":"merton_MDP_value_iteration.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"43328850","text":"# Example:\n# values = [{\"name\": \"Michelangelo\", \"food\": \"PIZZA\"}, {\"name\": \"Garfield\", \"food\": \"lasagna\"}]\n# string_factory(values)\n# [\"Hi, I'm Michelangelo and I love to eat PIZZA!\", \"Hi, I'm Garfield and I love to eat lasagna!\"]\n\ntemplate = \"Hi, I'm {name} and I love to eat {food}!\"\n\ndef string_factory(val):\n new = []\n for v in val:\n new.append(template.format(**v))\n # new.append(template + \".format(**v)\")\n return new\nvalues = [{\"name\": \"Michelangelo\", \"food\": \"PIZZA\"}, {\"name\": \"Garfield\", \"food\": \"lasagna\"}]\nprint(string_factory(values))\n","sub_path":"learn/th/python collection/dic/prac.py","file_name":"prac.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"229055306","text":"import sys\nimport pprint\nimport collections\nsys.stdin = open('가능한 시험 점수.txt','r')\n\n\nT = int(input())\nfor tc in range(1,T+1):\n N = int(input())\n scores = list(map(int,input().split()))\n visit = [0] * 10001\n visit[0] = 1\n for s in scores:\n for i in range(10000,-1,-1):\n if visit[i]:\n visit[i + s] = 1\n print('#{} {}'.format(tc,sum(visit)))\n \n \n\n \n","sub_path":"10월/1001/가능한 시험 점수 너비우선.py","file_name":"가능한 시험 점수 너비우선.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"574255230","text":"import requests\nimport os \nimport aiohttp \nimport asyncio \nif not os.path.exists('./pic_as'):\n os.mkdir('./pic_as')\nasync def fetch(session,url):\n print('发送请求',url)\n async with session.get(url,verify_ssl=False) as response:\n content = await response.content.read()\n file_path = './pic_as/'+ url.rsplit('/')[-1]\n with open(file_path,'wb') as fb:\n fb.write(content)\n print('下载完成:',url)\n\nasync def main():\n async with aiohttp.ClientSession() as session:\n url_list = {\n 'https://pic.qiushibaike.com/system/pictures/12436/124360753/medium/7TKGSDY0E3FBRC8Q.jpg',\n 'https://pic.qiushibaike.com/system/pictures/12437/124377444/medium/JBYNB7E71BC5NYRB.jpg',\n 'https://pic.qiushibaike.com/system/pictures/12437/124375149/medium/00EA4WHMKKPFHTJN.jpg'\n }\n tasks = [asyncio.create_task(fetch(session,url)) for url in url_list]\n await asyncio.wait(tasks)\n\nif __name__ == '__main__':\n asyncio.run(main())","sub_path":"补充知识_异步编程/协程方式下载图片.py","file_name":"协程方式下载图片.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"571060158","text":"import os\nimport subprocess\n\nfrom api import api_call, post_call\nfrom config import SETTINGS\nfrom helpers import create_embed, LetterboxdError\n\n\nasync def user_embed(username):\n username = username.lower()\n url = 'https://letterboxd.com/{}'.format(username)\n lbxd_id = __check_if_fixed_search(username)\n if not lbxd_id:\n lbxd_id = await __search_profile(username)\n member_json = await __get_userjson(lbxd_id)\n display_name, avatar_url, description = await __get_infos(member_json, lbxd_id)\n fav_text, fav_posters_link = __get_favs(member_json)\n description += fav_text\n fav_img_link = ''\n if fav_posters_link:\n fav_img_link = await __upload_fav_posters(username, fav_posters_link)\n return create_embed(display_name, url, description, avatar_url,\n fav_img_link)\n\n\nasync def user_details(username):\n username = username.lower()\n lbxd_id = __check_if_fixed_search(username)\n if not lbxd_id:\n lbxd_id = await __search_profile(username)\n member_json = await __get_userjson(lbxd_id)\n display_name, avatar_url, __ = await __get_infos(member_json, lbxd_id, False)\n return username, display_name, lbxd_id, avatar_url\n\n\ndef __check_if_fixed_search(username):\n for fixed_username, lbxd_id in SETTINGS['fixed_user_search'].items():\n if fixed_username.lower() == username:\n return lbxd_id\n return ''\n\n\nasync def __search_profile(username):\n params = {\n 'input': username.replace('_', ' '),\n 'include': 'MemberSearchItem',\n 'perPage': '100'\n }\n while True:\n response = await api_call('search', params)\n if not response['items']:\n break\n for result in response['items']:\n if result['member']['username'].lower() == username:\n return result['member']['id']\n if response.get('next'):\n params['cursor'] = response['next']\n else:\n break\n raise LetterboxdError('The user **' + username + '** wasn\\'t found.')\n\n\nasync def __get_userjson(lbxd_id):\n member_response = await api_call('member/{}'.format(lbxd_id))\n if member_response == '':\n raise LetterboxdError(\n 'The user wasn\\'t found. 
' +\n 'They may have refused to be reachable via the API.')\n return member_response\n\n\nasync def __get_infos(member_json, lbxd_id, with_stats=True):\n display_name = member_json['displayName']\n avatar_url = member_json['avatar']['sizes'][-1]['url']\n description = '**'\n if member_json.get('location'):\n description += member_json['location'] + '** -- **'\n if with_stats:\n stats_json = await api_call('member/{}/statistics'.format(lbxd_id))\n description += str(stats_json['counts']['watches']) + ' films**\\n'\n return display_name, avatar_url, description\n\n\ndef __get_favs(member_json):\n description = ''\n fav_posters_link = list()\n for fav_film in member_json['favoriteFilms']:\n fav_name = fav_film['name']\n if fav_film.get('poster'):\n for poster in fav_film['poster']['sizes']:\n if 150 < poster['width'] < 250:\n fav_posters_link.append(poster['url'])\n if fav_film.get('releaseYear'):\n fav_name += ' (' + str(fav_film['releaseYear']) + ')'\n for link in fav_film['links']:\n if link['type'] == 'letterboxd':\n fav_url = link['url']\n description += '[{0}]({1})\\n'.format(fav_name, fav_url)\n return description, fav_posters_link\n\n\nasync def __upload_fav_posters(username, fav_posters_link):\n # Download posters\n if not os.path.exists(username):\n os.popen('mkdir ' + username)\n img_cmd = 'convert '\n for index, fav_poster in enumerate(fav_posters_link):\n img_data = await api_call(fav_poster, None, False, False)\n temp_fav = '{0}/fav{1}.jpg'.format(username, index)\n img_cmd += temp_fav + ' '\n with open(temp_fav, 'wb') as handler:\n handler.write(img_data)\n\n # Upload to Cloudinary\n img_cmd += '+append {}/fav.jpg'.format(username)\n subprocess.call(img_cmd, shell=True)\n with open('{}/fav.jpg'.format(username), 'rb') as pic:\n bin_pic = pic.read()\n os.popen('rm -r ' + username)\n upload_url = 'https://api.cloudinary.com/v1_1/'\n upload_url += SETTINGS['cloudinary']['cloud_name'] + '/image/upload'\n params = {'file': bin_pic,\n 'upload_preset': SETTINGS['cloudinary']['preset']}\n result = await post_call(upload_url, params)\n return result['url']\n","sub_path":"user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"63756285","text":"import pandas as pd\r\nimport numpy as np\r\nimport math\r\nimport csv\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\nfrom lineaRegression import LineaRegression\r\n\r\n#command line argument variables\r\ntrainData = sys.argv[1]\r\ntestData = sys.argv[2]\r\n\r\n#Add the Labels\r\nhouse_train = pd.read_csv(trainData, names =['CRIM','ZIN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PIRATIO','B','LSTAT','MEDV'] )\r\nhouse_test = pd.read_csv(testData, names =['CRIM','ZIN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PIRATIO','B','LSTAT','MEDV'] )\r\n\r\n#setting up python lists for plotting\r\ntrainX = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]\r\ntestX = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20]\r\ntrainY = []\r\ntestY = []\r\nd = 2\r\nwhile d <= 20:\r\n\r\n #insert our 2 random sampled rows\r\n s1 = np.random.normal(0, 0.1, len(house_train.index))\r\n s2 = np.random.normal(0, 0.1, len(house_train.index))\r\n house_train.insert(0, \"\", s1, True)\r\n house_train.insert(0, \"\", s2, True)\r\n s1 = np.random.normal(0, 0.1, len(house_test.index))\r\n s2 = np.random.normal(0, 0.1, len(house_test.index))\r\n house_test.insert(0, \"\", s1, True)\r\n house_test.insert(0, \"\", s2, True)\r\n\r\n # Selecting all but last column of data frame with all rows\r\n x_train = house_train.iloc[:,0:-1].values\r\n x_test = house_test.iloc[:,0:-1].values\r\n\r\n # Selecting last column of data frame for train/test data as matrix\r\n y_train = (np.matrix(house_train.iloc[:,-1].values, dtype=float)).T\r\n y_test = (np.matrix(house_test.iloc[:,-1].values, dtype=float)).T\r\n\r\n #Do the Calculation\r\n output = LineaRegression(np.matrix(x_train), y_train)\r\n output2 = LineaRegression(np.matrix(x_test), y_test)\r\n\r\n trainY.append(output.ase())\r\n testY.append(output2.ase())\r\n\r\n d = d + 2\r\n\r\n#plot graphs\r\nplt.plot(trainX, trainY)\r\nplt.xlabel('d')\r\nplt.ylabel('ASE')\r\nplt.title('Training ASE over different d')\r\nplt.show()\r\n\r\nplt.plot(testX, testY)\r\nplt.xlabel('d')\r\nplt.ylabel('ASE')\r\nplt.title('Testing ASE over different D')\r\nplt.show()\r\n","sub_path":"q1_4.py","file_name":"q1_4.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"358439637","text":"import random\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport os\nimport sys\nfrom distutils.dir_util import copy_tree\nimport aws_utils\nimport pickle\nimport logging\nfrom copy import deepcopy\n\n\nclass RNGSeed:\n def __init__(self, seed, deterministic=True):\n self.seed = seed\n self.deterministic = deterministic\n self.set_random_seeds()\n\n def set_random_seeds(self):\n seed = self.seed\n random.seed(seed)\n np.random.seed(seed)\n cudnn.enabled = True\n\n if self.deterministic:\n cudnn.benchmark = False\n cudnn.deterministic = True\n else:\n cudnn.benchmark = True\n\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n def get_save_states(self):\n rng_states = {\n \"random_state\": random.getstate(),\n \"np_random_state\": np.random.get_state(),\n \"torch_random_state\": torch.get_rng_state(),\n \"torch_cuda_random_state\": torch.cuda.get_rng_state_all(),\n }\n return rng_states\n\n def load_states(self, rng_states):\n random.setstate(rng_states[\"random_state\"])\n np.random.set_state(rng_states[\"np_random_state\"])\n torch.set_rng_state(rng_states[\"torch_random_state\"])\n torch.cuda.set_rng_state_all(rng_states[\"torch_cuda_random_state\"])\n\n\ndef save(\n folder,\n epochs,\n rng_seed,\n model,\n optimizer,\n history=None,\n s3_bucket=None,\n):\n\n checkpoint = {\n \"epochs\": epochs,\n \"rng_seed\": rng_seed.get_save_states(),\n \"optimizer\": optimizer.state_dict(),\n \"model\": model.state_dict(),\n \"arch_params\": model._modules['module']._arch_parameters\n }\n\n ckpt = os.path.join(folder, \"model.ckpt\")\n torch.save(checkpoint, ckpt)\n\n if history is not None:\n history_file = os.path.join(folder, \"history.pkl\")\n with open(history_file, \"wb\") as f:\n pickle.dump(history, f)\n\n log = os.path.join(folder, \"log.txt\")\n\n if s3_bucket is not None:\n aws_utils.upload_to_s3(ckpt, s3_bucket, ckpt)\n aws_utils.upload_to_s3(log, s3_bucket, log)\n if history is not None:\n aws_utils.upload_to_s3(history_file, s3_bucket, history_file)\n\ndef load(folder, rng_seed, model, optimizer, s3_bucket=None):\n # Try to download log and ckpt from s3 first to see if a ckpt exists.\n ckpt = os.path.join(folder, \"model.ckpt\")\n history_file = os.path.join(folder, \"history.pkl\")\n history = None\n\n if s3_bucket is not None:\n aws_utils.download_from_s3(ckpt, s3_bucket, ckpt)\n try:\n aws_utils.download_from_s3(history_file, s3_bucket, history_file)\n except:\n logging.info(\"history.pkl not in s3 bucket\")\n\n if os.path.exists(history_file):\n with open(history_file, \"rb\") as f:\n history = pickle.load(f)\n\n checkpoint = torch.load(ckpt)\n\n epochs = checkpoint[\"epochs\"]\n rng_seed.load_states(checkpoint[\"rng_seed\"])\n model.load_state_dict(checkpoint[\"model\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n module = model.module\n params = [\n module.alphas_normal,\n module.alphas_reduce,\n module.betas_normal,\n module.betas_reduce,\n ]\n \n for p, s in zip(params, checkpoint['arch_params']):\n p = s\n\n logging.info(\"Resumed model trained for %d epochs\" % epochs)\n\n return epochs, history\n\n","sub_path":"train_utils.py","file_name":"train_utils.py","file_ext":"py","file_size_in_byte":3500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"279722496","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport time\n \nclass Timer(object):\n def __init__(self, start_time=time.time(), limit=100):\n self.start_time = start_time\n self.limit = limit\n \n def __call__(self, step, mess='', prints=True):\n if prints and (step % self.limit != 0) and (step > 10):\n return\n message = '[%8d][%s] %s' % (step, hms(self.start_time), mess)\n if prints:\n print(message)\n else:\n return message\n \n\ndef hms(start_time):\n t = int(time.time() - start_time)\n m, s = t//60, t % 60\n h, m = m//60, m % 60\n if h > 0:\n return '%2dh%02dm%02ds' % (h, m, s)\n elif m > 0:\n return '%5dm%02ds' % (m, s)\n else:\n return '%8ds' % s","sub_path":"image_generation/utils/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"39621447","text":"import cv2\nimport numpy as np\nimport math\nimport subprocess\nimport sys\nimport random\nimport os\nimport time\n\nSTART_OFFSET = 2 / 3\nCOEFF = 185\n\n\ndef getScreenshot(screenshotPath):\n process = subprocess.Popen(\n 'adb shell screencap -p', shell=True, stdout=subprocess.PIPE)\n screenshot = process.stdout.read()\n if sys.platform == 'win32':\n screenshot = screenshot.replace(b'\\r\\n', b'\\n')\n f = open(screenshotPath, 'wb')\n f.write(screenshot)\n f.close()\n\n\ndef press(time):\n command = 'adb shell input swipe {x1} {y1} {x2} {y2} {duration}'.format(\n x1=random.randint(0, 1080),\n y1=random.randint(0, 1920),\n x2=random.randint(0, 1080),\n y2=random.randint(0, 1920),\n duration=round(time)\n )\n os.system(command)\n\n\ndef getDistance(screenshotPath):\n screenshot = cv2.imread(screenshotPath)\n hsvScreenshot = cv2.cvtColor(screenshot, cv2.COLOR_BGR2HSV)\n\n penguinMask = cv2.inRange(hsvScreenshot, np.array(\n [0, 2, 30]), np.array([175, 35, 65]))\n cv2.imwrite(\"penguin_mask.png\", penguinMask)\n\n possiblePenguinBottomYWithW = []\n tempW = 1\n preAppend = []\n for i, j in enumerate(penguinMask):\n if (j == 255).any():\n if (penguinMask[i - 1] != 255).all():\n preAppend.append(i)\n if (penguinMask[i + 1] != 255).all():\n preAppend = preAppend + [i, tempW]\n possiblePenguinBottomYWithW.append(preAppend)\n tempW = 1\n preAppend = []\n else:\n tempW = tempW + 1\n\n for i in possiblePenguinBottomYWithW:\n print(\"Penguin: Possible Y: Start:\",\n i[0], \"End:\", i[1], \"Weight:\", i[2])\n\n def getW(elem): return elem[-1]\n possiblePenguinBottomYWithW.sort(key=getW, reverse=True)\n penguinBottomY = possiblePenguinBottomYWithW[0][1]\n penguinHeight = penguinBottomY - possiblePenguinBottomYWithW[0][0]\n print(\"Penguin: Selected Y:\", penguinBottomY)\n print(\"Penguin: Height:\", penguinHeight)\n\n possiblePenguinBottomX = np.where(penguinMask[penguinBottomY] == 255)[0]\n for i in possiblePenguinBottomX:\n print(\"Penguin: Possible X:\", i)\n\n penguinBottomX = possiblePenguinBottomX[round(\n len(possiblePenguinBottomX)/2)]\n print(\"Penguin: Selected X:\", penguinBottomX)\n\n startCenterX = penguinBottomX\n startCenterY = penguinBottomY - round(penguinHeight / 15)\n\n print(\"Start Center:\", startCenterX, startCenterY)\n\n endMask = cv2.inRange(hsvScreenshot, np.array(\n [0, 180, 255]), np.array([1, 195, 255]))\n cv2.imwrite(\"end_mask.png\", endMask)\n\n possibleEndBottomYWithW = []\n tempW = 1\n for i, j in enumerate(endMask):\n if (j == 255).any():\n if (endMask[i + 1] != 255).all():\n possibleEndBottomYWithW.append([i, tempW])\n tempW = 1\n else:\n tempW = tempW + 1\n\n for i in possibleEndBottomYWithW:\n print(\"End: Possible Y:\", i[0], \"Weight:\", i[1])\n\n possibleEndBottomYWithW.sort(key=getW, reverse=True)\n for i, j in enumerate(possibleEndBottomYWithW):\n try:\n if abs(j[1] - possibleEndBottomYWithW[i + 1][1]) < 3:\n if j[0] < possibleEndBottomYWithW[i + 1][0]:\n possibleEndBottomYWithW.pop(i + 1)\n else:\n possibleEndBottomYWithW.pop(i)\n except:\n continue\n\n endBottomY = possibleEndBottomYWithW[0][0]\n print(\"End: Selected Y:\", endBottomY)\n\n possibleEndBottomX = np.where(endMask[endBottomY] == 255)[0]\n for i in possibleEndBottomX:\n print(\"End: Possible X:\", i)\n\n endBottomX = possibleEndBottomX[round(\n len(possibleEndBottomX)/2)]\n print(\"End: Selected X:\", endBottomX)\n\n endCenterX = endBottomX\n endCenterY = endBottomY - round(penguinHeight * 1 / 8)\n\n print(\"End Center:\", endCenterX, endCenterY)\n\n distance = 
math.sqrt((endCenterX - startCenterX)\n ** 2 + (endCenterY - startCenterY) ** 2) / penguinHeight\n print(\"Distance:\", distance)\n\n return distance\n\n\ndef main():\n while True:\n input()\n getScreenshot(\"screenshot.png\")\n press((getDistance(\"screenshot.png\") - START_OFFSET) * COEFF)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"82457884","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_regression\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom keras.models import Sequential\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom keras.optimizers import SGD, Adam\nfrom keras.constraints import maxnorm\nfrom keras.layers import Dense, Conv2D, MaxPooling2D\nfrom ast import literal_eval\nimport my_functions\nfrom operator import add\nfrom itertools import chain\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import KFold\nnp.set_printoptions(threshold=np.inf)\n\ndf = pd.read_csv('Data/residual_dataset_new.csv', header=0)\ndf.reset_index(drop=True,inplace=True)\n\n# Model configuration\nbatch_size = 5\nloss_function = 'mean_squared_error'\nno_epochs = 100\noptimizer = Adam(lr=0.001)\nids = []\ntesting_pers =[]\ntesting_lables = []\ntesting_post_MM = []\ntesting_post_true = [] \ntesting_id = []\nevaluating_pers = []\nevaluating_lables = []\nevaluating_post_MM = []\nevaluating_post_true = []\nevaluating_id = []\n\ndef get_data():\n\n training_set = []\n lables = []\n post_ts_MM = []\n post_ts_true = []\n pre_ts_true = []\n\n for index, row in df.iterrows():\n if sum(literal_eval(df.true_post_finger_pressure_cycle[index])) == 0:\n continue\n if (df.roottable_case_id_text.values[index]) == 217:\n testing_pers.append([df.roottable_age_value.values[index], df.roottable_sex_item.values[index], df.clinical_visits_body_mass_index_value.values[index], df.clinical_visits_cpet_vo2max_value.values[index],\n df.clinical_visits_pre_24h_dbp_mean_value.values[index], df.clinical_visits_pre_24h_sbp_mean_value.values[index], df.exercise_value.values[index]])\n testing_lables.append(literal_eval(df.estimate_error[index]))\n testing_post_MM.append(literal_eval(df.mm_post_finger_pressure_cycle[index]))\n testing_post_true.append(literal_eval(df.true_post_finger_pressure_cycle[index]))\n testing_id.append(df.roottable_case_id_text.values[index])\n elif (df.roottable_case_id_text.values[index]) == 67:\n evaluating_pers.append([df.roottable_age_value.values[index], df.roottable_sex_item.values[index], df.clinical_visits_body_mass_index_value.values[index], df.clinical_visits_cpet_vo2max_value.values[index],\n df.clinical_visits_pre_24h_dbp_mean_value.values[index], df.clinical_visits_pre_24h_sbp_mean_value.values[index], df.exercise_value.values[index]])\n evaluating_lables.append(literal_eval(df.estimate_error[index]))\n evaluating_post_MM.append(literal_eval(df.mm_post_finger_pressure_cycle[index]))\n evaluating_post_true.append(literal_eval(df.true_post_finger_pressure_cycle[index]))\n evaluating_id.append(df.roottable_case_id_text.values[index])\n else:\n training_set.append([df.roottable_age_value.values[index], df.roottable_sex_item.values[index], df.clinical_visits_body_mass_index_value.values[index], df.clinical_visits_cpet_vo2max_value.values[index],\n df.clinical_visits_pre_24h_dbp_mean_value.values[index], df.clinical_visits_pre_24h_sbp_mean_value.values[index], df.exercise_value.values[index]])\n lables.append(literal_eval(df.estimate_error[index]))\n post_ts_MM.append(literal_eval(df.mm_post_finger_pressure_cycle[index]))\n post_ts_true.append(literal_eval(df.true_post_finger_pressure_cycle[index]))\n pre_ts_true.append(literal_eval(df.pre_finger_pressure_cycle[index]))\n ids.append(df.roottable_case_id_text.values[index])\n\n x_values = pd.DataFrame(training_set).values\n y_all = lables \n\n 
ts = [] \n ts.extend(pre_ts_true)\n ts.extend(post_ts_true)\n ts = np.array(ts)\n mean_ts = my_functions.make_mean_vector(ts)\n ts_wo_mean = my_functions.subtract_mean_from_post_ts_data(ts, mean_ts)\n mean_vector_pre = my_functions.make_mean_vector(pre_ts_true)\n pre_ts_wo_mean = my_functions.subtract_mean_from_post_ts_data(pre_ts_true, mean_vector_pre)\n\n # PCA\n pca_model = PCA(0.95)\n pca_model.fit(ts_wo_mean)\n loadings = pca_model.transform(pre_ts_wo_mean) # number of components: 4\n\n min_max_scaler = MinMaxScaler()\n x_norm = min_max_scaler.fit_transform(x_values)\n y_all = np.array(y_all)\n\n train_x = np.array(x_norm)\n train_y = np.array(y_all)\n\n return train_x, train_y, post_ts_MM, post_ts_true, min_max_scaler \ntrain_x, train_y, post_ts_MM, post_ts_true, min_max_scaler = get_data()\nprint('Predicting for person with trial ID: ', evaluating_id[0])\n\ndef create_model():\n ### FUNCTIONAL API MODEL ###\n inputs = keras.Input(shape=(7,))\n dense1 = layers.Dense(50, activation=\"relu\")(inputs)\n dense2 = layers.Dense(100, activation=\"relu\")(dense1)\n dense3 = layers.Dense(150, activation=\"relu\")(dense2)\n outputs = layers.Dense(100)(dense3)\n model = keras.Model(inputs=inputs, outputs=outputs)\n\n model.compile(loss=loss_function, optimizer=optimizer)\n\n return model\n# model = create_model()\n\ndef fit_model(model):\n history = model.fit(train_x, train_y, epochs=no_epochs, batch_size=batch_size, verbose=0)\n\n # Evaluate the model\n # train_mse = model.evaluate(train_x, train_y, verbose=0)\n # test_mse = model.evaluate(test_x, test_y, verbose=0)\n # print('Train loss: %.3f, Test: %.3f' % (train_mse, test_mse))\n\n return model\n# model = fit_model()\n\ndef create_model_saved_weights():\n inputs = keras.Input(shape=(7,))\n dense1 = layers.Dense(50, activation=\"relu\")(inputs)\n dense2 = layers.Dense(100, activation=\"relu\")(dense1)\n dense3 = layers.Dense(150, activation=\"relu\")(dense2)\n outputs = layers.Dense(100)(dense3)\n model = keras.Model(inputs=inputs, outputs=outputs)\n\n model.compile(loss=loss_function, optimizer=optimizer)\n\n model.load_weights(\"Data/residual_weights_real_pat_\" + str(evaluating_id[0]))\n\n # model.summary()\n\n return model\n\n## TESTING ##\ndef predict(predict_pers_x, predict_pers_y, model, post_ts_MM, post_ts_true, i):\n predict_pers_xx = predict_pers_x.reshape(predict_pers_x.shape[0], 1)\n prediction = model.predict(predict_pers_xx.T)\n \n adding_prediction = [b - a for a, b in zip(prediction[0], post_ts_MM[0])]\n\n if i == 1:\n plt.plot(prediction[0], 'g')\n plt.plot(predict_pers_y[0], 'b')\n plt.show()\n\n plt.plot(adding_prediction, color='darkorange', label = 'Prediction')\n plt.plot(post_ts_true[0], color='midnightblue', label= 'True curve')\n plt.plot(post_ts_MM[0], color='crimson', label='Mechanistic model estimate')\n plt.legend()\n plt.title('Residual model with real data')\n plt.xlabel('Time points [-]')\n plt.ylabel('Blood pressure [mmHg]')\n plt.gcf().set_dpi(200)\n plt.show()\n\n # Calculate erorrs\n dbp = min(adding_prediction)\n sbp = max(adding_prediction)\n pp = sbp-dbp\n MAP = np.mean(adding_prediction)\n dbp_true = min(post_ts_true[0])\n sbp_true = max(post_ts_true[0])\n pp_true = sbp_true-dbp_true\n MAP_true = np.mean(post_ts_true[0])\n\n point_error = abs(prediction[0]-predict_pers_y[0])\n total_cycle_error = sum(abs(prediction[0]-predict_pers_y[0]))\n dbp_error = abs(dbp-dbp_true)\n sbp_error = abs(sbp-sbp_true)\n pp_error = abs(pp-pp_true)\n MAP_error = abs(MAP-MAP_true)\n\n print('Point error = ', 
np.mean(point_error), np.std(point_error))\n print('DBP error = ', np.mean(dbp_error), np.std(dbp_error))\n print('SBP error = ', np.mean(sbp_error), np.std(sbp_error))\n print('PP error = ', np.mean(pp_error), np.std(pp_error))\n print('MAP error = ', np.mean(MAP_error), np.std(MAP_error))\n print('Total error = ', np.mean(total_cycle_error), np.std(total_cycle_error))\n\n return np.mean(total_cycle_error)\n# error = predict()\n\n\n## RUN SEVERAL TIMES AND SAVE THE BEST MODEL ##\n'''\nbest_error = 4000\nevaluating_pers = np.array(evaluating_pers) \nevaluating_pers = min_max_scaler.transform(evaluating_pers)\nfor i in range(30):\n model_eval = create_model()\n model_fitted = fit_model(model_eval)\n error = predict(evaluating_pers.T, evaluating_lables, model_fitted, evaluating_post_MM, evaluating_post_true, 0)\n print('Iteration: ', i, ' with error: ', error)\n if error < best_error:\n print('New best error on number ', i,'. Error = ', error)\n model_fitted.save_weights('Data/residual_weights_real_pat_' + str(evaluating_id[0]))\n best_error = error\nprint('The best achieved error for ', evaluating_id[0], ' was: ', best_error)\n'''\n\n## EVALUATE ON THE PERSON LEFT OUT ##\ntesting_pers = np.array(testing_pers) \ntesting_pers = min_max_scaler.transform(testing_pers)\ntesting_lables = np.array(testing_lables)\nmodel_test = create_model_saved_weights()\nprint('Testing on person with trial ID: ', testing_id[0])\nprint(testing_pers.T)\nerror1 = predict(testing_pers.T, testing_lables, model_test, testing_post_MM, testing_post_true, 1)\n","sub_path":"Models/residual_real_model.py","file_name":"residual_real_model.py","file_ext":"py","file_size_in_byte":9037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"475669873","text":"# coding:utf8\n\nimport re\nfrom os import path\nfrom util.utils import get_string\nfrom util.log import warn\nfrom traceback import format_exc\nfrom util.redis_cache import get_cache, set_cache, del_cache\nimport os\nfrom settings_cms import REDIS_CONFIG, COOKIE_KEY, BASE_DIR, USE_CUSTOM_SETTING\n\n_base_path = path.join(BASE_DIR, 'ui', USE_CUSTOM_SETTING)\nimport shutil\nfrom settings_cms import USER_FILES_PATH, STATIC_FILES_PATH\nimport json\nfrom distutils import dir_util\nfrom util.config_utils import *\nfrom datetime import datetime\n\n\ndef get_abs_file_path_and_new_file_path(file_path):\n file_path = '/'.join(file_path.split('/')[1:])\n file_name = file_path.split('/')[-1].split('.')[0]\n file_ext = file_path.split('/')[-1].split('.')[1]\n new_file_name = file_name + '_old.' + file_ext\n f_list = file_path.split('/')[:-1]\n f_list.append(new_file_name)\n new_file_path = '/'.join(f_list)\n return path.join(_base_path, file_path), path.join(_base_path, new_file_path)\n\n\ndef parse_filename(file_path):\n basename = os.path.basename(file_path)\n filename, file_extension = os.path.splitext(basename)\n dirname = os.path.dirname(file_path)\n return dirname, filename, file_extension\n\n\ndef backup_file(file_path):\n basename = os.path.basename(file_path)\n dirname = os.path.dirname(file_path)\n filename, file_extension = os.path.splitext(basename)\n old_filename = ''.join([filename + \"_old\", file_extension])\n old_path = os.path.join(dirname, old_filename)\n if os.path.exists(file_path):\n shutil.copy(file_path, old_path)\n return True\n\n\ndef get_file_list(user, folder):\n res = {'success': False, 'data': '', 'message': '操作失败', 'user': user}\n if not user or not folder:\n return res\n folder_path = os.path.join(USER_FILES_PATH, user, folder)\n _files = os.listdir(folder_path)\n files = [os.path.join('/static', folder, x) for x in _files if \"_old.\" not in x]\n res.update({'data': files})\n res = {'success': True, 'data': '', 'message': '操作成功', 'user': user}\n return json.dumps(res)\n\n\ndef copy_static_files(user, source_path):\n tmp_path = source_path.replace(STATIC_FILES_PATH, '')\n path_suffix = os.path.join(*[x for x in tmp_path.split('/') if x])\n path_prefix = os.path.join(USER_FILES_PATH, user)\n target_path = os.path.join(path_prefix, path_suffix)\n if not os.path.exists(source_path):\n return False\n folder = os.path.dirname(target_path)\n if not os.path.exists(folder):\n os.makedirs(folder)\n shutil.copy(source_path, target_path)\n return True\n\n\ndef concate_path(prefix, suffix, junction=None):\n if not junction:\n junction = [x for x in prefix.split(os.sep) if x][-1]\n suffix_list = [x for x in suffix.split(os.sep) if x]\n idx = 0\n if junction in suffix_list:\n idx = suffix_list.index(junction) + 1\n _suffix = os.path.join(*suffix_list[idx:])\n absolute_path = os.path.join(prefix, _suffix)\n return absolute_path\n\n\ndef map_to_user_folder(user, source_path):\n \"\"\"\n 公共的模板路径对应至用户文件夹\n :param user:\n :param source_path:\n :return:\n \"\"\"\n _suffix = os.path.join(*source_path.replace(STATIC_FILES_PATH, '').split(os.sep))\n target_path = os.path.join(USER_FILES_PATH, user, _suffix)\n return target_path\n\n\ndef parse_category(absolute_path, identifier=\"web/html\", is_html=True):\n splits = os.path.join(*[x for x in absolute_path.split(identifier)[-1].split(os.sep) if x])\n if is_html:\n catetory = os.path.dirname(splits)\n else:\n catetory = os.path.join(*splits.rsplit('.', 1)[:-1])\n return catetory\n\n\ndef get_links(path):\n 
\"\"\"\n 获取指定文件夹下,文件的访问链接\n :param path:\n :return:\n \"\"\"\n static_files = ['html', 'css', 'js', 'images', 'web', 'app']\n absolute_path_list = []\n urls = []\n for (dir_path, a, files) in os.walk(path):\n if files:\n absolute_path_list.extend([os.path.join(dir_path, x) for x in files])\n for apl in absolute_path_list:\n if apl.startswith(USER_FILES_PATH):\n apl = apl.replace(USER_FILES_PATH, '')\n apl_list = [x for x in apl.split('/') if x][1:]\n else:\n apl = apl.replace(STATIC_FILES_PATH, '')\n apl_list = [x for x in apl.split('/') if x]\n _url = os.path.join('/static', *apl_list)\n if _url.endswith('html'):\n urls.append(_url)\n return urls\n\n\ndef get_absolute_path_by_url(_url, user=None):\n \"\"\"\n 根据模板的链接获取模板所在的文件路径\n :param _url:\n :param user:\n :return:\n \"\"\"\n if user:\n path_prefix = os.path.join(USER_FILES_PATH, user)\n else:\n path_prefix = STATIC_FILES_PATH\n if _url.startswith('/static/') or _url.startswith('static/'):\n path_suffix = os.path.join(*[x for x in _url.split('/') if x][1:])\n path = os.path.join(path_prefix, path_suffix)\n if not os.path.exists(path):\n path = os.path.join(STATIC_FILES_PATH, path_suffix)\n if not os.path.exists(path):\n return ''\n return path\n\n\ndef related_static_files(html_path):\n pass\n\n\ndef copy_templates_by_config(user, config_file):\n paths = []\n config = get_config(user, config_file)\n if not config or not user:\n return {\"success\": False, \"info\": \"模板不存在\", 'user': user}\n try:\n pages = config['web_module'].keys()\n modules = config['web_module']['index']['body']\n for ms in modules:\n link = ms.values()[0]['url']\n absolute_path = get_absolute_path_by_url(link)\n if os.path.exists(absolute_path):\n paths.append(absolute_path)\n target_path = map_to_user_folder(user, absolute_path)\n copy_with_create_dir(absolute_path, target_path) # 复制模板\n module_file_name = os.path.splitext(os.path.basename(absolute_path))[0]\n\n # 复制图片\n images_folder = os.path.join(STATIC_FILES_PATH, 'images', parse_category(absolute_path),\n module_file_name)\n if os.path.exists(images_folder):\n copy_folder(images_folder, map_to_user_folder(user, images_folder))\n # 复制css文件\n css_folder = os.path.join(STATIC_FILES_PATH, 'css')\n if os.path.exists(css_folder):\n copy_folder(css_folder, map_to_user_folder(user, css_folder))\n # 复制js文件\n js_folder = os.path.join(STATIC_FILES_PATH, 'js')\n if os.path.exists(js_folder):\n copy_folder(js_folder, map_to_user_folder(user, js_folder))\n res = {\"success\": True, \"message\": \"操作成功\"}\n except Exception as e:\n res = {\"success\": False, \"message\": \"操作失败\", \"info\": str(e)}\n return res\n\n\n\"\"\"\ncopy\n\"\"\"\n\n\ndef copy_with_create_dir(source, target):\n if not os.path.exists(source):\n return False\n\n folder = os.path.dirname(target)\n if not os.path.exists(folder):\n os.makedirs(folder)\n shutil.copy(source, target)\n return True\n\n\ndef copy_by_paths(paths, to_dir):\n try:\n if not os.path.exists(to_dir):\n os.makedirs(to_dir)\n for p in paths:\n if os.path.exists(p):\n shutil.copy(p, to_dir)\n return True\n except Exception as e:\n return False\n\n\ndef copy_folder(source, target):\n if not os.path.exists(source):\n return False\n dir_util.copy_tree(source, target)\n return True\n\n\ndef copy_by_template(client, token, user_dir):\n client = client if client else 'web'\n now = datetime.now().strftime('%Y%m%d%H%M%S')\n old_project_path = os.path.join(user_dir, client, 'project')\n new_project_path = os.path.join(user_dir, client, 'project-' + now)\n if os.path.exists(old_project_path):\n 
os.rename(old_project_path, new_project_path)\n dir_util._path_created = {}\n images_dir = os.path.join(STATIC_FILES_PATH, 'images', 'templates', token)\n images_target = os.path.join(user_dir, client, 'project', 'static', 'images', token)\n js_dir = os.path.join(STATIC_FILES_PATH, 'js/lib')\n user_images_dir = os.path.join(user_dir, 'images')\n if os.path.exists(user_images_dir):\n user_images_folder = os.path.join(user_dir, client, 'project', 'static', 'images')\n copy_folder(user_images_dir, user_images_folder)\n if os.path.exists(images_dir):\n copy_folder(images_dir, images_target)\n if os.path.exists(js_dir):\n copy_folder(js_dir, os.path.join(user_dir, client, 'project/static/js/lib'))\n\n\ndef read_static_file(user, static_url):\n static_path_list = [x for x in static_url.split(os.sep) if x]\n if static_path_list[0] == 'static':\n static_path_list.pop(0)\n _static_path = os.path.join(*static_path_list)\n user_absolute_path = os.path.join(USER_FILES_PATH, _static_path)\n content = \"\"\n if os.path.exists(user_absolute_path):\n content = open(user_absolute_path, 'r').read()\n return content\n absolute_path = os.path.join(STATIC_FILES_PATH, _static_path)\n if os.path.exists(absolute_path):\n content = open(absolute_path, 'r').read()\n return content\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"util/file_utils.py","file_name":"file_utils.py","file_ext":"py","file_size_in_byte":9342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"382000134","text":"import numpy as np\nimport tensorflow as tf\nimport tensorlayer as tl\n\nsess = tf.InteractiveSession()\n\nfeat_train, lab_train, feat_valid, lab_valid, feat_test, lab_test = tl.files.load_mnist_dataset(shape=(-1,784))\n\ninput_feature = tf.placeholder(tf.float32, [None,784] , name='input_feature')\ninput_label = tf.placeholder(tf.int64, [None,], name='input_label')\n\n# print(type(input_feature)) # \n# print(input_feature._shape) # (?, 784)\n\nnetwork = tl.layers.InputLayer(inputs = input_feature, name ='input_layer')\n#print(network.all_layers) # []\n#print(network.all_params) # []\n#print(network.all_drop) # {}\n\n#print(type(network)) # \n\nnetwork = tl.layers.DropoutLayer(network, keep=0.8, name='dropout_layer_1')\n#print(network.all_layers) # []\n#print(network.all_params) # []\n#print(network.all_drop) # { dtype=float32>: 0.8}\n\nnetwork = tl.layers.DenseLayer(network, n_units=800,act = tf.nn.relu, name='dense_relu_1')\n#print(network.all_layers) # [, ]\n#print(network.all_params) # [, ]\n#print(network.all_drop) # { dtype=float32>: 0.8}\n\nnetwork = tl.layers.DropoutLayer(network, keep=0.5, name='dropout_layer_2')\n#print(type(network)) # \n\nnetwork = tl.layers.DenseLayer(network, n_units=800,act = tf.nn.relu, name='dense_relu_2')\n#print(type(network)) # \n\nnetwork = tl.layers.DropoutLayer(network, keep=0.5, name='dropout_layer_3')\n#print(type(network)) # \n\nnetwork = tl.layers.DenseLayer(network, n_units=10, act = tf.identity, name='output_layer')\n#print(type(network)) # \n\npredict_label = network.outputs\n#print(type(predict_label)) # \n\n#print(predict_label._shape) # (?, 10)\n#print(input_label._shape) # (?,)\ncost = tl.cost.cross_entropy(predict_label, input_label) \n#print(type(cost)) # \n\ntrain_params = network.all_params\n#print(train_params)#[, , , , , ]\noptimizer = tf.train.AdamOptimizer(0.0001).minimize(cost, var_list=train_params)\n\ncorrect = tf.equal(tf.argmax(predict_label, 1), input_label)\naccuracy = tf.reduce_mean(tf.cast(correct, tf.float32))\n\nsess.run(tf.initialize_all_variables())\n\nnetwork.print_params()\nnetwork.print_layers()\n\ntl.utils.fit(sess, network, optimizer, cost, feat_train, lab_train, input_feature, input_label,\n acc=accuracy, batch_size=500, n_epoch=500, print_freq=5,\n X_val=feat_valid, y_val=lab_valid, eval_train=True)\n\ntl.utils.test(sess, network, accuracy, feat_test, lab_test, input_feature, input_label, batch_size=None, cost=cost)\n\ntl.files.save_npz(network.all_params , name='model.npz')\n\nsess.close()","sub_path":"Basic/mnist_simple.py","file_name":"mnist_simple.py","file_ext":"py","file_size_in_byte":3616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"543778538","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('vcard', '0014_project_description'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='projecttag',\n name='alias',\n ),\n migrations.AddField(\n model_name='projecttag',\n name='url_name',\n field=models.SlugField(default='some-url-name', max_length=255),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='project',\n name='url_name',\n field=models.SlugField(max_length=255, unique=True),\n ),\n ]\n","sub_path":"vcard/migrations/0015_auto_20160618_0816.py","file_name":"0015_auto_20160618_0816.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"341304278","text":"# -*- mode: python ; coding: utf-8 -*-\n\n# Build empress as macOS app bundle\n\nblock_cipher = None\n\n\na = Analysis(['../empress_gui.py'],\n pathex=['pyinstaller_spec'],\n binaries=[],\n datas=[(\"../assets\", \"./assets\")],\n # pkg_resources.py2_warn hidden import needed if setuptools>=45.0.0\n # https://github.com/pypa/setuptools/issues/1963#issuecomment-574265532\n hiddenimports=['pkg_resources.py2_warn'],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n [],\n exclude_binaries=True,\n name='empress_gui',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n console=False )\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=False,\n upx=True,\n upx_exclude=[],\n name='empress')\napp = BUNDLE(coll,\n name='empress.app',\n icon=None,\n bundle_identifier=None,\n info_plist={\n 'NSPrincipalClass': 'NSApplication', # Enable retina display\n 'CFBundleName': 'Empress DTL Computational Biology Tool', # Enable Siri\n }\n )\n","sub_path":"pyinstaller_spec/empress_gui_app.spec","file_name":"empress_gui_app.spec","file_ext":"spec","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"528747563","text":"import configs.titanic as cfg\nfrom utils.utils import *\nfrom utils.data_loading import load_processed_dataset\nimport os\nfrom CSDGAN.classes.tabular.TabularCGAN import TabularCGAN\nfrom CSDGAN.classes.tabular.TabularDataset import TabularDataset\nfrom torch.utils import data\nimport pickle as pkl\nimport random\n\n# Set random seem for reproducibility\nprint(\"Random Seed: \", cfg.MANUAL_SEED)\nrandom.seed(cfg.MANUAL_SEED)\ntorch.manual_seed(cfg.MANUAL_SEED)\n\n# Ensure directory exists for outputs\nexp_path = os.path.join(\"experiments\", cfg.EXPERIMENT_NAME)\nos.makedirs(exp_path, exist_ok=True)\n\n# Import data\ntitanic = load_processed_dataset('titanic')\n\n# Automatically determine these parameters and complete preprocessing\ndevice = torch.device(\"cuda:0\" if (torch.cuda.is_available()) else \"cpu\")\n\n# Instantiate data set and generator\ndataset = TabularDataset(df=titanic,\n dep_var=cfg.DEP_VAR,\n cont_inputs=cfg.CONT_INPUTS,\n int_inputs=cfg.INT_INPUTS,\n test_size=cfg.TEST_SIZE,\n seed=cfg.MANUAL_SEED)\ndataset.to_dev(device)\ndata_gen = data.DataLoader(dataset, **cfg.TRAINING_PARAMS)\n\n# Define GAN\nCGAN = TabularCGAN(data_gen=data_gen,\n device=device,\n path=exp_path,\n seed=cfg.MANUAL_SEED,\n eval_param_grid=cfg.EVAL_PARAM_GRID,\n eval_folds=cfg.EVAL_FOLDS,\n test_ranges=cfg.TEST_RANGES,\n eval_stratify=dataset.eval_stratify,\n **cfg.CGAN_INIT_PARAMS)\n\n# Eval on real data\nscore_real = train_test_logistic_reg(x_train=dataset.x_train.numpy(),\n y_train=dataset.y_train.numpy(),\n x_test=dataset.x_test.numpy(),\n y_test=dataset.y_test.numpy(),\n param_grid=cfg.EVAL_PARAM_GRID,\n cv=cfg.EVAL_FOLDS,\n random_state=cfg.MANUAL_SEED,\n labels_list=dataset.labels_list,\n verbose=True)\n\n# Train GAN\nCGAN.train_gan(num_epochs=cfg.NUM_EPOCHS, cadence=cfg.CADENCE, print_freq=cfg.PRINT_FREQ, eval_freq=cfg.EVAL_FREQ)\n\n# Load best-performing GAN\nCGAN.load_netG(best=True)\n\n# Fit another model to double-check results\nCGAN.test_model(stratify=CGAN.eval_stratify)\n\n# Save GAN\nwith open(os.path.join(exp_path, \"CGAN.pkl\"), 'wb') as f:\n pkl.dump(CGAN, f)\n\n# Visualizations\nCGAN.plot_progress(benchmark_acc=score_real, show=True, save=exp_path)\nCGAN.plot_training_plots(show=True, save=exp_path)\nCGAN.netG.plot_layer_scatters(title=\"Generator\", show=True, save=exp_path)\nCGAN.netD.plot_layer_scatters(title=\"Discriminator\", show=True, save=exp_path)\nCGAN.netG.plot_layer_hists(title=\"Generator\", show=True, save=exp_path)\nCGAN.netD.plot_layer_hists(title=\"Discriminator\", show=True, save=exp_path)\n\ngenned_df = CGAN.gen_data(size=cfg.TEST_RANGES[3], stratify=dataset.eval_stratify)\nplot_scatter_matrix(df=genned_df, cont_inputs=cfg.CONT_INPUTS, title=\"Fake Data\", scaler=None, show=True, save=exp_path)\nplot_scatter_matrix(df=titanic, cont_inputs=cfg.CONT_INPUTS, title=\"Real Data\", scaler=None, show=True, save=exp_path)\n\ncompare_cats(real_df=titanic, fake_df=genned_df, x='Sex', y='Survived', hue='Pclass', show=True, save=exp_path)\n\nplot_conditional_scatter(col1='sepal_len',\n col2='sepal_wid',\n real_df=titanic,\n fake_df=genned_df,\n dep_var=cfg.DEP_VAR,\n cont_inputs=cfg.CONT_INPUTS,\n labels_list=dataset.labels_list,\n scaler=None,\n alpha=0.25,\n show=True,\n save=exp_path)\n\nplot_conditional_density(col='petal_len',\n real_df=titanic,\n fake_df=genned_df,\n dep_var=cfg.DEP_VAR,\n cont_inputs=cfg.CONT_INPUTS,\n labels_list=dataset.labels_list,\n scaler=None,\n show=True,\n 
save=exp_path)\n","sub_path":"notebooks/prototypes/titanic/titanic_v2.py","file_name":"titanic_v2.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"295740262","text":"import lab7a\n\ndb = [[['författare', ['john', 'zelle']],\n ['titel', ['python', 'programming', 'an', 'introduction', 'to',\n 'computer', 'science']],\n ['år', 2010],\n ['genre', \"programmering\"]],\n [['författare', ['armen', 'asratian']],\n ['titel', ['diskret', 'matematik']],\n ['år', 2012],\n ['genre', \"matematik\"]],\n [['författare', ['j', 'glenn', 'brookshear']],\n ['titel', ['computer', 'science', 'an', 'overview']],\n ['år', 2011],\n ['genre', \"matematik\"]],\n [['författare', ['john', 'zelle']],\n ['titel', ['data', 'structures', 'and', 'algorithms', 'using', 'python',\n 'and', 'c++']],\n ['år', 2009],\n ['genre', \"programmering\"]],\n [['författare', ['anders', 'haraldsson']],\n ['titel', ['programmering', 'i', 'lisp']],\n ['år', 1993],\n ['genre', \"programmering\"]]]\n\n\ndef test():\n expected_results = [[db[0], db[3]], [db[0], db[2]], []]\n\n result = lab7a.search(['författare', ['john', '&']], db)\n assert result == expected_results[0]\n\n result = lab7a.search(['titel', ['--', 'an', '--']], db)\n assert result == expected_results[1]\n\n result = lab7a.search(['år', 2007], db)\n assert result == expected_results[2]\n\n result = lab7a.search([['författare', ['&', '&']], ['titel',\n ['--', 'python', '--']], ['år', \"&\"],\n ['genre', \"programmering\"]], db)\n assert result == expected_results[0]\n\n result = lab7a.search([['författare', ['&', 'zelle']], ['titel',\n ['--', 'python', '--']], ['genre', \"svenska\"]], db)\n assert result == expected_results[2]\n\n result = lab7a.search([], db)\n assert result == expected_results[2]\n\n result = lab7a.search(['genre', \"&\"], db)\n assert result == db\n print(\"Passed all tests\")\n\n\ntest()\n","sub_path":"lab7/testa.py","file_name":"testa.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"475264135","text":"import pandas as pd\nfrom sklearn import ensemble, preprocessing\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import MiniBatchKMeans, KMeans\n\n\ndata = pd.read_csv('drivers_50000.csv')\n\ndata.at[data['Accidents'] > 0, 'AccidentsBin'] = 1\ndata.at[data['Accidents'] == 0, 'AccidentsBin'] = 0\n\nXX = data[\n ['Age', 'Experience', 'PreviousAccidents', 'RouteDistance', 'Distance', 'HomeLat', 'HomeLng', 'WorkLat', 'WorkLng']]\ny = data['AccidentsBin']\nX = preprocessing.scale(XX)\nfeature_names = XX.columns\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=11)\n\ndef show_feature_importances(X, y,feature_names):\n\n rf = ensemble.RandomForestClassifier(random_state=11)\n param_grid = {'n_estimators':[55,75,100],'criterion':[\"entropy\",\"gini\"]}\n grid = GridSearchCV(estimator=rf, param_grid=param_grid, scoring='neg_log_loss')\n grid.fit(X,y)\n best=grid.best_estimator_\n importances = best.feature_importances_\n indices = np.argsort(importances)[::-1]\n\n\n d_first = len(feature_names)\n plt.figure(figsize=(8, 8))\n plt.title(\"Feature importances\")\n plt.bar(range(d_first), importances[indices[:d_first]], align='center')\n plt.xticks(range(d_first), np.array(feature_names)[indices[:d_first]], rotation=90)\n plt.xlim([-1, d_first]);\n plt.show()\n\nshow_feature_importances(X_train,y_train,feature_names)\n\n\ndef cluster_locations(data_input,n_clusters=10):\n homeLoc = data[['HomeLat', 'HomeLng']].values\n workLoc = data[['WorkLat','WorkLng']].values\n k_means_home = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)\n k_means_home.fit(homeLoc)\n k_means_work = KMeans(init='k-means++', n_clusters=n_clusters, n_init=10)\n k_means_work.fit(workLoc)\n data_input = data_input.assign(work=pd.Series(k_means_work.labels_, dtype=\"category\"))\n data_input = data_input.assign(home=pd.Series(k_means_home.labels_, dtype=\"category\"))\n data_input = data_input.drop(['WorkLat', 'WorkLng', 'HomeLat', 'HomeLng'], axis=1)\n data_input = data_input.dropna()\n home_work_vector = pd.get_dummies(data_input[['home', 'work']])\n data_input = data_input.drop(['home','work'],axis=1)\n data_input= pd.concat([data_input, home_work_vector], axis=1)\n return data_input\n\ndata=cluster_locations(data)\nprint(data.info())\nXX = data.drop(['AccidentsBin','Accidents','Skill','RushFactor'],axis=1)\ny = data['AccidentsBin']\nX = preprocessing.scale(XX)\nfeature_names = XX.columns\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=11)\n\nshow_feature_importances(X_train,y_train,feature_names)","sub_path":"feature_selection.py","file_name":"feature_selection.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"205436766","text":"### pickler.py\n### the pickle module can save objects to a binary file\n### this is called SERIALIZATION.\n### the file can be 'unpickled' to retrieve the object.\n\ndef main():\n\n states = {'FL':'Tallahassee','GA':'Atlanta',\n 'NY':'New York','CA':'Sacramento',\n 'OH':'Columbus','NH':'Concord'}\n\n\n\n \nmain()\n\n\n\n \n","sub_path":"Week 3 Files/dicts 510/pickler.py","file_name":"pickler.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"463484582","text":"#! /usr/bin/env python3\n\n\"\"\"\nThe program check bam or cram from the filename.\n\"\"\"\n\nfrom pathlib import Path\nimport pysam\nimport subprocess\n\n\ndef juncmut_supportread_count(input_file, output_file, bam_file, reference):\n\n\n def check_read(read):\n\n check_flag = True \n\n # get the flag information\n flags = format(int(read.flag), \"#014b\")[:1:-1]\n\n # skip unmapped read \n if flags[2] == \"1\" or flags[3] == \"1\": check_flag = False\n \n # skip supplementary alignmentx\n if flags[8] == \"1\" or flags[11] == \"1\": check_flag = False\n\n # skip duplicated reads\n if flags[10] == \"1\": check_flag = False\n\n return(check_flag)\n\n \n def tidy_reads(seq, qualities, read_ids, mut_mut):\n import re\n read_id_list = read_ids.split(',')\n proc = \"\"\n Q = 15\n pos_read_id_list = []\n seq_length = len(seq)\n baseIndex = 0\n # modify seq to 1 base presented by a char.\n while baseIndex < seq_length:\n #A ’>’ or ’<’ for a reference skip.\n # The deleted bases will be presented as ‘*’ in the following lines. \n if seq[baseIndex] == '>' or seq[baseIndex] == '<' or seq[baseIndex] == '*' :\n proc = proc + seq[baseIndex]\n baseIndex += 1 \n #A '^' the end or start of read, following the quality and the base.\n elif seq[baseIndex] == '^':\n proc = proc + seq[baseIndex+2]\n baseIndex += 3\n #A '$' is the last position of read. \n elif seq[baseIndex] == '$':\n baseIndex += 1\n #\\+[0-9]+[bases] or -[0-9]+[bases] means the deletion and the insertion. For example, +2AG means insertion of AG in the forward strand\n elif seq[baseIndex] == '+' or seq[baseIndex] == '-':\n indel_length = re.search(r'\\d+', seq[baseIndex:]).group()\n baseIndex += len(str(indel_length))+int(indel_length)+1 \n else:\n proc = proc + seq[baseIndex]\n baseIndex += 1\n \n # quality and base check. 
extract the IDs of reads carrying the mutation.\n        for i in range(0, len(proc),1):\n            if proc[i].upper() == mut_mut:\n                if (ord(qualities[i])-33) > Q:\n                    pos_read_id_list.append(read_id_list[i])\n        \n        return pos_read_id_list\n\n    b_path = Path(bam_file)\n\n    if b_path.suffix == '.bam':\n        bamfile = pysam.AlignmentFile(bam_file, 'rb')\n    if b_path.suffix == '.cram':\n        bamfile = pysam.AlignmentFile(bam_file, 'rc')\n    \n    ## start \n    hout = open(output_file, 'w') \n    header = [\"Mut_key\", \"SJ_key\", \"Sample\", \"SJ_Type\", \"SJ_Strand\", \"SJ_Read_Count\", \"SJ_Depth\", \"SJ_Freq\",\n              \"Ref_Motif\", \"Possible_Alt_Motif\",\"Possible_Alt_key\", \"Is_GT/AG\", \"Is_in_exon\",\"SJ_Overlap_Count\", \n              \"Chr\",\"Mut_Pos\", \"Mut_Ref\", \"Mut_Alt\", \"Mut_Count\", \"Mut_Depth\", \"Mut_Freq\",\n              \"Realign_No_SJ_Neg\", \"Realign_No_SJ_Pos\", \"Realign_Target_SJ_Neg\", \"Realign_Target_SJ_Pos\",\n              \"Realign_Normal_SJ_Neg\", \"Realign_Normal_SJ_Pos\",\"Realign_result\",\"support_read_rmdup\",\"RNA_Mut\"]\n    print('\\t'.join(header), file = hout)\n    # for each row.\n    with open(input_file, 'r') as hin:\n        next(hin)\n        for line in hin:\n            lie = line.rstrip('\\n')\n            F = line.rstrip('\\n').split('\\t')\n            # Is the mutation position in an exon or an intron?\n            if F[-1] != \"True\": continue\n            #print(lie + \"\\t0\\tFalse\", file = hout) \n            else:\n                #mpileup\n                mut_elm = F[0].split(',')\n                mut_chr = mut_elm[0]\n                mut_pos = str(mut_elm[1])\n                mut_mut = mut_elm[3]\n                #samtools mpileup -r chr4:162087015-162087015 -f /Volumes/NIIDA_SSD1R/genome DRR016694.Aligned.sortedByCoord.out.bam\n                mpileup_commands = [\"samtools\", \"mpileup\", \"-r\", mut_chr+\":\"+mut_pos+\"-\"+mut_pos, \"-f\", reference, bam_file, \"--output-QNAME\", \"-o\", output_file + \".tmp1.txt\"]\n                subprocess.run(mpileup_commands)\n                \n                # extract the read IDs that carry the mutation.\n                pos_read_list = []\n                with open(output_file + \".tmp1.txt\", 'r') as tin:\n                    for line in tin: \n                        col = line.rstrip('\\n').split('\\t')\n                        bases = col[4]\n                        qualities = col[5]\n                        read_ids = col[6]\n                        \n                        reads_with_mut_list = tidy_reads(bases, qualities, read_ids, mut_mut)\n                        \n                for read in bamfile.fetch(region = str(mut_chr) + ':' + str(mut_pos) + '-' + str(mut_pos)):\n                    if not check_read(read): continue\n                    else:\n                        if read.qname in reads_with_mut_list:\n                            pos_read = str(read.reference_start) + '_' + str(read.reference_end) + '_' + str(read.next_reference_start) \n                            pos_read_list.append(pos_read)\n                support_read_rmdup = len(set(pos_read_list))\n                if support_read_rmdup >= 2:\n                    rna_mut = \"True\"\n                    print(lie + \"\\t\"+ str(support_read_rmdup) + \"\\t\" + str(rna_mut), file = hout) \n                #else: rna_mut = \"False\"\n                \n                #print(lie + \"\\t\"+ str(support_read_rmdup) + \"\\t\" + str(rna_mut), file = hout) \n                \n                Path(output_file + \".tmp1.txt\").unlink()\n\n    bamfile.close()\n    hout.close()\n\n    \n\nif __name__ == \"__main__\":\n    \n    import argparse\n    \n    parser = argparse.ArgumentParser() # make the argument parser\n    \n    parser.add_argument(\"-input_file\", metavar = \"input_file\", default = None, type = str,\n                        help = \"input file\") \n    parser.add_argument(\"-output_file\", metavar = \"output_file\", default = None, type = str,\n                        help = \"output file\") \n    parser.add_argument(\"-bam_file\", metavar = \"bam_file\", default = None, type = str,\n                        help = \"bam or cram file\") \n    parser.add_argument(\"-reference\", metavar = \"reference\", default = None, type = str,\n                        help = \"reference\") \n    args = parser.parse_args()\n    \n    input_file = args.input_file\n    output_file = args.output_file\n    bam_file = args.bam_file\n    reference = args.reference\n    \n    juncmut_supportread_count(input_file, output_file, 
bam_file, reference)\n    \n\"\"\"\nExample input for tidy_reads:\nbases = \"<<>><>><><><<<<>><<>>>>>>G>>><>>>><<>>>><>><<>>>>>>>>>CCC\"\nqualities =\t\"FFmcFllHDJmJJJJHsIJJiFJI7JmJGJJJk>JJJsJJIFJCDH7FFFDFDFJFF\"\n\"\"\"\n\n\n","sub_path":"juncmut/juncmut_supportread_count.py","file_name":"juncmut_supportread_count.py","file_ext":"py","file_size_in_byte":6763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"125765791","text":"# Reference: http://hexo.tanglei.name/blog/aprioriall-algorithm-in-python.html\n# Reference: https://blog.csdn.net/tszw1007/article/details/77871133\nimport copy\nimport math\nimport re\n\ndef getSubSets(items, remove_origin=False):\n # the power set of the empty set has one element, the empty set\n result = [[]]\n for x in items:\n result.extend([subset + [x] for subset in result])\n if(remove_origin):\n result.pop()\n result.remove([])\n return result\n\nclass Basket():\n items=[]#Apple,orange,....\n def __init__(self,items):\n self.items = items\n def setItems(self,items):\n self.items = items\n def __str__(self):\n mystr='Basket[ '\n for i in self.items:\n mystr = mystr + i +' , '\n mystr += ']'\n return mystr\n \nclass Custom():\n baskets=[]#basket1,basket2\n mapNums=set()#maped num\n def __init__(self,baskets):\n self.baskets = baskets\n def setBaskets(self,baskets):\n self.baskets = baskets\n def setMapedNums(self,mapNums):\n self.mapNums = mapNums\n def __str__(self):\n mystr='Custom[ '\n for i in self.baskets:\n mystr = mystr + i.__str__() +' , '\n mystr += ']'\n return mystr\n def getMapedNums(self):\n return self.mapNums\n \nclass AprioriAll():\n customs=[]\n minSuppCount = 0#count number ,considering the min_supp and the num of transactions\n allBaskets=[]\n transMap={}\n def __init__(self,min_supp=0.4,datafile='aprioriall.txt'):\n inputfile = open(datafile,\"r\")\n self.min_supp = min_supp\n baskets=[]\n self.customs=[]\n for line in inputfile.readlines():\n if(line != \"\\n\"):\n items = re.compile(r\"\\w+\").findall(line)\n basket = Basket(items)\n baskets.append(basket)\n else:\n custom = Custom((baskets))\n self.customs.append(custom)\n baskets=[] \n #add the last custom \n custom = Custom((baskets))\n self.customs.append(custom)\n \n self.minSuppCount = math.ceil(min_supp * len(self.customs))\n \n def sortPhase(self):\n '''sort the transaction db :with customer-id as the major key and \n transaction-time as the minor key. 
'''\n        # already done in the constructor\n        pass\n        \n    def litemsetPhase(self):\n        ''' find all the frequent itemsets whose support is above the threshold '''\n        litemset = []\n        items = []\n        allBaskets = []\n        for custom in self.customs:\n            for basket in custom.baskets:\n                allBaskets.append(basket)\n                for item in basket.items:\n                    if [item] not in items:\n                        items.append([item])\n        \n        items.sort()\n        \n        # keep only the candidates whose support count meets the threshold\n        candidates = items\n        while True:\n            temp = []\n            for item1 in candidates:\n                count = 0\n                for basket in allBaskets:\n                    set1 = set(item1)\n                    if set1.issubset(basket.items):\n                        count += 1\n                if count >= self.minSuppCount:\n                    print(\"Frequent %d-itemset : %s\" %(len(item1),item1))\n                    temp.append(item1)\n                    litemset.append(item1)\n            \n            candidates = self.__genCandidate(temp)\n            if len(candidates) == 0 :\n                break\n        self.allBaskets = allBaskets\n        return litemset\n    \n    def transformationPhase(self,transmap):\n        for custom in self.customs:\n            mapNums = set()  # store the mapped numbers for each customer\n            for basket in custom.baskets:\n                for k in transmap.keys():\n                    s1 = set(transmap[k])\n                    s2 = set(basket.items)\n                    if s1.issubset(s2):\n                        mapNums.add(k)\n            custom.setMapedNums(mapNums) \n        \n    def sequencePhase(self,mapNums):\n        \n        item1set = set()\n        for num in mapNums:\n            item1set = item1set.union(num)\n        \n        item1list = list(item1set)\n        item1list.sort()\n        \n        seqresult = []\n        candidates = []\n        for item in item1list:\n            candidates.append([item])\n        while True:\n            for item in candidates:\n                count = 0 \n                for seq in mapNums:\n                    s1 = set(item)\n                    if s1.issubset(seq):\n                        count += 1\n                if count >= self.minSuppCount:\n                    print(\"Frequent %d-itemset : %s\" % (len(item), item))\n                    seqresult.append(item) \n            candidates = self.__genCandidate(candidates) \n            if len(candidates) == 0 :\n                break\n        return seqresult\n    def maxSeq(self,seqs):\n        maxSeq = copy.deepcopy(seqs)\n        for seq in seqs:\n            t_set = set(seq)\n            for seq1 in seqs:\n                t_set1 = set(seq1)\n                if t_set1 != t_set and t_set1.issuperset(t_set):\n                    maxSeq.remove(seq)\n                    break\n        return self.__map2seq(maxSeq) \n    def createTransMap(self,litemset):\n        transmap = {}\n        value = 1\n        for each in litemset:\n            transmap[value] = each\n            value += 1\n        self.transMap = transmap\n        return transmap\n    \n    def __map2seq(self,seqs):\n        # translate number sequences back to the original itemsets\n        origSeqs = []\n        for seq in seqs:\n            origSeq = []\n            for item in seq: \n                origSeq.append(self.transMap[item])\n            origSeqs.append(origSeq)\n        return origSeqs \n    def __genCandidate(self,frequentItems): \n        # generate new candidates by extending each frequent itemset with one item\n        length = len(frequentItems) \n        result = []\n        for i in range(length):\n            for j in range(i+1,length):\n                if self.__lastDiff(frequentItems[i],frequentItems[j]):\n                    item = copy.deepcopy(frequentItems[i])\n                    item.insert(len(frequentItems[i]),frequentItems[j][len(frequentItems[j])-1])\n                    if False == self.__has_inFrequentItemsets(frequentItems, item):\n                        result.append(item)\n        return result\n    # return True if some subset of item is missing from frequentItems (Apriori pruning)\n    def __has_inFrequentItemsets(self,frequentItems,item):\n        subs = getSubSets(item,remove_origin=True)\n        for each in subs:\n            if(each == []):\n                continue\n            flag = False\n            for i in frequentItems:\n                if i == each:\n                    flag = True\n                    break \n            if flag == False:\n                return True \n        return False  # every subset is present in the frequent itemsets\n    \n    def __lastDiff(self,items1,items2):\n        if len(items2) != len(items1):  # lengths must match\n            return False\n        if items1 == items2:  # identical itemsets do not combine\n            return False\n        return items1[:-1] == items2[:-1] \n\n\nif __name__ == '__main__':\n    aa = 
AprioriAll(min_supp=0.4, datafile='aprioriall2.txt')\n    litemset = aa.litemsetPhase()\n    print(\"litemset:\")\n    print(litemset)\n    transmap = aa.createTransMap(litemset)\n    print(\"transformation map:\")\n    print(transmap)\n    aa.transformationPhase(transmap)\n    customs = aa.customs\n    mapNums = []\n    for each in customs:\n        mapNums.append(each.getMapedNums())\n    seqNums = aa.sequencePhase(mapNums)\n    maxSeqs = aa.maxSeq(seqNums)\n    print(\"The sequential patterns:\")\n    print(maxSeqs)","sub_path":"anonymous-msweb/aprioriall.py","file_name":"aprioriall.py","file_ext":"py","file_size_in_byte":7736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"493458944","text":"#binary search\ndef search_pos(A, i, key):\n\tlow = 0\n\thigh = i - 1\n\twhile low < high:\n\t\tmid = (low + high)//2\n\t\tif key < A[mid]:\n\t\t\thigh = mid - 1\n\t\telse:\n\t\t\tlow = mid + 1\n\treturn low\n\n#with recursion\ndef search_pos(A, i, key):\n\tlow = 0\n\thigh = i - 1\n\tmid = (low + high)// 2\n\tif key >= A[mid] and key <= A[mid + 1]:\n\t\treturn mid\n\tif key < A[mid]:\n\t\treturn search_pos(A[low:mid], key)\n\tif key > A[mid]:\n\t\treturn search_pos(A[mid:high], key)\n\ndef binary_insertion_sort(A):\n\tfor i in range(1, len(A)):\n\t\tkey = A[i]\n\t\tpos = binary_search(A, i, key)\n\t\tfor j in range(i, pos, -1):\n\t\t\tA[j] = A[j - 1]\n\t\tA[pos] = key\n\treturn A\n\t\n","sub_path":"sort/binary_insertion_sort.py","file_name":"binary_insertion_sort.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"417825781","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\ndataset = pd.read_csv('Marketing_Data.csv')\nX = dataset.iloc[:, :-1]\ny = dataset.iloc[:, :1]\nprint(dataset)\nprint(X)\nprint(y)\n\n\n# In[3]:\n\n\ndataset.isnull().sum()\n\n\n# In[4]:\n\n\ndataset.head()\n\n\n# In[5]:\n\n\ndataset.tail()\n\n\n# In[6]:\n\n\ndataset.describe()\n\n\n# In[7]:\n\n\ndataset.info()\n\n\n# In[8]:\n\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 2)\nX_train, X_test, y_train, y_test\n\n\n# In[9]:\n\n\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(X_train, y_train)\n\n\n# In[10]:\n\n\ny_pred = regressor.predict(X_test)\ny_pred\n\n\n# In[13]:\n\n\nfor i in dataset.columns:\n sns.boxplot(dataset[i])\n plt.show()\n \n\n\n# In[14]:\n\n\nfrom sklearn.metrics import r2_score, accuracy_score\nr2_score(y_test,y_pred)*100\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Mulitple_linear_Regression.py","file_name":"Mulitple_linear_Regression.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"338770205","text":"# -*- coding: utf-8-unix -*-\n\nfrom collections import defaultdict\nfrom pprint import pprint\n\nimport redmine_wrapper.lib as lib\nfrom redmine_wrapper.node import RedmineWrapperProjectNode\nfrom redmine_wrapper.node import RedmineWrapperIssueNode\n\n\nclass RedmineWrapperContainer(object):\n def __init__(self, Node, url, rs):\n self.each_result = []\n self.url = url\n\n self.node = dict([(r['id'], Node(r)) for r in rs])\n self.version = dict([n.version() for n in self.node.values()])\n\n node_v = dict()\n for v in self.version:\n node_v[v] = [id for id in self.node if self.node[id].version_id() == v]\n\n self.tree = {}\n for v in self.version:\n self.tree[v] = defaultdict(list)\n for pid in set([self.node[id].parent_id() for id in node_v[v]]):\n self.tree[v][pid] = [id for id in node_v[v]\n if self.node[id].parent_id() is pid]\n\n self.sort()\n\n\n def sort(self):\n for v in self.tree:\n for id in self.tree[v]:\n self.tree[v][id].sort(\n key=lambda x:(self.node[x].start(), self.node[x].id())\n )\n\n\n def each(self, vfunc, func, gen = 0, id = None):\n self.each_result = []\n for v in self.version:\n flag, g, result = vfunc(v, self.version[v])\n if flag == True:\n self.each_result.append(result)\n gen += g\n self._each(func, v, gen, id)\n return self.each_result\n\n\n def _each(self, func, ver, gen, id):\n flag = True\n next_gen = gen\n if id is not None:\n flag, result = func(self.node[id], gen)\n self.each_result.append(result)\n next_gen += 1\n if flag == True:\n for next_id in self.tree[ver][id]:\n self._each(func, ver, next_gen, next_id)\n\n\nclass RedmineWrapperProjectContainer(RedmineWrapperContainer):\n def __init__(self, url, rs, **kwargs):\n super().__init__(RedmineWrapperProjectNode, url, rs)\n self.filter(**kwargs)\n\n\n def filter(self, **kwargs):\n if 'project_id' in kwargs:\n value = int(kwargs['project_id'])\n for v in self.version:\n for id in list(self.tree[v].keys()):\n if id != value:\n del self.tree[v][id]\n self.tree[v][None] = [value]\n\n if 'subproject_id' in kwargs:\n value = kwargs['subproject_id']\n if value == '!*':\n for v in self.version:\n for id in self.tree[v][None]:\n if id in self.tree[v]:\n del self.tree[v][id]\n else:\n value = int(value)\n for v in self.version:\n for id in self.tree[v][None]:\n self.tree[v][id] = [value] if value in self.tree[v][id] else []\n\n\n def text(self):\n def vfunc(id, name):\n return (False, None, None)\n\n def func(node, gen):\n return (True, node.text(gen))\n\n result = self.each(vfunc, func)\n return '\\n'.join(result).format(__url__=self.url)\n\n\n def markdown(self):\n pass\n\n\n def html(self):\n pass\n\n\nclass RedmineWrapperIssueContainer(RedmineWrapperContainer):\n def __init__(self, url, rs, **kwargs):\n super().__init__(RedmineWrapperIssueNode, url, rs)\n self.filter(**kwargs)\n\n\n def filter(self, **kwargs):\n for key, value in kwargs.items():\n if key == 'period':\n s = lib.str2date(value[0])\n e = lib.str2date(value[1])\n for v in self.version:\n for pid in self.tree[v]:\n self.tree[v][pid] = [id for id in self.tree[v][pid]\n if self.node[id].within(s, e)]\n if key == 'start':\n s = lib.str2date(value)\n for v in self.version:\n for pid in self.tree[v]:\n self.tree[v][pid] = [id for id in self.tree[v][pid]\n if s <= self.node[id].start()]\n if key == 'end':\n e = lib.str2date(value)\n for v in self.version:\n for pid in self.tree[v]:\n self.tree[v][pid] = [id for id in self.tree[v][pid]\n if self.node[id].end() < e]\n\n\n def text(self):\n def vfunc(id, name):\n return (True, 
0, '(({}))'.format(name))\n\n def func(node, gen):\n return (True, node.text(gen))\n\n result = self.each(vfunc, func)\n return '\\n'.join(result).format(__url__=self.url)\n\n\n def markdown(self):\n pass\n\n\n def html(self):\n pass\n\n","sub_path":"redmine_wrapper/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":4232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"509994639","text":"import os\n\nfrom conans import ConanFile, tools, AutoToolsBuildEnvironment, MSBuild\nfrom conans.tools import Version\n\n\nclass LcmsConan(ConanFile):\n name = \"lcms\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"A free, open source, CMM engine.\"\n license = \"MIT\"\n homepage = \"https://github.com/mm2/Little-CMS\"\n topics = (\"conan\", \"lcms\", \"cmm\", \"icc\", \"cmm-engine\")\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True}\n generators = \"cmake\"\n _source_subfolder = \"source_subfolder\"\n\n def build_requirements(self):\n if tools.os_info.is_windows and \"CONAN_BASH_PATH\" not in os.environ and \\\n tools.os_info.detect_windows_subsystem() != \"msys2\":\n self.build_requires(\"msys2/20190524\")\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n os.rename(\"Little-CMS-lcms%s\" % self.version, self._source_subfolder)\n\n def _build_visual_studio(self):\n # since VS2015 vsnprintf is built-in\n if Version(self.settings.compiler.version) >= \"14\":\n path = os.path.join(self._source_subfolder, \"src\", \"lcms2_internal.h\")\n tools.replace_in_file(path, \"# define vsnprintf _vsnprintf\", \"\")\n\n with tools.chdir(os.path.join(self._source_subfolder, \"Projects\", \"VC2013\")):\n target = \"lcms2_DLL\" if self.options.shared else \"lcms2_static\"\n upgrade_project = Version(self.settings.compiler.version) > \"12\"\n # run build\n msbuild = MSBuild(self)\n msbuild.build(\"lcms2.sln\", targets=[target], platforms={\"x86\": \"Win32\"}, upgrade_project=upgrade_project)\n\n def _build_configure(self):\n if self.settings.os == \"Android\" and tools.os_info.is_windows:\n # remove escape for quotation marks, to make ndk on windows happy\n tools.replace_in_file(os.path.join(self._source_subfolder, \"configure\"),\n \"s/[\t `~#$^&*(){}\\\\\\\\|;'\\\\\\''\\\"<>?]/\\\\\\\\&/g\", \"s/[\t `~#$^&*(){}\\\\\\\\|;<>?]/\\\\\\\\&/g\")\n env_build = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n with tools.chdir(self._source_subfolder):\n args = [\"prefix=%s\" % self.package_folder]\n if self.options.shared:\n args.extend([\"--disable-static\", \"--enable-shared\"])\n else:\n args.extend([\"--disable-shared\", \"--enable-static\"])\n args.append(\"--without-tiff\")\n args.append(\"--without-jpeg\")\n env_build.configure(args=args)\n env_build.make()\n env_build.make(args=[\"install\"])\n\n def build(self):\n if self.settings.compiler == \"Visual Studio\":\n self._build_visual_studio()\n else:\n self._build_configure()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n if self.settings.compiler == \"Visual Studio\":\n self.copy(pattern=\"*.h\", src=os.path.join(self._source_subfolder, \"include\"), dst=\"include\", keep_path=True)\n if self.options.shared:\n self.copy(pattern=\"*.lib\", src=os.path.join(self._source_subfolder, \"bin\"), dst=\"lib\", keep_path=False)\n self.copy(pattern=\"*.dll\", src=os.path.join(self._source_subfolder, \"bin\"), dst=\"bin\", keep_path=False)\n else:\n self.copy(pattern=\"*.lib\", src=os.path.join(self._source_subfolder, \"Lib\", \"MS\"), dst=\"lib\",\n keep_path=False)\n # remove entire 
share directory\n        tools.rmdir(os.path.join(self.package_folder, \"share\"))\n        # remove pkgconfig\n        tools.rmdir(os.path.join(self.package_folder, \"lib\", \"pkgconfig\"))\n        # remove la files\n        la = os.path.join(self.package_folder, \"lib\", \"liblcms2.la\")\n        if os.path.isfile(la):\n            os.unlink(la)\n        # remove binaries\n        for bin_program in [\"tificc\", \"linkicc\", \"transicc\", \"psicc\", \"jpgicc\"]:\n            for ext in [\"\", \".exe\"]:\n                try:\n                    os.remove(os.path.join(self.package_folder, \"bin\", bin_program + ext))\n                except OSError:\n                    pass\n\n    def package_info(self):\n        if self.settings.compiler == \"Visual Studio\":\n            self.cpp_info.libs = [\"lcms2\" if self.options.shared else \"lcms2_static\"]\n            if self.options.shared:\n                self.cpp_info.defines.append(\"CMS_DLL\")\n        else:\n            self.cpp_info.libs = [\"lcms2\"]\n","sub_path":"recipes/lcms/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":4813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"441914259","text":"#!/usr/bin/env python\n# Note that this needs:\n# sudo pip install websocket-client\n# not the library called 'websocket'\n\nimport json\nfrom uuid import uuid4\nimport websocket\nimport yaml\nfrom geometry_msgs.msg import PoseStamped\nimport rospy\nfrom std_msgs.msg import Header,String,Float32,Int8\nfrom sensor_msgs.msg import CompressedImage,Image\nfrom rospy_message_converter import message_converter\nimport cv2\nimport numpy as np\n\nclass WebsocketROSClient(object):\n def __init__(self, websocket_ip, port=9090):\n \"\"\"\n Class to manage publishing to ROS thru a rosbridge websocket.\n :param str websocket_ip: IP of the machine with the rosbridge server.\n :param int port: Port of the websocket server, defaults to 9090.\n \"\"\"\n #print(\"Connecting to websocket: {}:{}\".format(websocket_ip, port))\n \n self.ws = websocket.create_connection(\n 'ws://' + websocket_ip + ':' + str(port))\n self._advertise_dict = {}\n\n def _advertise(self, topic_name, topic_type):\n \"\"\"\n Advertise a topic with it's type in 'package/Message' format.\n :param str topic_name: ROS topic name.\n :param str topic_type: ROS topic type, e.g. std_msgs/String.\n :returns str: ID to de-advertise later on.\n \"\"\"\n new_uuid = str(uuid4())\n self._advertise_dict[new_uuid] = {'topic_name': topic_name,\n 'topic_type': topic_type}\n advertise_msg = {\"op\": \"advertise\",\n \"id\": new_uuid,\n \"topic\": topic_name,\n \"type\": topic_type\n }\n self.ws.send(json.dumps(advertise_msg))\n return new_uuid\n\n def _unadvertise(self, uuid):\n unad_msg = {\"op\": \"unadvertise\",\n \"id\": uuid,\n # \"topic\": topic_name\n }\n self.ws.send(json.dumps(unad_msg))\n \n def __del__(self):\n \"\"\"Cleanup all advertisings\"\"\"\n d = self._advertise_dict\n for k in d:\n self._unadvertise(k)\n\n def _publish(self, topic_name, message):\n \"\"\"\n Publish onto the already advertised topic the msg in the shape of\n a Python dict.\n :param str topic_name: ROS topic name.\n :param dict msg: Dictionary containing the definition of the message.\n \"\"\"\n msg = {\n 'op': 'publish',\n 'topic': topic_name,\n 'msg': message\n }\n json_msg = json.dumps(msg)\n self.ws.send(json_msg)\n\n def publish(self, topic_name, ros_message):\n \"\"\"\n Publish on a topic given ROS message thru rosbridge.\n :param str topic_name: ROS topic name.\n :param * ros_message: Any ROS message instance, e.g. 
LaserScan()\n        from sensor_msgs/LaserScan.\n        \"\"\"\n        # First check if we already advertised the topic\n        d = self._advertise_dict\n        for k in d:\n            if d[k]['topic_name'] == topic_name:\n                # Already advertised, do nothing\n                break\n        else:\n            # Not advertised, so we advertise\n            topic_type = ros_message._type\n            self._advertise(topic_name, topic_type)\n        # Converting ROS message to a dictionary through YAML\n        ros_message_as_dict = yaml.load(ros_message.__str__(), Loader=yaml.FullLoader)\n        # Publishing\n        self._publish(topic_name, ros_message_as_dict)\n\n    def subscribe(self,topic_name, ros_message):\n        # First check if we already advertised the topic\n        d = self._advertise_dict\n        for k in d:\n            if d[k]['topic_name'] == topic_name:\n                # Already advertised, do nothing\n                break\n        else:\n            # Not advertised, so we advertise\n            topic_type = ros_message._type\n            self._advertise(topic_name, topic_type)\n        # Converting ROS message to a dictionary through YAML\n        ros_message_as_dict = yaml.load(ros_message.__str__(), Loader=yaml.FullLoader)\n        # Publishing\n        return self._subscribe(topic_name, ros_message_as_dict, ros_message._type)\n\n    def _subscribe(self, topic_name, message, type):\n        \"\"\"\n        Subscribe to the already advertised topic and block until one\n        message is received, returning it as a ROS message instance.\n        :param str topic_name: ROS topic name.\n        :param str type: ROS message type, e.g. std_msgs/String.\n        \"\"\"\n        msg = {\n            'op': 'subscribe',\n            'topic': topic_name,\n            'type' : type\n        }\n        json_msg = json.dumps(msg)\n        self.ws.send(json_msg)\n        json_message = self.ws.recv()\n\n        dictionary = json.loads(json_message)['msg']\n        result = message_converter.convert_dictionary_to_ros_message(type, dictionary)\n        #print(\"Type: '%s' \\n Received: '%s'\" % (type, result))\n        return result\n\n#if __name__ == '__main__':\n#    connect = WebsocketROSClient('127.0.0.1')\n    \n","sub_path":"goodgame_fptu_dl/scripts/extensions/rosws.py","file_name":"rosws.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"143825915","text":"#\n# @section License\n#\n# The MIT License (MIT)\n# \n# Copyright (c) 2016, Erik Moqvist\n# \n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the \"Software\"), to deal in the Software without\n# restriction, including without limitation the rights to use, copy,\n# modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS\n# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN\n# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# This file is part of the Pumbaa project.\n#\n\n\nimport select\nimport socket\nimport harness\nfrom harness import assert_raises\n\n\ndef test_print():\n print(socket)\n\n\ndef test_tcp_client():\n client = socket.socket()\n client.connect((\"192.168.1.101\", 80))\n assert client.send(b'foo') == 3\n assert client.recv(3) == b'bar'\n client.close()\n\n\ndef test_tcp_server():\n listener = socket.socket()\n listener.bind((\"192.168.1.102\", 8080))\n listener.listen(5)\n listener.accept()\n listener.close()\n\n\ndef test_udp():\n socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n\ndef test_select():\n poll = select.poll()\n tcp = socket.socket()\n\n # Register both event channels.\n poll.register(tcp)\n\n # Timeout waiting for data on the socket.\n assert poll.poll(0.01) == []\n\n tcp.close()\n\n\ndef test_bad_arguments():\n # Bad socket family.\n with assert_raises(OSError):\n socket.socket(-1)\n\n # Bad socket type.\n with assert_raises(OSError):\n socket.socket(socket.AF_INET, -1)\n\n\ndef main():\n testcases = [\n (test_print, \"test_print\"),\n (test_tcp_client, \"test_tcp_client\"),\n (test_tcp_server, \"test_tcp_server\"),\n (test_udp, \"test_udp\"),\n (test_select, \"test_select\"),\n (test_bad_arguments, \"test_bad_arguments\")\n ]\n harness.run(testcases)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tst/socket/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"273136726","text":"import abc\nimport enum\n\nfrom . import t_tkinter\nfrom misc import Path\nfrom misc.Windows import w as Windows\n\n@enum.unique\nclass ColorSchemeEnum(enum.Enum):\n background = 'gray10'\n transparent = 'white'\n p1_text = '#93A1A1'\n p2_text = '#586E75'\n system_text = 'lawn green'\n advantage_plus = 'DodgerBlue2'\n advantage_slight_minus = 'ivory2'\n advantage_safe_minus = 'ivory3'\n advantage_punishible = 'orchid2'\n advantage_very_punishible = 'deep pink'\n advantage_text = 'black'\n\nclass Overlay:\n padding = 15\n\n @abc.abstractmethod\n def update_state(self):\n pass\n\n @abc.abstractmethod\n def get_geometry(self):\n pass\n\n def __init__(self):\n self.visible = False\n\n window_name = self.get_name()\n print(\"Launching {}\".format(window_name))\n\n self.toplevel = t_tkinter.Toplevel()\n\n self.toplevel.wm_title(window_name)\n self.toplevel.iconbitmap(Path.path('./img/tekken_bot_close.ico'))\n self.toplevel.overrideredirect(True)\n\n self.background_color = ColorSchemeEnum.background.value\n self.tranparency_color = self.background_color\n self.toplevel.configure(background=self.tranparency_color)\n\n self.toplevel.attributes(\"-topmost\", True)\n\n def get_name(self):\n return self.__class__.__name__\n\n def update_location(self, game_reader):\n if Windows.valid:\n tekken_rect = game_reader.get_window_rect()\n else:\n tekken_rect = FullscreenTekkenRect(self.toplevel)\n if tekken_rect is not None:\n x, y = self.get_geometry(tekken_rect)\n geometry = '+%d+%d' % (x, y)\n self.toplevel.geometry(geometry)\n if not self.visible:\n self.show()\n else:\n self.hide()\n\n def show(self):\n self.toplevel.deiconify()\n self.visible = True\n\n def hide(self):\n self.toplevel.withdraw()\n self.visible = False\n\nclass FullscreenTekkenRect:\n def __init__(self, toplevel):\n self.left = 0\n self.right = toplevel.winfo_screenwidth()\n self.top = 0\n self.bottom = toplevel.winfo_screenheight()\n","sub_path":"src/gui/Overlay.py","file_name":"Overlay.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"186047610","text":"import uuid\n\nfrom app.main import db\nfrom app.main.model.favorites import Favorites\nfrom app.main.model.application import Application\n\ndef favorites_list(user_email):\n return Favorites.query.filter_by(user_email=user_email).all()\n\ndef favorites_save(data):\n application = Application.query.filter_by(public_id=data['application_public_id']).first()\n favorites = Favorites.query.filter_by(user_email=data['user_email']).filter_by(application_public_id=data['application_public_id']).first()\n if not favorites:\n new_favorites = Favorites(\n public_id=str(uuid.uuid4()),\n user_email=data['user_email'],\n application_public_id=data['application_public_id'],\n application_name=application.name,\n application_category=application.category,\n application_rating_average=application.rating_average,\n application_image_logo=application.image_logo,\n application_price=application.price\n )\n save_changes(new_favorites)\n response_object = {\n 'status': 'success',\n 'message': 'Successfully saved.',\n }\n return response_object, 201\n else:\n response_object = {\n 'status': 'fail',\n 'message': 'favorites already exists.',\n }\n return response_object, 409\n\ndef favorites_remove(public_id):\n favorites = Favorites.query.filter_by(public_id=public_id).first()\n remove_changes(favorites)\n response_object = {\n 'status': 'success',\n 'message': 'Successfully removed.',\n }\n return response_object, 200\n\ndef save_changes(data):\n db.session.add(data)\n db.session.commit()\n\ndef remove_changes(data):\n db.session.delete(data)\n db.session.commit()","sub_path":"apppedia/app/main/service/favorites_service.py","file_name":"favorites_service.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"400429064","text":"from project import db\nfrom project.api.models import List, Item\n\n\ndef add_list(title):\n new_list = List(title=title)\n db.session.add(new_list)\n db.session.commit()\n return new_list\n\n\ndef add_item(item, list_id):\n new_item = Item(item=item, list_id=list_id)\n db.session.add(new_item)\n db.session.commit()\n return new_item\n","sub_path":"services/todolist/project/tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"368482892","text":"test_dict = {'gfg' : [5, 6, 7, 8],\n 'is' : [10, 11, 7, 5],\n 'best' : [6, 12, 10, 8],\n 'for' : [1, 2, 5]}\n#count=1\nk=[]\ns=[]\nfor i in test_dict.values():\n print(i)\n s.extend(i)\nprint(s)\nfor m in s:\n if m not in k:\n k.append(m)\n #count+=1\n\nprint(k)\nk.sort()\nprint(k)\nprint(max(k))\nprint(min(k))\n","sub_path":"Programs/Dictionary/Extract Unique values dictionary values.py","file_name":"Extract Unique values dictionary values.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"521443441","text":"from __future__ import unicode_literals\n\nfrom pygments.token import Token\nfrom ..enums import IncrementalSearchDirection\nfrom .utils import token_list_len\nfrom .processors import Processor\n\n__all__ = (\n 'DefaultPrompt',\n)\n\n\nclass DefaultPrompt(Processor):\n \"\"\"\n Default prompt. This one shows the 'arg' and reverse search like\n Bash/readline normally do.\n \"\"\"\n def __init__(self, prompt='> '):\n self.prompt = prompt\n\n def run(self, cli, buffer, tokens):\n # Get text before cursor.\n if buffer.isearch_state:\n before = _get_isearch_tokens(buffer.isearch_state)\n\n elif cli.input_processor.arg is not None:\n before = _get_arg_tokens(cli)\n\n else:\n before = [(Token.Prompt, self.prompt)]\n\n # Insert before buffer text.\n shift_position = token_list_len(before)\n\n return before + tokens, lambda i: i + shift_position\n\n def invalidation_hash(self, cli, buffer):\n return (\n cli.input_processor.arg,\n buffer.isearch_state,\n buffer.isearch_state and buffer.isearch_state.isearch_text,\n )\n\n\ndef _get_isearch_tokens(isearch_state):\n def before():\n if isearch_state.isearch_direction == IncrementalSearchDirection.BACKWARD:\n text = 'reverse-i-search'\n else:\n text = 'i-search'\n\n return [(Token.Prompt.Search, '(%s)`' % text)]\n\n def text():\n index = isearch_state.no_match_from_index\n text = isearch_state.isearch_text\n\n if index is None:\n return [(Token.Prompt.Search.Text, text)]\n else:\n return [\n (Token.Prompt.Search.Text, text[:index]),\n (Token.Prompt.Search.Text.NoMatch, text[index:])\n ]\n\n def after():\n return [(Token.Prompt.Search, '`: ')]\n\n return before() + text() + after()\n\n\ndef _get_arg_tokens(cli):\n \"\"\"\n Tokens for the arg-prompt.\n \"\"\"\n arg = cli.input_processor.arg\n\n return [\n (Token.Prompt.Arg, '(arg: '),\n (Token.Prompt.Arg.Text, str(arg)),\n (Token.Prompt.Arg, ') '),\n ]\n","sub_path":"prompt_toolkit/layout/prompt.py","file_name":"prompt.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"327139168","text":"#!/usr/bin/env python\n#-*- encoding:utf8 -*-\n# File Name:t.py\n# Author:ul1n(linlin152@foxmail.com)\n# Time:2018年10月31日 星期三 15时06分25秒\n\nimport asyncio\nimport os\nimport sys\n\nfrom mworker.manager import WorkerManager, WorkerType\nfrom mworker.utils import get_logger\n\n\ndev = True\n\nlog = get_logger('examples.mymanager.MyManager')\n\ncwd = os.getcwd()\n\nclass MyManager(WorkerManager):\n\n def __init__(self):\n self._check_timeout = 10\n if not dev:\n config = dict(\n tag='myworker',\n rds_config={\n 'host':'r3d1s',\n 'db': 7,\n 'port':6379,\n },\n image='myworker',\n command='python3 -m examples.myworker',\n network='redis_default',\n hostwd=cwd\n )\n super(MyManager, self).__init__(WorkerType.CONTAINER, **config)\n else:\n config = dict(\n tag='myworker',\n rds_config={\n 'host': 'r3d1s',\n 'db': 7,\n 'port': 6379,\n },\n command='gnome-terminal -- python3 -m examples.myworker',\n hostwd=cwd\n )\n super(MyManager, self).__init__(WorkerType.PROCESS, **config)\n self._count = 0\n\n async def _setup(self):\n await super(MyManager, self)._setup()\n self.set_setup_done()\n\n def gen_worker_id(self):\n self._count += 1\n return 'zone'+ str(self._count)\n\n async def check(self):\n wcount = await self.cache.get_all_worker_id()\n log.info(wcount)\n self._count = len(wcount)\n if self._count == 0:\n wid = self._gen_worker_id()\n args = dict(name='worker1', count=3)\n await self.cache.set_worker_args(wid, args, jsn=True)\n self.start_worker(wid)\n wid = self._gen_worker_id()\n args = dict(name='worker2', count=3)\n await self.cache.set_worker_args(wid, args, jsn=True)\n self.start_worker(wid)\n\n await asyncio.sleep(0.01)\n if self.should_log():\n log.info('checking')\n\n\ndef main():\n m = MyManager()\n try:\n m.start()\n except KeyboardInterrupt as e:\n log.info('exit by user')\n asyncio.ensure_future(m.stop())\n log.info('exited')\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"examples/mymanager.py","file_name":"mymanager.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"598101500","text":"import os\nimport sys\nfrom requests.compat import urlparse, is_windows, bytes, str\n\n\nclass Environment(object):\n \"\"\"Holds information about the execution context.\n\n Groups various aspects of the environment in a changeable object\n and allows for mocking.\n\n \"\"\"\n\n #noinspection PyUnresolvedReferences\n is_windows = is_windows\n\n progname = os.path.basename(sys.argv[0])\n if progname not in ['http', 'https']:\n progname = 'http'\n\n stdin_isatty = sys.stdin.isatty()\n stdin = sys.stdin\n stdout_isatty = sys.stdout.isatty()\n stdout = sys.stdout\n stderr = sys.stderr\n\n # Can be set to 0 to disable colors completely.\n colors = 256 if '256color' in os.environ.get('TERM', '') else 88\n\n def __init__(self, **kwargs):\n self.__dict__.update(**kwargs)\n\n def init_colors(self):\n # We check for real Window here, not self.is_windows as\n # it could be mocked.\n if (is_windows and not self.__colors_initialized\n and self.stdout == sys.stdout):\n import colorama.initialise\n self.stdout = colorama.initialise.wrap_stream(\n self.stdout, autoreset=False,\n convert=None, strip=None, wrap=True)\n self.__colors_initialized = True\n __colors_initialized = False\n\n\nclass HTTPMessage(object):\n \"\"\"Model representing an HTTP message.\"\"\"\n\n def __init__(self, orig):\n self._orig = orig\n\n @property\n def content_type(self):\n return str(self._orig.headers.get('Content-Type', ''))\n\n\nclass HTTPResponse(HTTPMessage):\n \"\"\"A `requests.models.Response` wrapper.\"\"\"\n\n def __iter__(self):\n mb = 1024 * 1024\n return self._orig.iter_content(chunk_size=2 * mb)\n\n @property\n def line(self):\n \"\"\"Return Status-Line\"\"\"\n original = self._orig.raw._original_response\n return str('HTTP/{version} {status} {reason}'.format(\n version='.'.join(str(original.version)),\n status=original.status,\n reason=original.reason\n ))\n\n @property\n def headers(self):\n return str(self._orig.raw._original_response.msg)\n\n @property\n def encoding(self):\n return self._orig.encoding or 'utf8'\n\n @property\n def body(self):\n # Only now the response body is fetched.\n # Shouldn't be touched unless the body is actually needed.\n return self._orig.content\n\n\nclass HTTPRequest(HTTPMessage):\n \"\"\"A `requests.models.Request` wrapper.\"\"\"\n\n def __iter__(self):\n yield self.body\n\n @property\n def line(self):\n \"\"\"Return Request-Line\"\"\"\n url = urlparse(self._orig.url)\n\n # Querystring\n qs = ''\n if url.query or self._orig.params:\n qs = '?'\n if url.query:\n qs += url.query\n # Requests doesn't make params part of ``request.url``.\n if self._orig.params:\n if url.query:\n qs += '&'\n #noinspection PyUnresolvedReferences\n qs += type(self._orig)._encode_params(self._orig.params)\n\n # Request-Line\n return str('{method} {path}{query} HTTP/1.1'.format(\n method=self._orig.method,\n path=url.path or '/',\n query=qs\n ))\n\n @property\n def headers(self):\n headers = dict(self._orig.headers)\n content_type = headers.get('Content-Type')\n\n if isinstance(content_type, bytes):\n # Happens when uploading files.\n # TODO: submit a bug report for Requests\n headers['Content-Type'] = str(content_type)\n\n if 'Host' not in headers:\n headers['Host'] = urlparse(self._orig.url).netloc\n\n return '\\n'.join('%s: %s' % (name, value)\n for name, value in headers.items())\n\n @property\n def encoding(self):\n return 'utf8'\n\n @property\n def body(self):\n \"\"\"Reconstruct and return the original request body bytes.\"\"\"\n if self._orig.files:\n # TODO: would be 
nice if we didn't need to encode the files again\n # FIXME: Also the boundary header doesn't match the one used.\n for fn, fd in self._orig.files.values():\n # Rewind the files as they have already been read before.\n fd.seek(0)\n body, _ = self._orig._encode_files(self._orig.files)\n else:\n try:\n body = self._orig.data\n except AttributeError:\n # requests < 0.12.1\n body = self._orig._enc_data\n\n if isinstance(body, dict):\n #noinspection PyUnresolvedReferences\n body = type(self._orig)._encode_params(body)\n\n if isinstance(body, str):\n body = body.encode('utf8')\n\n return body\n","sub_path":"httpie/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"575686879","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/rimuapi.py\n# Compiled at: 2015-03-24 11:50:03\nimport urllib, os\nfrom requests import Request, Session\ntry:\n import json\nexcept:\n import simplejson as json\n\ndef sort_uniq(sequence):\n import itertools, operator\n return itertools.imap(operator.itemgetter(0), itertools.groupby(sorted(sequence)))\n\n\ndef valid_domain_name(domain_name):\n import re\n if len(domain_name) > 255:\n return False\n domain_name.rstrip('.')\n allowed = re.compile('(?!-)[A-Z\\\\d-]{1,63}(? 0]) / X[X > 0]).tolist())\n\n\t\tX = numpy.reshape(X, (-1, 1))\n\t\tY = numpy.reshape(Y, (-1, 1))\n\n\t\tself.train_x = X\n\t\tself.train_y = Y\n\n\t\tX = numpy.array(numpy.arange(-30, 0, 0.01).tolist() + [0.0] + numpy.arange(0.01, 30.01, 0.01).tolist())\n\t\tY = numpy.array((numpy.sin(X[X < 0]) / X[X < 0]).tolist() + [1.0] + (numpy.sin(X[X > 0]) / X[X > 0]).tolist())\n\n\t\tX = numpy.reshape(X, (-1, 1))\n\t\tY = numpy.reshape(Y, (-1, 1))\n\t\t\n\t\tself.test_x = X\n\t\tself.test_y = Y","sub_path":"Experiments/FFNN/MLELM/Datasets/Sinc.py","file_name":"Sinc.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"187268714","text":"from django.conf.urls import url\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom . import views\n\nouter_list = views.OuterViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\nouter_detail = views.OuterViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\ninner_list = views.InnerViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\ninner_detail = views.InnerViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\nplaylist_list = views.PlaylistViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\nplaylist_detail = views.PlaylistViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\nurlpatterns = format_suffix_patterns([\n url(r'outer/$', outer_list, name='outer_list'),\n url(r'outer/(?P[0-9]+)/$', outer_detail, name='outer_detail'),\n url(r'inner/$', inner_list, name='inner_list'),\n url(r'inner/(?P[0-9]+)/$', inner_detail, name='inner_detail'),\n url(r'playlist/$', playlist_list, name='playlist_list'),\n url(r'playlist/(?P[0-9]+)/$', playlist_detail, name='playlist_detail'),\n url(r'$', views.api_root),\n])\n","sub_path":"django_base/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"353299295","text":"#!/usr/bin/python\n\nimport math\nimport matplotlib.pyplot as plt\n\n\ndef g():\n return 9.80665\n\n\ndef euler(theta, tf, dt, vf = 0):\n sin_t = math.sin(theta)\n t = 0.0\n y = 0.0\n v = 0.0\n y_vec = []\n v_vec = []\n t_vec = []\n while(t <= tf):\n y += v*dt\n v += g()*sin_t*dt\n if vf != 0:\n v *= (1-(v/vf))\n y_vec.append(y)\n v_vec.append(v)\n t_vec.append(t)\n t += dt\n return y_vec, v_vec, t_vec\n\n\ndef euler_cromer(theta, tf, dt, vf = 0):\n sin_t = math.sin(theta)\n t = 0.0\n y = 0.0\n v = 0.0\n y_vec = []\n v_vec = []\n t_vec = []\n while(t <= tf):\n v += g()*sin_t*dt\n if vf != 0:\n v *= (1-(v/vf))\n y += v*dt\n y_vec.append(y)\n v_vec.append(v)\n t_vec.append(t)\n t += dt\n return y_vec, v_vec, t_vec\n\n\n#------------------------------------------------------------------------------\n\n# Dados iniciais\ntheta = 0.122173 # 7 graus\ntf = 6.0\ndt = 0.1\n\n# Mude esta linha para obter diferentes constantes de atrito\nvf = 40 # velocidade terminal (0: sem resistência)\n\npos, vel, t = euler(theta, tf, dt, vf)\npos_c, vel_c, t_c = euler_cromer(theta, tf, dt, vf)\n\n# Euler\nplt.plot(t, pos, label = \"Posição (m)\")\nplt.plot(t, vel, label = \"Velocidade (m/s)\")\n\n# Euler-Cromer\nplt.plot(t_c, pos_c, label = \"Posição - Cromer (m)\")\nplt.plot(t_c, vel_c, label = \"Velocidade - Cromer (m/s)\")\n\n# Dados coletados\ndata = [0.0, 2.6, 4.08, 5.18, 5.88]\ny = [0.0, 2.5, 5, 7.5, 10.0]\nplt.plot(data, y, 's', label = \"Posição coletada (m)\")\n\nplt.legend(loc=2)\nplt.xlabel('Tempo (s)')\nplt.show()\n","sub_path":"Old/6º Semestre/Modelagem & Simulação/EP2/rampa.py","file_name":"rampa.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"480377909","text":"# Подсчитать, сколько было выделено памяти под переменные в ранее разработанных программах\n# в рамках первых трех уроков. Проанализировать результат и определить программы с наиболее\n# эффективным использованием памяти.\n\nimport sys\n\n# Урок 3, задача 8\n# Матрица 5x4 заполняется вводом с клавиатуры кроме последних элементов строк.\n# Программа должна вычислять сумму введенных элементов каждой строки и записывать ее\n# в последнюю ячейку строки. В конце следует вывести полученную матрицу.\n\na = []\n\nfor i in range(5):\n b = []\n for j in range(3):\n b.append(int(input(f'Введите элемент {i + 1}-{j + 1}: ')))\n b.append(b[0] + b[1] + b[2])\n a.append(b)\n\nfor i in range(5):\n print()\n for j in range(4):\n print(a[i][j], end=' ')\n\n\n# подсчет памяти\nsum_var = sys.getsizeof(a) + sys.getsizeof(b) + sys.getsizeof(i) + sys.getsizeof(j)\nprint(sum_var)\n\n# Python 3.9.5\n# Windows, 64-разрядная ОС\n\n# Запуск позволил определить, что объем памяти - 264 байта\n# Под списки отведено 120 и 88 байт, под остальные переменные (int) - 28 байт\n# Объем под переменные зависит от счетчика ссылок на объект, ссылки на тип объекта, версии Python\n\n# Общие выводы в первой задаче\n","sub_path":"dz6_task2.py","file_name":"dz6_task2.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"55617757","text":"import json\nfrom botocore.vendored import requests\nfrom python.shared import BeautifulSoup\n\ndef news(event, context):\n\n result = requests.get('https://www.inside.com.tw/?page=1')\n\n html = result.text\n\n soup = BeautifulSoup(html, 'html.parser')\n\n # delete special div in index\n if soup.find('div',class_='Independent_study'):\n soup.find('div',class_='Independent_study').decompose()\n\n if soup.find('div',class_='Independent_study_down'):\n soup.find('div',class_='Independent_study_down').decompose()\n\n posts = soup.find_all('div',class_='post_list_item')\n resp=[]\n\n for item in posts:\n # print(item)\n obj = item.find('a', class_='js-auto_break_title')\n if not isinstance(obj,type(None)):\n resp.append({\n 'title':item.find('a', class_='js-auto_break_title').text,\n 'url': item.find('a', class_='js-auto_break_title')['href'],\n 'description':item.find('p', class_='post_description').text\n })\n\n response = {\n \"statusCode\": 200,\n \"body\": json.dumps(resp)\n }\n\n return response\n","sub_path":"api/crawler/inside.py","file_name":"inside.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"566341755","text":"# -*- coding: UTF-8 -*-\n# ver: Python-2.7.10\n\nimport urllib\nimport os\nimport sys\nimport datetime\nimport json\n\n# progressbar-2.3\nfrom progressbar import ProgressBar\n\nimport func\nfunc.init()\n\n# path:\ndownPath = func.downPath + 'weibo\\\\'\n\n# name:\ncookieFileName = 'weibo.txt'\n\n# URL:\nhostURL = 'http://photo.weibo.com/'\nalbumGetURL = '/albums/get_all?'\nphotoGetURL = hostURL + 'photos/get_all?'\nlargePicURL = 'http://ww4.sinaimg.cn/large/'\n\nif not os.path.isdir(downPath):\n os.mkdir(downPath)\n \n# get cookie & make headers\nif not os.path.isfile(func.cookiePath + cookieFileName):\n print('cookie file not found...(%s)' % (func.cookiePath + cookieFileName))\n sys.exit()\nfp = open(func.cookiePath + cookieFileName)\ncookie = fp.readline()\nfp.close()\nheaders = func.headers_for_urllib2\nheaders['Cookie'] = cookie\n \n# get WeiboUID\nWeiboUID = input('input WeiboUID:')\n\n# get Albums from index\nindex = json.loads(func.getJSHtml(hostURL + str(WeiboUID) + albumGetURL, headers = headers))\n\n# perpare to download\nif index['data']['total'] == 0:\n print('WeiboUID error or no this UID')\n sys.exit()\nif not os.path.isdir(downPath + str(WeiboUID)):\n os.mkdir(downPath + str(WeiboUID))\ndownPath = downPath + str(WeiboUID) + '\\\\'\n\n# get photoName in each Albums and download\nprint('downloading...')\nfor album in index['data']['album_list']:\n newPhotoCount = 0\n if not os.path.isdir(downPath + album['caption']):\n os.mkdir(downPath + album['caption'])\n print('albumName:%s\\ncount:%d' % (album['caption'], album['count']['photos']))\n \n photoCount = album['count']['photos']\n progress = ProgressBar(maxval = album['count']['photos'])\n progress.start()\n count = 0\n page = 1\n while photoCount > 0:\n photos = json.loads(func.getJSHtml(photoGetURL +\n 'uid=' + str(WeiboUID) +\n '&album_id=' + str(album['album_id']) +\n '&count=100' +\n '&type=' + str(album['type']) +\n '&page=' + str(page),\n headers = headers))\n for photo in photos['data']['photo_list']:\n if not os.path.isfile(downPath + album['caption'] + '\\\\%s.jpg' % (photo['photo_id'])):\n imgurl = largePicURL + photo['pic_name']\n urllib.urlretrieve(imgurl, downPath + album['caption'] + '\\\\%s.jpg' % (photo['photo_id']))\n newPhotoCount += 1\n count += 1\n progress.update(count)\n photoCount -= 100\n page += 1\n progress.finish()\n print('new pic count:%d\\n' % (newPhotoCount))\n\nfp = open(downPath + 'log.txt', 'a+')\nhomePage = func.getJSHtml(hostURL + str(WeiboUID), headers = headers)\nname = func.dealHtml(nameRE, homePage)[0]\nfp.write('time:%s\\nname:%s\\n\\n' % (datetime.datetime.now(), name))\nfp.close()\n","sub_path":"Python/ImageDownloader/Weibo_Albums.py","file_name":"Weibo_Albums.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"576581351","text":"#!/bin/python3\n\nimport sys\n\n\ndef staircase(n):\n # Complete this function\n n1 = 0\n n2 = 0\n for i in range(n):\n arr = []\n for j in range(n - i - 1):\n print(\" \",end=\"\")\n for k in range(i + 1):\n print(\"#\", end=\"\")\n print('\\t')\n\n\n\nif __name__ == \"__main__\":\n n = int(input().strip())\n staircase(n)\n","sub_path":"Leetcode_test/2_printTrankle.py","file_name":"2_printTrankle.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"512977502","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\n__author__ = \"Andrew Bian\"\n\nimport xml.etree.ElementTree as ET\nimport logging\nfrom platform import system\n\nif system() == 'Windows':\n path_prefix = __file__[:__file__.rfind('\\\\') + 1] + '../'\nelse:\n path_prefix = __file__[:__file__.rfind('/') + 1] + '../'\n\n\nDATAPATH = {\n 'sbgv_daily_status_path': path_prefix + 'data/sbgv/daily_status/',\n 'sbgv_daily_inflow_path': path_prefix + 'data/sbgv/daily_inflow/',\n 'sbgv_daily_outflow_path': path_prefix + 'data/sbgv/daily_outflow/',\n 'sbgv_yearly': path_prefix + 'data/sbgv/yearly/',\n 'sbgv_all': path_prefix + 'data/sbgv/',\n 'sbg_daily_status_path': path_prefix + 'data/sbg/daily_status/',\n 'sbg_daily_inflow_path': path_prefix + 'data/sbg/daily_inflow/',\n 'sbg_daily_outflow_path': path_prefix + 'data/sbg/daily_outflow/',\n 'sbg_yearly': path_prefix + 'data/sbg/yearly/',\n 'sbg_all': path_prefix + 'data/sbg/',\n 'issue': path_prefix + 'data/issue_data/',\n}\n\n# ------------------------------------------------------------------------\n# storage api functions\n# ------------------------------------------------------------------------\n\n\n\ndef get_day_trlist(in_date, category, tag):\n filepath = build_path(in_date, category, tag)\n return get_trlist(filepath)\n\n\ndef get_day_trnum(in_date, category, tag):\n root = get_rootele(in_date, category, tag)\n elements = root.find('elements')\n row_num = len(elements.findall('row'))\n return row_num\n\n\ndef get_day_trlist_raw(in_date, category):\n filepath = build_path(in_date, category)\n return ET.parse(filepath)\n\n\ndef write_day_trlist_raw(in_date, category, data):\n filepath = build_path(in_date, category)\n data.write(filepath)\n\n\n# x stands for get data from yearly data file\ndef get_year_trlist(year, tag):\n filepath = build_path_year(year, tag)\n return get_trlist(filepath)\n\n\ndef get_all_trlist(tag):\n filepath = build_path_all(tag)\n return get_trlist(filepath)\n\ndef get_issue_list():\n filepath = build_path_issue()\n return get_trlist(filepath)\n\n# ------------------------------------------------------------------------\n# helper functions\n# ------------------------------------------------------------------------\n\n\ndef get_trlist(filepath):\n tree = ET.parse(filepath)\n root = tree.getroot()\n treedict = xml_to_dict(root[1])\n rawtrs = treedict.values()[0]\n rawtrs = [rawtr['row'] for rawtr in rawtrs]\n trlist = [build_trobject(rawtr) for rawtr in rawtrs]\n return trlist\n\n\ndef build_path(in_date, category, tag):\n return DATAPATH[tag + '_' + category + '_path'] + in_date.isoformat() + \".xml\"\n\n\ndef build_path_all(tag):\n return DATAPATH[tag + '_' + 'all'] + 'all.xml'\n\ndef build_path_issue():\n return DATAPATH['issue'] + 'issue.xml'\n\ndef build_path_year(year, tag):\n return DATAPATH[tag + '_' + 'yearly'] + str(year) + '.xml'\n\n\ndef get_rootele(in_date, category, tag):\n filepath = DATAPATH[tag + '_' + category + '_path'] + in_date.isoformat() + \".xml\"\n tree = ET.parse(filepath)\n root = tree.getroot()\n return root\n\n\ndef xml_to_dict(el):\n d = {}\n if el.text:\n d[el.tag] = el.text\n else:\n d[el.tag] = {}\n children = el.getchildren()\n if children:\n d[el.tag] = [xml_to_dict(child) for child in children]\n return d\n\n\ndef build_trobject(rawtr):\n tr = {}\n for column in rawtr:\n tr[column['column'][0]['name']] = column['column'][1]['value']\n return tr\n\n\nif __name__ == '__main__':\n 
pass\n","sub_path":"tr_statistics/module/storage_xml.py","file_name":"storage_xml.py","file_ext":"py","file_size_in_byte":3601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"127286147","text":"\nfrom django.shortcuts import render, get_object_or_404, redirect, Http404\nfrom django.contrib.admin.views.decorators import staff_member_required\n\nfrom .forms import ArticleCreateForm, ArticleEditForm\nfrom .models import Article\nfrom .filters import slugify\n\n\n# Create your views here.\n\n\ndef home_view(request):\n head_title = \"Blog\"\n main_head = \"Blog\"\n article = Article.objects.order_by('num_views').last()\n context = {\n \"main_head\": main_head,\n \"head_title\": head_title,\n 'article': article,\n }\n return render(request, 'home.html', context)\n\n#CRUD (with list as part of retieve)\n#Create\n@staff_member_required\ndef article_create_view(request):\n form = ArticleCreateForm(request.POST or None, request.FILES or None)\n if form.is_valid():\n obj = form.save(commit=False)\n obj.slug = slugify(form.cleaned_data['title'])\n obj.author = request.user\n obj.save()\n form = ArticleCreateForm()\n\n head_title = \"Create Article\"\n main_head = \"Create Article\"\n\n context = {\n 'form': form,\n \"head_title\": head_title,\n \"main_head\": main_head,\n }\n return render(request, 'article_create.html', context)\n\n\n#Retrieve\ndef article_detail_view(request, slug):\n article = get_object_or_404(Article, slug=slug)\n article.num_views += 1\n article.save()\n head_title = article.title\n main_head = \"Selected Article:\"\n context = {\n \"article\": article,\n \"head_title\": head_title,\n \"main_head\": main_head,\n }\n return render(request, 'article_detail.html', context)\n\n\n#List\ndef articles_view(request):\n queryset = Article.objects.all() if request.user.is_staff else Article.objects.published()\n head_title = \"Articles\"\n main_head = \"Articles\"\n context = {\n \"object_list\": queryset,\n \"head_title\": head_title,\n \"main_head\": main_head,\n }\n return render(request, 'articles.html', context)\n\n\n#Update\n@staff_member_required\ndef article_edit_view(request, slug):\n article = get_object_or_404(Article, slug=slug)\n if not request.user.is_superuser and request.user != article.author:\n raise Http404\n form = ArticleEditForm(request.POST or None, instance=article)\n if form.is_valid():\n obj = form.save(commit=False)\n obj.slug = slugify(form.cleaned_data['title'])\n obj.save()\n return redirect(f\"/articles/{obj.slug}\")\n head_title = \"Edit Article\"\n main_head = \"Edit Article\"\n context = {\n \"article\": article,\n \"head_title\": head_title,\n \"main_head\": main_head,\n \"form\": form,\n }\n return render(request, 'article_edit.html', context)\n\n\n#Delete\n@staff_member_required\ndef article_delete_view(request, slug):\n article = get_object_or_404(Article, slug=slug)\n if not request.user.is_superuser and request.user != article.author:\n raise Http404\n if request.method == \"POST\":\n article.delete()\n return redirect(\"/articles\")\n head_title = \"Delete Article\"\n main_head = \"Delete Article\"\n context = {\n \"article\": article,\n \"head_title\": head_title,\n \"main_head\": main_head,\n }\n return render(request, 'article_delete.html', context)\n","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"163558451","text":"# Google # Amazon\n# https://leetcode.com/problems/minimum-difference-between-largest-and-smallest-value-in-three-moves/\n\n# O(n logn) time, O(1) space\nclass Solution:\n def minDifference(self, nums: List[int]) -> int:\n nums.sort()\n if len(nums) <= 4:\n return 0\n \n diff1 = nums[-1] - nums[3]\n diff2 = nums[-2] - nums[2]\n diff3 = nums[-3] - nums[1]\n diff4 = nums[-4] - nums[0]\n return min(diff1, diff2, diff3, diff4)\n\n# Optimization by creating priority queue for whole array\n# O(log n) time (from O(k log n) where k = 3), O(n) space \nimport heapq\nclass Solution:\n def minDifference(self, nums: List[int]) -> int:\n if len(nums) <= 4:\n return 0\n \n minus_nums = [-num for num in nums]\n heapq.heapify(nums)\n heapq.heapify(minus_nums)\n \n min1 = heapq.heappop(nums)\n min2 = heapq.heappop(nums)\n min3 = heapq.heappop(nums)\n min4 = heapq.heappop(nums)\n \n max1 = - heapq.heappop(minus_nums)\n max2 = - heapq.heappop(minus_nums)\n max3 = - heapq.heappop(minus_nums)\n max4 = - heapq.heappop(minus_nums)\n \n return min(max1-min4, max2-min3, max3-min2, max4-min1)\n\n# Alternative optimization by keeping a priority queue of size k (3)\n# Note, this approach is used by heapq.nlargest and heapq.nsmallest\n# O(n) time (from O(n log k)), O(n) space\nimport heapq\nclass Solution:\n def minDifference(self, nums: List[int]) -> int:\n if len(nums) <= 4:\n return 0\n maxs = heapq.nlargest(4, nums)\n mins = heapq.nsmallest(4, nums)\n return min(maxs[0]-mins[3], maxs[1]-mins[2], maxs[2]-mins[1], maxs[3]-mins[0])","sub_path":"leetcode/1509_MinimumDifferenceBetweenLargestAndSmallestValueInThreeMoves.py","file_name":"1509_MinimumDifferenceBetweenLargestAndSmallestValueInThreeMoves.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"35089804","text":"import gdb\n\nclass LogHeaderDbg (gdb.Command):\n \"\"\" \"\"\"\n loghdr_meta = []\n loghdr = []\n\n def __init__ (self):\n super (LogHeaderDbg, self).__init__ (\"lhdbg\",\n gdb.COMMAND_SUPPORT,\n gdb.COMPLETE_NONE)\n def invoke (self, args, from_tty):\n args = args.split()\n if len(args) < 1:\n print (\"usage: lhdbg [save/show] logheader_meta\")\n return\n\n if args[0] == \"save\":\n loghdr_meta = gdb.parse_and_eval(args[1])\n self.loghdr_meta.append(loghdr_meta)\n\n '''\n # example to access fields in struct\n print loghdr_meta.type\n for name, field in loghdr_meta.type.iteritems():\n print name, field\n '''\n\n loghdr = loghdr_meta['loghdr']\n self.loghdr.append(loghdr.dereference())\n\n #print loghdr_meta.dereference(loghdr_meta.loghdr)\n elif args[0] == \"show\":\n for i, l in enumerate(self.loghdr_meta):\n print (\"index - \", i)\n print (\"loghdr_meta: \", l)\n print (\"loghdr: \", self.loghdr[i])\n else:\n print (\"Unsupported command\")\n return\n\nLogHeaderDbg()\n","sub_path":"gdb_python_modules/logheader_dbg.py","file_name":"logheader_dbg.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"213054374","text":"import numpy as np\r\nimport math\r\nfrom copy import deepcopy\r\n\r\n\r\nclass KMeans(object):\r\n\r\n def __init__(self, k=3, tolerancia=0.001, max_iterations=500):\r\n self.k = k\r\n self.tolerancia = tolerancia\r\n self.max_iterations = max_iterations \r\n\r\n def fit(self, data, n_clusters=3):\r\n self.initialize_centroids(data)\r\n for _ in range(self.max_iterations):\r\n self.otimizado = True\r\n self.initialize_classes() \r\n\r\n # Calcula a distancia entre os pontos e os clusters. Escolhe o centroid mais proximo\r\n for sample in data:\r\n distances = [self.Distancia_Euclidiana(sample, centroid) for centroid in self.centroids]\r\n classification = distances.index(min(distances))\r\n self.classes[classification].append(sample)\r\n \r\n self.recalcular_centroids()\r\n\r\n # Termina se estiver otimizado: se os centroids alteram pouco a posicao(menos que a tolerancia definida)\r\n if self.otimizado:\r\n break \r\n\r\n \r\n def initialize_centroids(self, data):\r\n self.centroids = []\r\n # Os primeiros 'k' elementos do dataset serão os centroids iniciais\r\n for i in range(self.k):\r\n self.centroids.append(data[i])\r\n \r\n def initialize_classes(self):\r\n self.classes = {}\r\n for i in range(self.k):\r\n self.classes[i] = []\r\n\r\n def recalcular_centroids(self):\r\n anterior = deepcopy(self.centroids)\r\n # Recalcula os centroids com base na média dos pontos do cluster\r\n for classification in self.classes:\r\n self.centroids[classification] = np.average(self.classes[classification], axis = 0)\r\n\r\n for i in range(len(self.centroids)):\r\n centroid_original = anterior[i]\r\n centroid_atual = self.centroids[i]\r\n \r\n if np.sum(np.abs((centroid_atual - centroid_original)/centroid_original) * 100.0) > self.tolerancia:\r\n self.otimizado = False\r\n\r\n def Distancia_Euclidiana(self, matriz_A, matriz_B):\r\n distancia = 0\r\n for i in range(len(matriz_A)):\r\n distancia += (matriz_A[i] - matriz_B[i]) ** 2\r\n \r\n ed = math.sqrt(distancia)\r\n return ed \r\n","sub_path":"MachineLearning/KMeans.py","file_name":"KMeans.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"93467658","text":"import numpy as np\nimport pandas as pd\nimport itertools as it\n\nfrom . import transform_3d as t3d\n\nclass PathModelBase(object):\n def __init__(self, path_df):\n self._path_df = path_df\n self._time_s = None\n def get_heading_at_samples(self):\n raise NotImplementedError()\n def get_location_at_samples(self):\n raise NotImplementedError()\n def set_time_samples(self, time_s):\n self._time_s = time_s\n\nclass DumbHeuristicPathModel(PathModelBase):\n \"\"\"\n This model makes a couple of extremely brittle assumptions:\n 1) The car travels in a straight line in all dimensions.\n 2) Only the ordering of the time samples matters; absolute time does not.\n 3) GPS packets and LIDAR packets are sampled at the same times.\n \"\"\"\n\n def __init__(self, *args, **dargs):\n super(DumbHeuristicPathModel, self).__init__(*args, **dargs)\n self._path_df = self._path_df.sort('t')\n self._norms = self._path_df[['x', 'y', 'z']].diff().fillna(0).apply(np.linalg.norm, axis=1)\n\n def set_time_samples(self, time_s):\n super(DumbHeuristicPathModel, self).set_time_samples(time_s)\n t_min = min(self._path_df['t'].min(), self._time_s.min())\n self._path_df['t_norm'] = self._path_df['t']-t_min\n self._time_s = self._time_s-t_min\n\n def get_location_at_samples(self):\n dy = -self._norms.cumsum()[:-1]\n locs = pd.DataFrame({'x': 0.0, 'y': dy, 'z': 0.0})\n grouper = pd.cut(self._time_s,\n bins=self._path_df['t_norm'],\n labels=xrange(len(dy)),\n )\n return locs, grouper\n\n def get_heading_at_samples(self):\n N = len(self._time_s)\n vec = pd.DataFrame({'x': 0.0, 'y': 1.0, 'z': 0.0}, index=[0])\n grouper = np.zeros(N, dtype=int)\n return vec, grouper\n\n\nclass ZipperPathModel(DumbHeuristicPathModel):\n\n def __init__(self, *args, **dargs):\n super(DumbHeuristicPathModel, self).__init__(*args, **dargs)\n self._path_df = self._path_df.sort('t')\n self._norms = self._path_df[['x', 'y']].diff().fillna(0).apply(np.linalg.norm, axis=1)\n\n def get_location_at_samples(self):\n dy = -self._norms.cumsum()[:-1]\n locs = pd.DataFrame({'x': 0.0, 'y': dy, 'z': 0.0})\n counter = it.count(-1)\n time_df = pd.DataFrame({'t': self._time_s}).sort('t')\n _helper = lambda _df: pd.Series({'group': counter.next()})\n _grouper = time_df.groupby('t').apply(_helper)\n result = time_df.join(_grouper, on='t')\n return locs, result['group']\n\n\nclass Registrar(object):\n _required_fields = ['x', 'y', 'z', 't']\n \n def __init__(self, lidar_df, gps_df, lidar_axis=None, pathcls=None):\n assert all([_f in lidar_df.columns for _f in self._required_fields]), \"Required fields missing!\"\n assert all([_f in gps_df.columns for _f in self._required_fields]), \"Required fields missing!\"\n self._lidar_df = lidar_df\n self._gps_df = gps_df\n if lidar_axis is None:\n self._lidar_axis = np.array([0.0, 1.0, 0.0])\n if pathcls is None:\n self._PathModelCls = DumbHeuristicPathModel\n \n def compute(self):\n _t_s = self._lidar_df['t']\n path_model = self.build_path_model()\n path_model.set_time_samples(_t_s)\n self._result = self._lidar_df[['x', 'y', 'z']]\n self._do_translations(path_model) # have to translate before we rotate; no communitivity here\n self._do_rotations(path_model)\n \n def _do_translations(self, path_model):\n vecs, grouper = path_model.get_location_at_samples()\n transforms = vecs.apply(t3d.Translation.from_vector, axis=1)\n self._result = self._apply_many_transforms(self._result, transforms, grouper)\n\n def _do_rotations(self, path_model):\n vecs, grouper = path_model.get_heading_at_samples()\n _helper = 
lambda _df: t3d.Rotation.from_two_vectors(_df, self._lidar_axis)\n transforms = vecs.apply(_helper, axis=1)\n self._result = self._apply_many_transforms(self._result, transforms, grouper)\n \n def _apply_many_transforms(self, xyz_df, transforms, grouper):\n _helper = lambda _df: self._apply_transform(_df, transforms[_df.name])\n result = xyz_df.groupby(grouper).apply(_helper)\n result.reset_index(inplace=True)\n return result[['x', 'y', 'z']]\n\n def _apply_transform(self, xyz_df, transform):\n result = transform(xyz_df.T).T\n return pd.DataFrame({'x': result[:, 0], 'y': result[:, 1], 'z': result[:, 2]})\n \n def build_path_model(self):\n return self._PathModelCls(self._gps_df)\n\n def get_result(self):\n return self._result\n","sub_path":"cloudlab/pcd_reg.py","file_name":"pcd_reg.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"365221819","text":"# -*- coding: utf-8 -*-\n\n# cbz から表紙のサムネイル画像を生成する\n# -resize 200x200 オプションで画像を生成する\n\nimport configparser\nimport io\nimport os\nimport os.path\nimport subprocess\nimport sys\n\nclass CommandExecutor():\n def __init__(self):\n self.COMMAND = 'magick \"{0}\" -alpha off \"{1}\"'\n\n def execute(self, src, dst):\n command = self.COMMAND.format(src, dst)\n print(command)\n subprocess.call(command, shell=True)\n\ndef create_image(src_path, dst_path):\n executor = CommandExecutor()\n executor.execute(src_path, dst_path)\n\ndef extract_alpha():\n src = 'yamato'\n \n folders = os.listdir(src)\n for file_name in folders:\n file_path = os.path.join(src, file_name)\n dst = os.path.splitext(os.path.basename(file_name))[0] + \".png\"\n create_image(file_path, dst)\n\n print(\"finish.\")\n\nif __name__ == '__main__':\n extract_alpha()\n","sub_path":"python3/extract_alpha.py","file_name":"extract_alpha.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"443608928","text":"class Children:\n _name = \"\"\n _surname = \"\"\n _age = 0\n def __init__(self,name,surname,age):\n self._name = name\n self._surname = surname\n self._age = age\n def getData(self):\n print(\"name: \",self._name)\n print(\"surname: \",self._surname)\n print(\"age: \",self._age)\none = Children(\"OneName\",\"OneSurname\",12)\none.getData()\ntwo = Children(\"TwoName\",\"TwoSurname\",10)\ntwo.getData()","sub_path":"23.py","file_name":"23.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"353773311","text":"# Copyright 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Classes for reading and writing SAM and BAM files.\n\nAPI for reading:\n with SamReader(input_path) as reader:\n for read in reader:\n process(reader.header, read)\n\nAPI for writing:\n\n with SamWriter(output_path) as writer:\n for read in reads:\n writer.write(read)\n\nwhere read is a nucleus.genomics.v1.Read protocol buffer.\n\nIf the path contains '.tfrecord', a TFRecord file is assumed; otherwise\nit is treated as a true SAM file. Also, file names ending with '.gz'\nare assumed to be compressed.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\n\nfrom nucleus.io import genomics_reader\nfrom nucleus.io import genomics_writer\nfrom nucleus.io.python import sam_reader\nfrom nucleus.protos import index_pb2\nfrom nucleus.protos import reads_pb2\n\n\nclass NativeSamReader(genomics_reader.GenomicsReader):\n \"\"\"Class for reading from native SAM files.\n\n Most users will want to use SamReader instead, because it dynamically\n dispatches between reading native SAM files and TFRecord files based\n on the filename's extensions.\n \"\"\"\n\n def __init__(self, input_path,\n use_index=True,\n read_requirements=None,\n parse_aux_fields=False,\n hts_block_size=None,\n downsample_fraction=None,\n random_seed=None):\n \"\"\"Initializes a NativeSamReader.\n\n Args:\n input_path: string. A path to a resource containing SAM/BAM records.\n Currently supports SAM text format and BAM binary format.\n use_index: optional bool, defaulting to True. If True, we will attempt to\n load an index file for reads_source to enable the query() API call. If\n True an index file must exist. If False, we will not attempt to load an\n index for reads_source, disabling the query() call.\n read_requirements: optional ReadRequirement proto. If not None, this proto\n is used to control which reads are filtered out by the reader before\n they are passed to the client.\n parse_aux_fields: optional bool. If False, the default, we will not parse\n the auxillary fields of the SAM/BAM records (see SAM spec for details).\n Parsing the aux fields is often unnecessary for many applications, and\n adds a significant parsing cost to access. If you need these aux fields,\n set parse_aux_fields to True and these fields will be parsed and\n populate the appropriate Read proto fields (e.g., read.info).\n hts_block_size: integer or None. If None, will use the default htslib\n block size. Otherwise, will configure the underlying block size of the\n underlying htslib file object. Larger values (e.g. 1M) may be\n beneficial for reading remote files.\n downsample_fraction: None or float in the interval [0.0, 1.0]. If not\n None or 0.0, the reader will only keep each read with probability\n downsample_fraction, randomly.\n random_seed: None or int. The random seed to use with this sam reader, if\n needed. 
If None, a fixed random value will be assigned.\n\n Raises:\n ValueError: If downsample_fraction is not None and not in the interval\n (0.0, 1.0].\n ImportError: If someone tries to load a tfbam file.\n \"\"\"\n if input_path.endswith('.tfbam'):\n # Delayed loading of tfbam_lib.\n try:\n from tfbam_lib import tfbam_reader # pylint: disable=g-import-not-at-top\n self._reader = tfbam_reader.make_sam_reader(\n input_path,\n read_requirements=read_requirements,\n use_index=use_index,\n unused_block_size=hts_block_size,\n downsample_fraction=downsample_fraction,\n random_seed=random_seed)\n except ImportError:\n raise ImportError(\n 'tfbam_lib module not found, cannot read .tfbam files.')\n else:\n index_mode = index_pb2.INDEX_BASED_ON_FILENAME\n if not use_index:\n index_mode = index_pb2.DONT_USE_INDEX\n\n aux_field_handling = reads_pb2.SamReaderOptions.SKIP_AUX_FIELDS\n if parse_aux_fields:\n aux_field_handling = reads_pb2.SamReaderOptions.PARSE_ALL_AUX_FIELDS\n\n if downsample_fraction:\n if not 0.0 < downsample_fraction <= 1.0:\n raise ValueError(\n 'downsample_fraction must be in the interval (0.0, 1.0]',\n downsample_fraction)\n\n if random_seed is None:\n # Fixed random seed produced with 'od -vAn -N4 -tu4 < /dev/urandom'.\n random_seed = 2928130004\n\n self._reader = sam_reader.SamReader.from_file(\n input_path.encode('utf8'),\n reads_pb2.SamReaderOptions(\n read_requirements=read_requirements,\n index_mode=index_mode,\n aux_field_handling=aux_field_handling,\n hts_block_size=(hts_block_size or 0),\n downsample_fraction=downsample_fraction,\n random_seed=random_seed))\n\n self.header = self._reader.header\n\n super(NativeSamReader, self).__init__()\n\n def iterate(self):\n return self._reader.iterate()\n\n def query(self, region):\n return self._reader.query(region)\n\n def __exit__(self, exit_type, exit_value, exit_traceback):\n self._reader.__exit__(exit_type, exit_value, exit_traceback)\n\n\nclass SamReader(genomics_reader.DispatchingGenomicsReader):\n \"\"\"Class for reading Read protos from SAM or TFRecord files.\"\"\"\n\n def _native_reader(self, input_path, **kwargs):\n return NativeSamReader(input_path, **kwargs)\n\n def _record_proto(self):\n return reads_pb2.Read\n\n\nclass NativeSamWriter(genomics_writer.GenomicsWriter):\n \"\"\"Class for writing to native SAM files.\n\n Most users will want SamWriter, which will write to either native SAM\n files or TFRecords files, based on the output filename's extensions.\n \"\"\"\n\n def __init__(self, output_path, header):\n \"\"\"Initializer for NativeSamWriter.\n\n Args:\n output_path: str. A path where we'll write our SAM/BAM file.\n header: A nucleus.SamHeader protobuf. The header is used both\n for writing the header, and to control the sorting applied to\n the rest of the file.\n \"\"\"\n raise NotImplementedError\n\n def write(self, proto):\n raise NotImplementedError\n\n def __exit__(self, exit_type, exit_value, exit_traceback):\n self._writer.__exit__(exit_type, exit_value, exit_traceback)\n\n\nclass SamWriter(genomics_writer.DispatchingGenomicsWriter):\n \"\"\"Class for writing Variant protos to SAM or TFRecord files.\"\"\"\n\n def _native_writer(self, output_path, header):\n return NativeSamWriter(output_path, header)\n","sub_path":"nucleus/io/sam.py","file_name":"sam.py","file_ext":"py","file_size_in_byte":7181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"163119515","text":"import boto3\nfrom botocore.exceptions import ClientError\n\nfrom handofcats import as_command\n\n\n@as_command\ndef run(*, queue_url: str):\n \"\"\"Exercise send_sqs_message()\"\"\"\n\n # Assign this value before running the program\n sqs_client = boto3.client(\"sqs\")\n\n # Send some SQS messages\n entries = []\n for i in range(1, 6):\n entries.append(\n {\"Id\": f\"m{i}\", \"MessageBody\": f\"SQS message #{i}\", \"DelaySeconds\": 10}\n )\n\n try:\n assert (\n entries\n ), \"!! An error occurred (AWS.SimpleQueueService.EmptyBatchRequest) when calling the SendMessageBatch operation: There should be at least one SendMessageBatchRequestEntry in the request.\"\n\n response = sqs_client.send_message_batch(QueueUrl=queue_url, Entries=entries)\n print(response)\n except ClientError as e:\n print(\"!!\", e)\n","sub_path":"daily/20200129/example_sqs/00sqs-batch-send.py","file_name":"00sqs-batch-send.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"554742936","text":"from django.test import TestCase\nfrom unittest.mock import patch\nfrom django.db.utils import OperationalError\nfrom django.core.management import call_command\n\nclass CommandTests(TestCase):\n def test_wait_for_db_read(self):\n '''\n Test waiting for db is available\n '''\n with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:\n # In place of performing the action that above command run, we directly return True for it.\n gi.return_value = True\n call_command('wait_for_db')\n self.assertEqual(gi.call_count, 1)\n\n @patch('time.sleep', return_value=True)\n def test_wait_for_db(self, ts):\n '''\n Test waiting for db\n '''\n with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:\n gi.side_effect = [OperationalError] * 5 + [True]\n call_command('wait_for_db')\n self.assertEqual(gi.call_count, 6)\n","sub_path":"core/tests/tests_commands.py","file_name":"tests_commands.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"326371654","text":"# expr_runner_ide.py\n#\n# experiment launcher for value iteration used in IDE\n# -----------------------\n\nfrom experiment_creater_and_resumer.experiment_creater_and_resumer import ExprCreaterAndResumer\nimport gridworldValueIteration\n\n\ndef run_expr():\n # agent parameters\n alpha = 0.2 # learning rate\n epsilon = 0.3 # exploration rate\n\n # learning environment parameters\n display_speed = 0.5\n discount = 0.9\n delta = 0.02\n\n # generate postfix\n postfix = ''\n postfix += '_valueIteration'\n postfix += '_alpha' + str(alpha)\n postfix += '_epsilon' + str(epsilon)\n postfix += '_speed' + str(display_speed)\n\n log_dir = '/Users/lguan/Documents/Study/Research/Summer 2018/experiment-logs'\n expr_saver = ExprCreaterAndResumer(rootdir=log_dir, postfix=postfix)\n\n # save experiment runner\n expr_saver.dump_src_code_and_model_def(fname=__file__)\n # save grid world related files\n expr_saver.dump_src_code_and_model_def(fname=gridworldValueIteration.__file__)\n\n # run experiment\n exprValueIteration = gridworldValueIteration.GridworldValueIterationExperiment(learning_rate=alpha, epsilon=epsilon\n , discount=discount, delta=delta\n , display_speed=display_speed)\n exprValueIteration.start()\n\n\nif __name__ == '__main__':\n run_expr()\n","sub_path":"experiment-logs/TAMER-robust-experiment/42_preferenceTAMERAgent_alpha0.3_epsilon0.05_policyConverge_autoFeedback_no0_wrong0_noise0.1_speed2.0/all_py_files_snapshot/experiment/expr_value_iteration_launcher_ide.py","file_name":"expr_value_iteration_launcher_ide.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"481854570","text":"\"\"\"Test v0x01.utils methods.\"\"\"\nfrom unittest import TestCase\nfrom unittest.mock import MagicMock, patch, PropertyMock\n\nfrom kytos.lib.helpers import get_switch_mock, get_connection_mock\nfrom napps.kytos.of_core.v0x01.utils import (send_desc_request, send_echo,\n say_hello, send_set_config,\n handle_features_reply)\n\nfrom tests.helpers import get_controller_mock\n\n\nclass TestUtils(TestCase):\n \"\"\"Test utils.\"\"\"\n\n def setUp(self):\n \"\"\"Execute steps before each tests.\"\"\"\n self.mock_controller = get_controller_mock()\n self.mock_switch = get_switch_mock('00:00:00:00:00:00:00:01', 0x01)\n self.mock_connection = get_connection_mock(0x01, self.mock_switch)\n\n @patch('napps.kytos.of_core.v0x01.utils.emit_message_out')\n def test_send_desc_request(self, mock_emit_message_out):\n \"\"\"Test send_desc_request.\"\"\"\n send_desc_request(self.mock_controller, self.mock_switch)\n mock_emit_message_out.assert_called()\n\n def test_handle_features_reply(self):\n \"\"\"test Handle features reply.\"\"\"\n mock_event = MagicMock()\n mock_features = MagicMock()\n mock_controller = MagicMock()\n self.mock_switch.get_interface_by_port_no.side_effect = [MagicMock(),\n False]\n type(mock_features).ports = PropertyMock(return_value=[MagicMock()])\n type(mock_event).content = PropertyMock(return_value={'message':\n mock_features})\n mock_controller.get_switch_or_create.return_value = self.mock_switch\n response = handle_features_reply(mock_controller, mock_event)\n self.assertEqual(self.mock_switch, response)\n self.assertEqual(self.mock_switch.update_features.call_count, 1)\n\n self.mock_switch.update_features.call_count = 0\n response = handle_features_reply(mock_controller, mock_event)\n self.assertEqual(self.mock_switch, response)\n self.assertEqual(self.mock_switch.update_features.call_count, 1)\n\n @patch('napps.kytos.of_core.v0x01.utils.emit_message_out')\n def test_send_echo(self, mock_emit_message_out):\n \"\"\"Test send_echo.\"\"\"\n send_echo(self.mock_controller, self.mock_switch)\n mock_emit_message_out.assert_called()\n\n @patch('napps.kytos.of_core.v0x01.utils.emit_message_out')\n def test_set_config(self, mock_emit_message_out):\n \"\"\"Test set_config.\"\"\"\n send_set_config(self.mock_controller, self.mock_switch)\n mock_emit_message_out.assert_called()\n\n @patch('napps.kytos.of_core.v0x01.utils.emit_message_out')\n def test_say_hello(self, mock_emit_message_out):\n \"\"\"Test say_hello.\"\"\"\n say_hello(self.mock_controller, self.mock_switch)\n mock_emit_message_out.assert_called()\n","sub_path":"tests/unit/v0x01/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"49711339","text":"import torch\n\nfrom tqdm import tqdm\nfrom utils.utils import cross_entropy_3d, dice\n\n\ndef train(model, loader, optimizer, logger, args, epoch, print_freq = 10):\n\tlosses = []\n\tdices = []\n\tmodel.train()\n\tfor i, batch in enumerate(loader):\n\t\tindex = batch['index']\n\t\tvolume = batch['image'].cuda()\n\t\tvolume = volume.view((-1,) + volume.shape[2:])\n\t\tlabel = batch['label'].cuda()\n\t\tlabel = label.view((-1,) + label.shape[2:])\n\t\toutput, _ = model(volume)\n\t\tloss = cross_entropy_3d(output, label)\n\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\n\t\tpred = output.argmax(dim = 1)\n\t\tlabel = label.squeeze(1)\n\t\td = dice(pred.cpu().data.numpy() == 1, label.cpu().data.numpy() == 1)\n\t\tdices.append(d)\n\t\tlosses.append(loss)\n\t\tlosses.append(loss.detach().cpu().item())\n\t\tif i % print_freq == 0:\n\t\t\ttqdm.write('[Epoch {}, {}/{}] loss: {}, dice: {}'.format(epoch, i, len(loader), loss.detach().cpu().item(), d))\n\n\t\t\tlogger.log(\"train/loss\", loss)\n\t\t\tlogger.log(\"train/dice\", d)\n\t\t\tlogger.step()\n\ttqdm.write(\"[Epoch {}] avg loss: {}, avg dice: {}\".format(epoch, sum(losses) / len(losses), sum(dices) / len(dices)))\n\n\tmodel.eval()\n\tdices = []\n\n\ndef validate(model, loader, optimizer, logger, saver, args, epoch):\n\tdices = []\n\tfor i, batch in enumerate(loader):\n\t\tindex = batch['index']\n\t\tvolume = batch['image'].cuda()\n\t\tvolume = volume.view((-1,) + volume.shape[2:])\n\t\tlabel = batch['label'].cuda()\n\t\tlabel = label.view((-1,) + label.shape[2:])\n\t\tlabel = label.squeeze(1)\n\t\toutput, _ = model(volume)\n\t\tpred = output.argmax(dim = 1)\n\t\td = dice(pred.cpu().data.numpy() == 1, label.cpu().data.numpy() == 1)\n\t\tdices.append(d)\n\t\tif args.local_rank == 0:\n\t\t\tlogger.log(\"test/dice\", d)\n\t\t\tsaver.save(epoch, {\n\t\t\t\t\t'state_dict': model.state_dict(),\n\t\t\t\t\t'dice': d,\n\t\t\t\t\t'optimizer_state_dict': optimizer.state_dict()\n\t\t\t\t}, d)\n\ttqdm.write(\"[Epoch {}] test avg dice: {}\".format(epoch, sum(dices) / len(dices)))\n","sub_path":"uda/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"249028615","text":"promedio = float(input(\"ingrese su promedio \"))\nmodalidad = \"pregrado\"\ncreditos = 0\ndescuento = 0\npodraMatricular = 1\n\n\nif modalidad == \"pregrado\":\n if promedio >= 4.5:\n creditos = 28\n descuento = 0.25\n elif promedio < 4.5 and promedio >= 4:\n creditos = 25\n descuento = 0.1\n elif promedio < 4 and promedio >= 3.5:\n creditos = 20\n descuento = 0\n elif promedio < 3.5 and promedio >= 2.5:\n creditos = 15\n descuento = 0\n elif promedio < 2.5:\n print(\"no podra matricularse\")\n podraMatricular = 0\nelse:\n if promedio >= 4.5:\n creditos = 20\n descuento = 0.2\n elif promedio < 4.5 and promedio >= 0:\n creditos = 10\n descuento = 0\n \nif podraMatricular == 1:\n if modalidad == \"pregrado\":\n print(\"creditos: \" + str(creditos) + \" total a pagar: \" + str(creditos* 50000))\n print(\"descuento: \" + str((creditos*50000)*descuento) + \" total con adescuento: \" + str((creditos*50000)*(1-descuento)))\n if modalidad == \"posgrado\":\n print(\"creditos: \" + str(creditos) + \" total a pagar: \" + str(creditos* 300000))\n print(\"descuento: \" + str((creditos*300000)*descuento) + \" total con descuento: \" + str((creditos*300000)*(1-descuento)))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"634348072","text":"import os\n\nfrom watchdog.observers import Observer\nfrom watchdog.events import FileSystemEventHandler\n\n\nclass MyEventHandler(FileSystemEventHandler):\n def __init__(self, filepath, callback):\n self.filepath = filepath\n self.callback = callback\n\n def on_any_event(self, event):\n if event.src_path == self.filepath:\n self.callback(filepath=self.filepath)\n\n\ndef filepath(filepath, callback):\n event_handler = MyEventHandler(filepath, callback)\n dirpath = os.path.dirname(filepath)\n observer = Observer()\n observer.schedule(event_handler, dirpath, recursive=True)\n observer.start()\n\n return observer\n","sub_path":"k8s-simple-rolling-update/v2/k8sru/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"630844753","text":"'''\n Solution loops through each element in the nums array and appends the element to all existing subsets and appends the new subset to our subset array.\n \n Time Complexity: O(2^n) where n is the number of elements in the array.\n'''\nclass Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n subset = [[]]\n for i in range(len(nums)):\n for j in range(len(subset)):\n currentSubset = subset[j].copy()\n currentSubset.append(nums[i])\n subset.append(currentSubset)\n\n return subset\n","sub_path":"Subsets/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"406915152","text":"from PyQt5 import QtCore\nfrom PyQt5.QtWidgets import *\n\n\nclass WriteInstructionDialog(QDialog):\n def __init__(self, parent=None, input_content='', arch='', mode=''):\n super(WriteInstructionDialog, self).__init__(parent)\n\n layout = QVBoxLayout(self)\n layout.addWidget(QLabel('insert instruction'))\n self.input_widget = QLineEdit(self)\n if len(input_content) > 0:\n self.input_widget.setText(input_content)\n self.input_widget.setMinimumWidth(350)\n layout.addWidget(self.input_widget)\n\n arch_mode_layout = QHBoxLayout()\n import keystone\n ks_objs = dir(keystone.keystone_const)\n\n self.arch = QComboBox(self)\n for w in ks_objs:\n if w.startswith('KS_ARCH_'):\n self.arch.addItem(w.replace('KS_ARCH_', '').lower())\n if w == arch:\n self.arch.setCurrentIndex(self.arch.count() - 1)\n arch_mode_layout.addWidget(self.arch)\n\n self.mode = QComboBox(self)\n for w in ks_objs:\n if w.startswith('KS_MODE_'):\n self.mode.addItem(w.replace('KS_MODE_', '').lower())\n if w == mode:\n self.mode.setCurrentIndex(self.mode.count() - 1)\n arch_mode_layout.addWidget(self.mode)\n\n layout.addLayout(arch_mode_layout)\n\n buttons = QHBoxLayout()\n ok = QPushButton('Ok')\n buttons.addWidget(ok)\n ok.clicked.connect(self.accept)\n cancel = QPushButton('cancel')\n cancel.clicked.connect(self.close)\n buttons.addWidget(cancel)\n layout.addLayout(buttons)\n\n def keyPressEvent(self, event):\n super(WriteInstructionDialog, self).keyPressEvent(event)\n if event.key() == QtCore.Qt.Key_Return:\n self.accept()\n\n @staticmethod\n def show_dialog(input_content='', arch='', mode=''):\n dialog = WriteInstructionDialog(input_content=input_content, arch=arch, mode=mode)\n result = dialog.exec_()\n\n return result == QDialog.Accepted, \\\n dialog.input_widget.text(), \\\n dialog.arch.currentText(), \\\n dialog.mode.currentText()\n","sub_path":"dwarf_debugger/ui/dialogs/dialog_write_instruction.py","file_name":"dialog_write_instruction.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"257857584","text":"# 1. створити файл\n# 2. прочитати\n# 3. змінити\n# 4. видалити\nfrom flask import Flask, request\nimport datetime\nimport os\nimport json\n\napp = Flask(__name__)\n\n\n@app.route('/file/task', methods=['GET', 'POST', 'PUT', 'DELETE'])\ndef file_tasks():\n if request.method == 'GET':\n \"\"\"get info from file\"\"\"\n path = \"/created_file.txt\"\n with open(path, \"r\") as file:\n return file.read()\n\n elif request.method == 'POST':\n \"\"\"create file\"\"\"\n dater = json.loads(request.data)\n data = f\"{dater}\\n\"\n with open('created_file.txt', \"w\") as new_file:\n new_file.write(data)\n return \"201\"\n elif request.method == \"PUT\":\n \"modify/update file\"\n dateti = json.loads(request.data)\n data = f\"{dateti}\\n\"\n with open('created_file.txt', \"a\") as new_file:\n new_file.write(data)\n return \"204\"\n elif request.method == 'DELETE':\n \"\"\"delete file\"\"\"\n os.remove(\"created_file.txt\")\n return \"204\"\n else:\n return '405'\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8080)\n","sub_path":"file_helper.py","file_name":"file_helper.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"177735692","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Nov 12 17:53:23 2017\r\n\r\n@author: feebr01\r\n\"\"\"\r\n\r\nimport seaborn as sns\r\n\r\n########## DISTRIBUTION PLOTS\r\n\r\ntips = sns.load_dataset('tips')\r\ntips.head()\r\n\r\nsns.distplot(tips['total_bill'])\r\n\r\n#Compare relationships good\r\nsns.jointplot(x='total_bill', y='tip', data=tips, kind='scatter')\r\nsns.pairplot(tips, hue = 'sex', palette = 'coolwarm')\r\n\r\nsns.rugplot(tips['total_bill'])\r\n\r\n\r\n#################CATEGORICAL DATA W VALUES\r\nimport numpy as np\r\nsns.barplot(x='sex',y='total_bill',data=tips, estimator =np.std)\r\n\r\nsns.countplot(x='sex', data = tips)\r\n\r\nsns.boxplot(x='day', y='total_bill',data = tips, hue ='smoker')\r\n\r\nsns.violinplot(x='day', y='total_bill',data = tips)\r\n\r\nsns.stripplot(x='day', y='total_bill',data = tips, jitter = True)\r\n\r\nsns.swarmplot(x='day', y='total_bill',data = tips)\r\n\r\n\r\n################### MATRIX PLOTS\r\nflights = sns.load_dataset('flights')\r\n\r\n\r\nsns.heatmap(tips.corr(), annot = True)\r\n\r\n#Pivot data and make heatmap\r\npvt = flights.pivot_table(index = 'month', columns = 'year', values = 'passengers')\r\nsns.heatmap(pvt, linecolor= 'white' ,lw=.2)\r\n\r\n\r\nsns.clustermap(pvt, cmap='coolwarm')\r\n\r\n\r\n################### REGRESSIONPLOTS\r\n\r\n#color dots by sex\r\nsns.lmplot(x='total_bill', y='tip', data=tips, hue = 'sex')\r\n\r\n#separate charts side by side by sex\r\nsns.lmplot(x='total_bill', y='tip', data=tips, col = 'sex')\r\n\r\n# size = , aspect = \r\n\r\n##############GRID\r\n\r\niris = sns.load_dataset('iris')\r\n\r\nsns.pairplot(iris)\r\n\r\ng = sns.PairGrid(iris)\r\n\r\n#separate by rows and columns, map is display\r\ng = sns.FacetGrid(data=tips, col = 'time', row ='smoker')\r\ng.map(sns.distplot, 'total_bill')\r\n\r\n####################STYLES AND COLORS\r\nsns.set_context('talk')\r\nsns.countplot(x='sex', data=tips)\r\n\r\nsns.lmplot(x= 'total_bill', y = 'tip', data = tips, hue = 'sex', palette='seismic' )\r\n\r\n\r\n\r\n\r\n","sub_path":"Udemy Seaborn.py","file_name":"Udemy Seaborn.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"35376479","text":"import json\n\nimport numpy as np\nimport statistics as sts\n\nimport extras.parser as parser\nimport extras.functions as functions\nimport extras.utils as utils\nimport perceptron.autoencoder as ae\n\nwith open(\"config.json\") as file:\n config = json.load(file)\n\n# static non changeable vars\nerror_threshold: float = config[\"error_threshold\"]\n\n# read the files and get the dataset. There is no need to normalize data at this exercise\nfull_dataset, _ = parser.read_file(config[\"file\"], config[\"system_threshold\"])\n\n# activation function and its derived\nact_funcs = functions.get_activation_functions(config[\"system\"], config[\"beta\"])\n\n# normalize data\nif config[\"normalize\"]:\n full_dataset = parser.normalize_data(full_dataset)\n\n# extract the last % of the dataset\ndataset, rest = parser.extract_subset(full_dataset, config[\"training_ratio\"])\n\n# initializes the auto-encoder\nauto_encoder = ae.AutoEncoder(*act_funcs, config[\"mid_layout\"], len(dataset[0]), config[\"latent_dim\"],\n config[\"momentum\"], config[\"alpha\"])\n\n# randomize w if asked\nif bool(config[\"randomize_w\"]):\n auto_encoder.randomize_w(config[\"randomize_w_ref\"], config[\"randomize_w_by_len\"])\n\nplot_bool = bool(config[\"plot\"])\n\n# initialize plotter\nif plot_bool:\n utils.init_plotter()\n\n# get pm from config\npm: float = config[\"denoising\"][\"pm\"]\n\n# use minimizer if asked\nif config[\"optimizer\"] != \"None\" and config[\"optimizer\"] != \"\":\n # randomize the dataset\n dataset = parser.randomize_data(dataset, config[\"data_random_seed\"])\n # train with minimize\n auto_encoder.train_minimizer(parser.add_noise_dataset(dataset, pm), dataset, config[\"trust\"], config[\"use_trust\"], config[\"optimizer\"], config[\"optimizer_iter\"], config[\"optimizer_fev\"])\n # plot error vs opt step\n utils.plot_values(range(len(auto_encoder.opt_err)), 'opt step', auto_encoder.opt_err, 'error', sci_y=False)\nelse:\n # vars for plotting\n ep_list = []\n err_list = []\n\n # train auto-encoder\n for ep in range(config[\"epochs\"]):\n\n # randomize the dataset everytime\n dataset = parser.randomize_data(dataset, config[\"data_random_seed\"])\n\n # train for this epoch\n for data in dataset:\n auto_encoder.train(parser.add_noise(data, pm), data, config[\"eta\"])\n\n # apply the changes\n auto_encoder.update_w()\n\n # calculate error\n error: float = auto_encoder.error(parser.add_noise_dataset(dataset, pm), dataset, config[\"trust\"], config[\"use_trust\"])\n if error < config[\"error_threshold\"]:\n break\n\n if ep % 50 == 0:\n print(f'Iteration {ep}, error {error}')\n\n # add error to list\n ep_list.append(ep)\n err_list.append(error)\n \n # plot error vs epoch\n if plot_bool:\n utils.plot_values(ep_list, 'epoch', err_list, 'error', sci_y=False)\n\n# labels for printing (use with full_dataset)\nlabels: [] = ['@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\\\', ']', '^', '_']\n\nPM_ITER = 50\n\npm_values = [pm / 4, pm, pm * 2.5]\nx_superlist = []\nerr_superlist = []\nleg_list = ['pm=0,0625', 'pm=0,25', 'pm=0,625']\nfor pm_it in pm_values:\n err_mean: [] = []\n for data in full_dataset:\n aux: [] = []\n for i in range(PM_ITER):\n noisy_res = auto_encoder.activation(parser.add_noise(data, pm_it))\n aux.append(np.sum(abs(np.around(noisy_res[1:]) - data[1:])) / len(data[1:]))\n letter_err_mean = sts.mean(aux)\n err_mean.append(letter_err_mean)\n 
x_superlist.append(range(len(full_dataset)))\n err_superlist.append(err_mean)\n print(f'Using pm={pm_it}, error mean is {sts.mean(err_mean)}')\n\nif plot_bool:\n utils.plot_multiple_values(x_superlist, 'Letter', err_superlist, 'Invalid bits', leg_list, sci_y=False, xticks=labels, min_val_y=0, max_val_y=1)\n utils.plot_stackbars(x_superlist, 'Letter', err_superlist, 'Invalid bits', leg_list, sci_y=False, xticks=labels, min_val_y=0, max_val_y=1)\n\n # hold execution\n utils.hold_execution()\n","sub_path":"denoising.py","file_name":"denoising.py","file_ext":"py","file_size_in_byte":4089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"95923973","text":"#!/home/blogenv/bin/python\n# -*- coding: utf-8 -*-\nfrom django.shortcuts import render,get_object_or_404,redirect\nfrom blog.models import Post\nfrom .models import Comment\nfrom .forms import CommentForm\nfrom users.models import User\n\n# Create your views here.\n# 发布评论的视图函数\ndef post_comment(request,post_pk,user_pk):\n # 获得当前文章对象,登录的用户对象\n post=get_object_or_404(Post,pk=post_pk)\n user=get_object_or_404(User,pk=user_pk)\n # 如果为post方法\n if request.method=='POST':\n form=CommentForm(request.POST)\n # 如果表单内容合规\n if form.is_valid():\n # 创建评论对象,不保存到数据库\n comment=form.save(commit=False)\n\n comment.post=post\n comment.user=user\n\n # 保存评论表单数据到数据库,重定向当前页\n comment.save()\n return redirect(post)\n else:\n # 获得当前所有评论列表\n comment_list=post.comment_set.all()\n # 将当前页面表单内容,文章,评论列表保存后,重新渲染页面\n context={\n 'post':post,\n 'form':form,\n 'comment_list':comment_list\n }\n return render(request,'blog/detail.html',context=context)\n else:\n # get访问时,刷新页面\n return redirect(post)","sub_path":"comments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"581173268","text":"#!/usr/bin/python\r\n# _*_coding: utf-8 _*_\r\n\r\nimport os\r\nimport sys\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\n\r\n\r\nurl = \"https://news.sina.com.cn/china/\" #IEEE.sh\r\nres = requests.get(url)\r\n\r\nres.encoding = 'utf-8'\r\n#soup = BeautifulSoup(res.text, 'lxml')\r\n#print(soup.title.text)\r\nsoup=BeautifulSoup(res.text,'html.parser')\r\n#print(soup.text)\r\nif os.path.exists(\"code/news.txt\"):\r\n os.remove(\"code/news.txt\")\r\ngetnews =\"\"\r\nfor news in soup.select(\".right-content\"):\r\n #print(news)\r\n #print (news.select(\"a\"))\r\n new_as=news.select(\"a\")\r\n for news_a in new_as:\r\n getnews = getnews + (news_a.text) +('\\n')\r\n\r\nprint(\"[+] 获取新闻(来自:\" + url +\"):\" )\r\nprint(getnews)\r\n\r\n\r\nwith open('code/news.txt','w') as f:\r\n f.write(getnews)","sub_path":"code/getnews.py","file_name":"getnews.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"420827866","text":"#! /usr/bin/python\n# Author: Nordine Sebkhi\n\nfrom gps import *\nfrom time import *\nimport time\nimport threading\nfrom datetime import datetime\nfrom math import cos, sin, radians, degrees, atan2\n\n\nclass GpsReader(threading.Thread):\n\n def __init__(self, outputDir):\n threading.Thread.__init__(self)\n self.gpsPoller = GpsPoller()\n self.outDir = outputDir\n\n def run(self):\n print(\"GPS: Connecting...\")\n self.gpsPoller.start()\n\n gpsFound = False\n prevLat = 0\n prevLon = 0\n bearing = 0\n\n while True:\n gpsData = self.gpsPoller.getCurrentValue()\n\n if (gpsData is not None and hasattr(gpsData, 'mode') and gpsData.mode == 3):\n\n # Get time stamp of current sample\n ts = datetime.now()\n\n # Create new file at each new data collection\n if not gpsFound:\n print(\"GPS : Connected!!\")\n gpsFound = True\n gpsFile = self.outDir + \"gps_{0}-{1}-{2}_{3}-{4}-{5}.csv\".format(\n ts.year, ts.month, ts.day, ts.hour, ts.minute, ts.second)\n\n with open(gpsFile, 'w') as f:\n f.write('LAT,LON,BEARING,ALT,SPEED,CLIMB,GPS_TIME,RPI_TIME\\n')\n\n # Append gps data\n timeGPS = gpsData.time\n lat = gpsData.lat # Degrees (North:+ South:-)\n lon = gpsData.lon # Degrees (East:+ West:-)\n alt = gpsData.alt # Meters\n speed = gpsData.speed # Meters per second\n climb = gpsData.climb # Climb(+) or Sink(-) in meters per second\n\n # Update Bearing\n if (prevLat != 0):\n bearing += self.getBearing(prevLat, prevLon, lat, lon)\n\n # Write results to output file\n with open(gpsFile, 'a') as f:\n f.write('{0},{1},{2},{3},{4},{5},{6},{7}\\n'.format(\n lat, lon, bearing, alt, speed, climb, timeGPS, ts))\n\n # Prepare for next iteration\n prevLat = lat\n prevLon = lon\n time.sleep(0.5)\n\n def getBearing(self, lat_start_deg, lon_start_deg, lat_end_deg, lon_end_deg):\n latStart = radians(lat_start_deg)\n lonStart = radians(lon_start_deg)\n latEnd = radians(lat_end_deg)\n lonEnd = radians(lon_end_deg)\n\n d_Lon = lonEnd - lonStart\n\n x = cos(latEnd) * sin(d_Lon)\n y = cos(latStart) * sin(latEnd) - sin(latStart) * cos(latEnd) * cos(d_Lon)\n\n bearingRad = atan2(x, y)\n return ((degrees(bearingRad) + 360.0) % 360)\n\n\nclass GpsPoller(threading.Thread):\n\n def __init__(self):\n threading.Thread.__init__(self)\n self.current_value = None\n\n def getCurrentValue(self):\n return self.current_value\n\n def run(self):\n\n gpsd = gps(mode=WATCH_ENABLE) # starting the stream of info\n\n try:\n while True:\n self.current_value = gpsd.next()\n\n except StopIteration:\n pass\n\n##################################\n# Test\n#############################\nif __name__ == \"__main__\":\n latStart = 39.099912\n lonStart = -94.581213\n latEnd = 38.627089\n lonEnd = -90.200203\n\n gpsReader = GpsReader(\"don't care'\")\n bearing = gpsReader.getBearing(latStart, lonStart, latEnd, lonEnd)\n print(\"Bearing (deg) = {0:f}\".format(bearing))","sub_path":"gpsReader.py","file_name":"gpsReader.py","file_ext":"py","file_size_in_byte":3491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"169073929","text":"from PIL import Image, ImageDraw\nfrom random import randint, randrange, choice\nimport sys\nsys.setrecursionlimit(2000)\n\nclass Data:\n def __init__(self, file):\n self.file = file\n self.list_of_rows = []\n with open(self.file) as data:\n for line in data.readlines():\n self.list_of_rows.append([int(num) for num in line.split()])\n self.width = len(self.list_of_rows[0])\n self.height = len(self.list_of_rows)\n self.min = min([min(row) for row in self.list_of_rows])\n self.max = max([max(row) for row in self.list_of_rows])\n\n def rgb(self, point_x, point_y):\n rgb_value = int(((self.list_of_rows[point_y][point_x] - self.min) / (self.max - self.min)) * 255)\n return (rgb_value, rgb_value, rgb_value)\n\n def get_elevation(self, point):\n return self.list_of_rows[point[1]][point[0]]\n\n def get_rgb(self, point):\n return self.rgb(point[0], point[1])[0]\n\nclass Map:\n def __init__(self, data):\n self.data = data\n self.image = Image.new('RGB', (self.data.width, self.data.height))\n self.draw = ImageDraw.Draw(self.image)\n\n def draw_map(self):\n for y in range(self.data.height):\n for x in range(self.data.width):\n self.draw.point((x, y), self.data.rgb(x, y))\n return self\n\n def draw_path(self, path, color):\n for point in path[1]:\n self.draw.point(point, color)\n return self\n\n def display(self):\n return self.image.show()\n\nclass Pathfinder:\n def __init__(self, data):\n self.data = data\n self.recursive_results = {}\n self.iterative_results = []\n\n def greedy_path(self, starting_point):\n current_point = starting_point\n path_cost = 0\n path = []\n path.append(current_point)\n for step in range(self.data.width-1):\n up = (step+1, max(current_point[1]-1, 0))\n straight = (step+1, current_point[1])\n down = (step+1, min(current_point[1]+1, self.data.height-1))\n choices = [up, straight, down]\n\n up_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(up))\n straight_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(straight))\n down_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(down))\n costs = [up_cost, straight_cost, down_cost]\n\n choices_costs = dict(zip(choices, costs))\n sorted_choices = sorted(choices_costs, key=choices_costs.__getitem__)\n\n if up_cost >= straight_cost <= down_cost:\n path_cost += straight_cost\n path.append(straight)\n current_point = straight\n elif up_cost == down_cost:\n options = [up, down]\n decision = choice(options)\n path_cost += choices_costs[decision]\n path.append(decision)\n current_point = decision \n else:\n path_cost += choices_costs[sorted_choices[0]]\n path.append(sorted_choices[0])\n current_point = sorted_choices[0]\n\n return (path_cost, path)\n\n\n def recursive_best(self, starting_point):\n if starting_point in self.recursive_results:\n return self.recursive_results[starting_point]\n\n current_point = starting_point\n \n up = (current_point[0]+1, max(current_point[1]-1, 0))\n straight = (current_point[0]+1, current_point[1])\n down = (current_point[0]+1, min(current_point[1]+1, self.data.height-1))\n choices = [up, straight, down]\n\n up_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(up))\n straight_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(straight))\n down_cost = abs(self.data.get_elevation(current_point)-self.data.get_elevation(down))\n costs = [up_cost, straight_cost, down_cost]\n\n choices_costs = dict(zip(choices, costs))\n sorted_choices = sorted(choices_costs, 
key=choices_costs.__getitem__)\n\n if current_point[0] == self.data.width - 2:\n self.recursive_results[current_point] = (choices_costs[sorted_choices[0]], [sorted_choices[0]])\n return self.recursive_results[current_point]\n\n paths = [self.recursive_best(point) for point in choices]\n new_paths = [(paths[0][0]+costs[0], [current_point]+paths[0][1]),\n (paths[1][0]+costs[1], [current_point]+paths[1][1]),\n (paths[2][0]+costs[2], [current_point]+paths[2][1])]\n sorted_paths = sorted(new_paths, key=lambda x: x[0])\n self.recursive_results[current_point] = sorted_paths[0]\n return self.recursive_results[current_point]\n\n\n def iterative_best(self, starting_point):\n new_data = [column for column in zip(*self.data.list_of_rows)]\n self.iterative_results = [(0, [(len(new_data)-1, y)]) for y, point in enumerate(new_data[-1])]\n\n for x, column in enumerate(new_data[-2::-1]):\n new_results = []\n for y, point in enumerate(column):\n current_point = ((len(new_data)-x)-2, y)\n up_path = [current_point] + self.iterative_results[max(y-1, 0)][1]\n straight_path = [current_point] + self.iterative_results[y][1]\n down_path = [current_point] + self.iterative_results[min(y+1, len(new_data[0])-1)][1]\n\n up_cost = abs(point-self.data.get_elevation(up_path[1]))\n straight_cost = abs(point-self.data.get_elevation(straight_path[1]))\n down_cost = abs(point-self.data.get_elevation(down_path[1]))\n\n up_choice = (up_cost + self.iterative_results[y][0], up_path)\n straight_choice = (straight_cost + self.iterative_results[y][0], straight_path)\n down_choice = (down_cost + self.iterative_results[y][0], down_path)\n\n choices = [up_choice, straight_choice, down_choice]\n sorted_choices = sorted(choices, key=lambda x: x[0])\n new_results.append(sorted_choices[0])\n self.iterative_results = new_results\n \n return self.iterative_results\n\ndata = Data('elevation_large.txt')\na_map = Map(data)\na_map.draw_map()\npaths = []\npathfinder = Pathfinder(data)\n\n\n#Greedy algorithm\nfor y in range(data.height - 1):\n path = pathfinder.greedy_path((0, y))\n a_map.draw_path(path, (0, 255, 0))\n paths.append(path)\nsorted_paths = sorted(paths, key=lambda x: x[0])\na_map.draw_path(sorted_paths[0], (0, 0, 255))\na_map.display()\nprint(sorted_paths[0][0])\n\n#Iterative algorithm\n# paths = pathfinder.iterative_best((0, 300))\n# for path in paths:\n# a_map.draw_path(path, (0, 255, 0))\n# sorted_paths = sorted(paths, key=lambda x: x[0])\n# a_map.draw_path(sorted_paths[0], (0, 0, 255))\n# a_map.display()\n# print(sorted_paths[0][0])\n\n#Recursive algorithm\n# for y in range(data.height - 1):\n# path = pathfinder.recursive_best((0, y))\n# a_map.draw_path(path, (0, 255, 0))\n# paths.append(path)\n# sorted_paths = sorted(paths, key=lambda x: x[0])\n# a_map.draw_path(sorted_paths[0], (0, 0, 255))\n# a_map.display()\n# print(sorted_paths[0][0])","sub_path":"pathfinder_with_classes.py","file_name":"pathfinder_with_classes.py","file_ext":"py","file_size_in_byte":7312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"25430485","text":"import serial\nimport matplotlib.pyplot as plt\nimport math\nimport scipy.optimize\nimport numpy as np\nfrom more_itertools import *\n\n\ndef frequency_motor(s, run_time, frequency, gain):\n s.write(b'time report on\\n')\n s.write(b'motor left report on\\n')\n\n start_time = None\n p = None\n\n times = []\n positions = []\n powers = []\n\n while True:\n line = s.readline()\n if b',' in line and b':' in line:\n time = None\n position = None\n\n words = line.split(b',')\n for word in words:\n parts = word.split(b':')\n if parts[0] == b'T':\n try:\n time = int(parts[1])\n except ValueError:\n print('Error parsing {} from {}'.format(parts[1], line))\n elif parts[0] == b'LM':\n try:\n position = int(parts[1])\n except ValueError:\n print('Error parsing {} from {}'.format(parts[1], line))\n\n if start_time is None:\n start_time = time\n else:\n time = time - start_time\n if time is not None and position is not None:\n times.append(time)\n positions.append(position)\n powers.append(p)\n\n if time % 10 == 0:\n p = gain / 2 + gain / 2 * math.sin(2 * math.pi * time * frequency)\n s.write(b'motor left set %d\\n' % p)\n\n if time >= run_time:\n s.write(b'motor left set 0\\n')\n s.write(b'motor left report off\\n')\n s.write(b'time report off\\n')\n break\n\n return times, positions, powers\n\n\ns = serial.Serial('/dev/ttyUSB0', 230400, timeout=1)\n\n\ndef calc_velocity(d):\n (last_time, last_position), (next_time, next_position) = d\n return (next_position - last_position) - (next_time - last_time)\n\n\ntimes, positions, powers = frequency_motor(s, 5000, 0.002, 10000)\n\ntimes, positions, powers = unzip(filter(lambda d: d[0] >= 1000, zip(times, positions, powers)))\n\ntimes = list(times)\npositions = list(positions)\npowers = list(powers)\n\nvelocities = list(map(calc_velocity, windowed(zip(times, positions), 2)))\n\nvelocities.append(last(velocities))\n\n\ndef sine(t, offset, gain, frequency, phase):\n return offset + gain * np.sin(2*math.pi*t*frequency + phase)\n\n\npopt, pcov = scipy.optimize.curve_fit(sine, times, velocities, p0=[3, 2.5, 0.002, 0])\n\nprint(popt)\nprint(pcov)\n\nfit_velocities = list(map(lambda t: sine(t, *popt), times))\n\nplt.plot(\n #times, velocities,\n times, list(map(lambda p: p / 1000 if p is not None else None, powers)),\n times, fit_velocities,\n linewidth=1\n)\nplt.show()\n","sub_path":"software/micromouse_analysis/analyze_frequency_motors.py","file_name":"analyze_frequency_motors.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"56677","text":"from flask import Flask, request, jsonify, make_response, abort\nfrom werkzeug.exceptions import HTTPException\n\nfrom db import db\n\napp = Flask(__name__)\n\n\n@app.errorhandler(HTTPException)\ndef err_handler(error):\n return make_response(jsonify({'status': error.code,\n 'reason': error.description}), error.code)\n\n\ndef validate_post(err_code):\n if not request.json:\n abort(err_code, \"Fields is required\")\n if 'movie' not in request.json:\n abort(err_code, \"Field 'movie' is required\")\n\n for i in ['title', 'year', 'director', 'length', 'rating']:\n if i not in request.json['movie']:\n abort(err_code, \"Field '\" + i + \"' is required\")\n\n\ndef gen_return(movie):\n return {\"movie\": {\n \"id\": movie.id,\n \"title\": movie.title,\n \"year\": movie.year,\n \"director\": movie.director,\n \"length\": movie.length,\n \"rating\": movie.rating\n }}\n\n\ndef get_movies():\n ret = {\"list\": []}\n for movies in db.session.query(db.Movies):\n ret[\"list\"].append(gen_return(movies)['movie'])\n return jsonify(ret)\n\n\ndef post_movies():\n validate_post(500)\n json_inp = request.json['movie']\n try:\n movie = db.Movies(json_inp['title'], json_inp['year'],\n json_inp['director'], json_inp['length'],\n json_inp['rating']) # TODO: А разве надо использовать id от клиенета\n except AssertionError as e:\n abort(400, e.args[0])\n else:\n db.session.add(movie)\n db.session.commit()\n return jsonify(gen_return(movie))\n\n\ndef get_one_movies(movies_id):\n movie = db.session.query(db.Movies).get(movies_id)\n if movie is None:\n abort(404, \"Movie not found\")\n return jsonify(gen_return(movie))\n\n\ndef patch_movie(movies_id):\n validate_post(400) # Код нужен потому, что в задание они указаны разные\n movie = db.session.query(db.Movies).get(movies_id)\n if movie is None:\n abort(404, \"Movie not found\")\n json_inp = request.json['movie']\n\n try:\n movie.title = json_inp['title']\n movie.year = json_inp['year']\n movie.director = json_inp['director']\n movie.length = json_inp['length']\n movie.rating = json_inp['rating']\n except AssertionError as e:\n abort(400, e.args[0])\n else:\n db.session.commit()\n return jsonify(gen_return(movie))\n\n\ndef delete_movie(movies_id):\n movie = db.session.query(db.Movies).get(movies_id)\n if movie is None:\n abort(404, \"Movie not found\")\n db.session.delete(movie)\n db.session.commit()\n return make_response(jsonify(\"Accepted\"), 202)\n\n\n@app.route('/api/movies', methods=[\"GET\", \"POST\"])\ndef api_movies():\n if request.method == \"GET\":\n return get_movies()\n elif request.method == \"POST\":\n return post_movies()\n\n\n@app.route('/api/movies/', methods=[\"GET\", \"PATCH\", \"DELETE\"])\ndef api_movies_ids(movies_id):\n if request.method == \"GET\":\n return get_one_movies(movies_id)\n elif request.method == \"PATCH\":\n return patch_movie(movies_id)\n elif request.method == \"DELETE\":\n return delete_movie(movies_id)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"133054155","text":"#!/usr/bin/python3\n\"\"\"\nUse fabric to create tgz file of static code\n\"\"\"\nfrom fabric.api import local, task, env\nfrom datetime import datetime\n\n\n@task\ndef do_pack():\n \"\"\"\n Run tar command to compress files\n \"\"\"\n now = datetime.now()\n file_name = \"web_static_{}{}{}{}{}{}.tgz\".format(\n now.year,\n now.month,\n now.day,\n now.hour,\n now.minute,\n now.second\n )\n try:\n local(\"sudo tar -cvzf {} ./web_static\".format(file_name))\n local(\"sudo mkdir -p versions\")\n local(\"sudo mv ./{} versions/\".format(file_name))\n except:\n return (None)\n return (\"versions/{}\".format(file_name))\n","sub_path":"1-pack_web_static.py","file_name":"1-pack_web_static.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"1772173","text":"#!/usr/bin/env python\n\nimport sys, os\nfrom struct import pack, unpack\nfrom hexdump import hexdump\n\nfrom pcie_lib import *\nfrom uefi import *\n\nHV_INFO_ADDR = STATUS_ADDR - (8 * 5)\n\ndef main():\n\n payload = sys.argv[1] if len(sys.argv) > 1 else None\n\n dev = dxe_inject(payload = payload)\n \n if payload is None: return 0\n\n print('[+] DXE driver was planted, waiting for backdoor init...') \n\n while True:\n\n # wait for DXE backdoor\n status = dev.mem_read_8(STATUS_ADDR)\n \n if status == 0:\n\n # not ready yet\n time.sleep(1)\n continue\n\n print('[+] DXE driver was executed')\n\n break\n\n print('[+] Waiting for Hyper-V init...')\n\n dev.mem_write_8(HV_INFO_ADDR, 0)\n\n while True:\n\n # wait for hypervisor\n status, winload_cr3, hv_cr3, hv_entry, hv_base = unpack('qQQQQ', dev.mem_read(HV_INFO_ADDR, 8 * 5))\n\n if status == 0:\n\n # not ready yet\n time.sleep(1)\n continue\n\n if status == -1:\n\n print('ERROR: DXE driver is unable to locate winload.efi')\n break\n\n if status == -2:\n\n print('ERROR: DXE driver is unable to locate winload!HvlpBelow1MbPage')\n break\n\n if status == -3:\n\n print('ERROR: HvlpBelow1MbPage is not allocated, Hyper-V wasn\\'t started')\n break\n\n if status == -4:\n\n print('ERROR: DXE driver is unable to locate winload!HvlpTransferToHypervisor')\n break\n\n print('[+] Hyper-V image entry was executed\\n')\n print(' Winload CR3: 0x%.16x' % winload_cr3)\n print(' Hyper-V CR3: 0x%.16x' % hv_cr3)\n print(' Hyper-V VM exit handler: 0x%.16x' % hv_base)\n print(' Hyper-V image entry: 0x%.16x\\n' % hv_entry)\n\n break\n\n print('[+] DONE')\n \n dev.close()\n\n return 0\n\nif __name__ == '__main__':\n\n exit(main())\n","sub_path":"python/uefi_backdoor_hv.py","file_name":"uefi_backdoor_hv.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"504063154","text":"\"\"\"\nQ001\nTwo Sum\nEasy\n\n04/01/2021 revisit\n\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n save = {}\n for i in range(len(nums)):\n if target-nums[i] not in save:\n save[nums[i]] = i\n else:\n return [i, save[target-nums[i]]]\n\n\nnums = [2, 7, 11, 15]\ntarget = 13\n\nsol = Solution()\nprint(sol.twoSum(nums, target))\n\n\n\n","sub_path":"Q001-v2.py","file_name":"Q001-v2.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"197480708","text":"import streamlit as st\nimport os\nimport PIL\nimport streamlit.components.v1 as stc\n\n\ndef write():\n st.set_option('deprecation.showfileUploaderEncoding', False)\n st.title(\"Flurosis Tooth Detection By A.Adithya Sherwood IX-E\")\n st.subheader('Disclaimer: Please check with your local specialized dentist, if you are in doubt please try atleast twice.')\n conf_score = st.slider('Please Choose A Confidence Value',0.1,1.0,0.05)\n uploaded_file = st.file_uploader(\"Choose an image\", type=\"jpg\") \n\n \n if uploaded_file is not None:\n image = PIL.Image.open(uploaded_file)\n image = image.resize((416,416))\n image.save(f'./Test_Flurosis.jpg')\n image_flurosis = open(f'./Test_Flurosis.jpg','rb')\n st.image(image, caption='Uploaded Image.', use_column_width=True)\n st.write(\"\")\n os.system(f\"python3 detect.py --weights './weights/best (2).pt' --img 416 --conf {str(conf_score)} --source ./Test_Flurosis.jpg --output ./test.jpg\")\n image_pred = PIL.Image.open(f'./test.jpg')\n st.image(image_pred, caption='Predictions.', use_column_width=True)\n","sub_path":"flurosis_tooth_detection.py","file_name":"flurosis_tooth_detection.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"593161774","text":"import logging\nimport pkgutil\nimport re\nimport typing\nfrom abc import ABCMeta, abstractmethod\nfrom functools import lru_cache\n\nfrom googleapiclient import discovery\nfrom googleapiclient import errors\n\nfrom util.config_utils import is_copying_labels_from_project, iris_prefix\nfrom util.utils import cls_by_name, shorten, methods\n\nPLUGINS_MODULE = \"plugins\"\n\n\nclass Plugin(object, metaclass=ABCMeta):\n __project_access_client = discovery.build(\"cloudresourcemanager\", \"v1\")\n __proj_regex = re.compile(r\"[a-z]([-a-z0-9]*[a-z0-9])?\")\n subclasses = []\n\n def __init__(self):\n self.counter = 0\n self._google_client = discovery.build(*self.discovery_api())\n self._batch = self._google_client.new_batch_http_request(\n callback=self.__batch_callback\n )\n\n @classmethod\n @abstractmethod\n def discovery_api(cls) -> typing.Tuple[str, str]:\n pass\n\n @classmethod\n def is_labeled_on_creation(cls) -> bool:\n \"\"\"\n Only a few classes are labeled on creation, and these classes should override this method.\n \"\"\"\n return True\n\n @lru_cache(maxsize=256)\n def _project_labels(self, project_id) -> typing.Dict:\n\n assert self.__proj_regex.match(project_id), project_id\n\n request = self.__project_access_client.projects().get(projectId=project_id)\n try:\n response = request.execute()\n return response.get(\"labels\", {}) # Handle case where project has no labels\n except errors.HttpError as e:\n logging.exception(f\"Failing to get labels for project {project_id}: {e}\")\n return {}\n\n def __iris_labels(self, gcp_object) -> typing.Dict[str, str]:\n pfx = \"_gcp_\"\n\n def legalize_value(s):\n \"\"\"\n Only hyphens (-), underscores (_), lowercase characters,\n and numbers are allowed in label values. International characters are allowed.\n \"\"\"\n label_chars = re.compile(r\"[\\w\\d_-]\") # cached\n return \"\".join(c if label_chars.match(c) else \"_\" for c in s).lower()[:62]\n\n def value(func, gcp_obj):\n return legalize_value(func(gcp_obj))\n\n def key(func) -> str:\n return iris_prefix() + \"_\" + func.__name__[len(pfx) :]\n\n ret = {key(f): value(f, gcp_object) for f in methods(self, pfx)}\n\n return ret\n\n def __batch_callback(self, request_id, response, exception):\n\n if exception is not None:\n logging.error(\n \"in __batch_callback(), %s\",\n exception,\n )\n\n def do_batch(self):\n \"\"\"In do_label, we loop over all objects. 
But for efficienccy, we do not process\n then all at once, but rather gather objects and process them in batches of\n 1000 as we loop; then parse the remaining at the end of the loop\"\"\"\n try:\n self._batch.execute()\n except Exception as e:\n logging.exception(e)\n self.counter = 0\n\n @abstractmethod\n def do_label(self, project_id):\n \"\"\"Label all objects of a type in a given project\"\"\"\n pass\n\n @abstractmethod\n def get_gcp_object(self, log_data):\n \"\"\"Parse logging data to get a GCP object\"\"\"\n pass\n\n @abstractmethod\n def label_one(self, gcp_object: typing.Dict, project_id: str):\n \"\"\"Tag a single new object based on its description that comes from alog-line\"\"\"\n pass\n\n @abstractmethod\n def api_name(self):\n pass\n\n @abstractmethod\n def method_names(self):\n pass\n\n @classmethod\n def init(cls):\n def load_plugin_class(name):\n module_name = PLUGINS_MODULE + \".\" + name\n __import__(module_name)\n assert name == name.lower(), name\n plugin_cls = cls_by_name(PLUGINS_MODULE + \".\" + name + \".\" + name.title())\n return plugin_cls\n\n for _, module, _ in pkgutil.iter_modules([PLUGINS_MODULE]):\n plugin_class = load_plugin_class(module)\n Plugin.subclasses.append(plugin_class)\n\n assert Plugin.subclasses, \"No plugins defined\"\n\n @staticmethod\n def create_plugin(plugin_name: str) -> \"Plugin\":\n cls = cls_by_name(\n PLUGINS_MODULE + \".\" + plugin_name.lower() + \".\" + plugin_name\n )\n plugin = cls()\n return plugin\n\n def _build_labels(self, gcp_object, project_id):\n \"\"\"\n :return dict including original labels, project labels (if the system is configured to add those)\n and new labels. But if that would result in no change, return None\n \"\"\"\n\n original_labels = gcp_object[\"labels\"] if \"labels\" in gcp_object else {}\n project_labels = (\n self._project_labels(project_id) if is_copying_labels_from_project() else {}\n )\n iris_labels = self.__iris_labels(gcp_object)\n all_labels = {**iris_labels, **project_labels, **original_labels}\n if all_labels == original_labels:\n # Skip labeling because no change\n return None\n else:\n labels = {\"labels\": all_labels}\n fingerprint = gcp_object.get(\"labelFingerprint\", \"\")\n if fingerprint:\n labels[\"labelFingerprint\"] = fingerprint\n\n return labels\n\n def _name_after_slash(self, gcp_object):\n return self.__name(gcp_object, separator=\"/\")\n\n def _name_no_separator(self, gcp_object):\n return self.__name(gcp_object, separator=\"\")\n\n def __name(self, gcp_object, separator=\"\"):\n try:\n name = gcp_object[\"name\"]\n if separator:\n index = name.rfind(separator)\n name = name[index + 1 :]\n return name\n except KeyError as e:\n logging.exception(e)\n return None\n","sub_path":"plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"130704425","text":"import gui\nroot = gui.Tk()\n#import date2\n#root = date2.Tk()\nroot.title('MUKYALA MUKASA KAWEMPE DOMICILIARY CLINIC')\nroot['bg']='black'\nfrmmenu=gui.FormMenu(root)\n#frmmenu._init_menu()\nfrmmenu._init_widgets()\n\n#root.geometry(\"1040x650+0+0\")\nroot.geometry(\"1040x650+200+100\")\nroot.resizable(False,False)\n\nroot.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"502030722","text":"import numpy\nimport mainwindow\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import *\n\n\nclass MainWindow(QMainWindow, mainwindow.Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n\n self.size = 0\n self.setupBtn.clicked.connect(self.reset_view)\n\n self.lambda_matrix = []\n self.calcBtn.clicked.connect(self.calculate)\n\n def enable_cell(self, i, j):\n item = QTableWidgetItem(\"-\")\n item.setFlags(Qt.ItemIsEnabled)\n brush = QBrush(QColor(168, 168, 168))\n brush.setStyle(Qt.SolidPattern)\n item.setBackground(brush)\n\n self.intensTable.setItem(i, j, item)\n\n def reset_view(self):\n self.intensTable.clear()\n self.intensTable.setRowCount(self.spinBox.value())\n self.intensTable.setColumnCount(self.spinBox.value())\n self.probTable.setRowCount(self.spinBox.value())\n self.size = self.spinBox.value()\n\n self.enable_cell(0, 0)\n self.enable_cell(0, self.size - 1)\n self.enable_cell(self.size - 1, 0)\n self.enable_cell(self.size - 1, self.size - 1)\n\n for i in range (1, self.size - 1):\n for j in range (1, self.size - 1):\n self.enable_cell(i, j)\n\n def calculate(self):\n matrix = []\n for i in range(self.size):\n row = []\n for j in range(self.size):\n val = self.intensTable.item(i, j).text()\n if val == \"-\":\n row.append(0)\n else:\n row.append(float(val))\n matrix.append(row)\n\n left = []\n for i in range(self.size):\n p = []\n sum = 0\n for j in range(self.size):\n p.append(matrix[j][i])\n sum += matrix[i][j]\n p[i] = -sum\n left.append(p)\n norm = [1.0 for i in range(self.size)]\n left[self.size - 1] = norm\n right = [0 for i in range(self.size)]\n right[self.size - 1] = 1\n numpy_matrix = numpy.array(left)\n numpy_vector = numpy.array(right)\n\n array = numpy.linalg.solve(numpy_matrix, numpy_vector)\n\n for i in range(self.size):\n item = QTableWidgetItem(str(round(array[i]*100, 3))+\"%\")\n self.probTable.setItem(i, 0, item)\n\n\nif __name__ == \"__main__\":\n import sys\n app = QApplication(sys.argv)\n mw = MainWindow()\n mw.show()\n sys.exit(app.exec_())\n","sub_path":"Kolmogorov equations/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"634231800","text":"# https://app.codesignal.com/arcade/intro/level-2/xskq4ZxLyqQMCLshr/solutions\n\ndef matrix_elements_sum(matrix):\n\n result = 0\n banned_idx = []\n\n for row in matrix:\n for el_idx in range(len(row)):\n if row[el_idx] <= 0:\n banned_idx.append(el_idx)\n if el_idx in banned_idx:\n continue\n else:\n result += row[el_idx]\n\n return result\n","sub_path":"intro/08_matrix_elements_sum.py","file_name":"08_matrix_elements_sum.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"541010286","text":"import pygame as pg\nfrom LabyrinthUtility import *\nfrom LabyrinthGenerator import *\nimport random as rand\n\npg.init()\nendGame = False\nclock = pg.time.Clock()\n(xsize, ysize) = (100,50)\nlabyrinth = Labyrinth(xsize,ysize)\nscale = 15 #pixels per cell in labyrinth\nscreen = pg.display.set_mode((xsize*scale,ysize*scale))\nwallwidth = 3\n\ndepthFirstNoRec(labyrinth,rand.randint(0,xsize-1),rand.randint(0,ysize-1))\n\nplayer = Player(labyrinth,0,0)\nlabyrinth.characters.append(player)\n\ndef drawLabyrinth(labyrinth):\n\tscreen.fill((255,255,255))\n\tfor x in range(labyrinth.xsize):\n\t\tfor y in range(labyrinth.ysize):\n\t\t\tif labyrinth.array[x,y].pathLeft == False:\n\t\t\t\tpg.draw.line(screen, (0,0,0),(x*scale,y*scale),(x*scale,(y+1)*scale-1),wallwidth)\n\t\t\tif labyrinth.array[x,y].pathRight == False:\n\t\t\t\tpg.draw.line(screen, (0,0,0),((x+1)*scale-1,y*scale),((x+1)*scale-1,(y+1)*scale-1),wallwidth)\n\t\t\tif labyrinth.array[x,y].pathUp == False:\n\t\t\t\tpg.draw.line(screen, (0,0,0),(x*scale,y*scale),((x+1)*scale-1,y*scale),wallwidth)\n\t\t\tif labyrinth.array[x,y].pathDown == False:\n\t\t\t\tpg.draw.line(screen, (0,0,0),(x*scale,(y+1)*scale-1),((x+1)*scale-1,(y+1)*scale-1),wallwidth)\n\tfor c in labyrinth.characters:\n\t\tpg.draw.circle(screen,(255,0,0),(int(c.x*scale+scale/2),int(c.y*scale+scale/2)),int(scale/2-wallwidth))\n\nmoveCountDown = 0\nwhile not endGame:\n\tfor event in pg.event.get():\n\t\tif event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):\n\t\t\tendGame = True\n\tpressed = pg.key.get_pressed()\n\tif (pressed[pg.K_UP] or pressed[pg.K_DOWN] or pressed[pg.K_LEFT] or pressed[pg.K_RIGHT]) and moveCountDown == 0:\n\t\tmoveCountDown = 7\n\t\tif pressed[pg.K_UP]: player.moveUp()\n\t\tif pressed[pg.K_DOWN]: player.moveDown()\n\t\tif pressed[pg.K_LEFT]: player.moveLeft()\n\t\tif pressed[pg.K_RIGHT]: player.moveRight()\n\tif moveCountDown > 0:\n\t\tmoveCountDown -= 1\n\tdrawLabyrinth(labyrinth)\n\tpg.display.flip()\n\tclock.tick(60)\n","sub_path":"LabyrinthGame.py","file_name":"LabyrinthGame.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"193315894","text":"#---Control servos in robotic arm with xbox controller---#\nimport pygame, RPi.GPIO as gpio, time\n\n#control update frequency\nCLOCK = pygame.time.Clock()\nclock_speed = 20\n\n#vars for if program running\n#and if joystick connected\njoystick_connect = True\nrunning = True\n\n#used to end program\ndef stop():\n for i in range(0, len(servos)):\n servos[i].stop()\n pygame.quit()\n print('clean exit')\n\n#convert a value to degrees, 0-180\ndef convert(x):\n return ((1.0/18.0)*x+3)\n\ndef reset():\n for i in range(0, len(pins)):\n servos[i].ChangeDutyCycle(convert(start_pos[i]))\n \n#initailize the pygame library\npygame.init()\n\n#use first joystick connected since only\n#one xbox remote is used\njoystick = pygame.joystick.Joystick(0)\n#initailize joystick\njoystick.init()\n\n#initailize GPIO\ngpio.setmode(gpio.BCM)\ngpio.setwarnings(False)\n\n#array to hold pin numbers\npins = [21,16,12,23,25,18]\n#array of servos\nservos = []\n\n#increment variable\nstart_pos = [-15, 120, 15, 120, 35, 150]\nservo_pos = [-15, 120, 15, 120, 35, 150]\n\n#loop through all pins\n#setup pins for output mode\n#add pin to servo array\n#start servo\nfor i in range(0, len(pins)):\n gpio.setup(pins[i], gpio.OUT)\n servos.append(gpio.PWM(pins[i],50))\n\nservos[0].start(convert(servo_pos[0]))\nservos[1].start(convert(servo_pos[1]))\nservos[2].start(convert(servo_pos[2]))\nservos[3].start(convert(servo_pos[3]))\nservos[4].start(convert(servo_pos[4]))\nservos[5].start(convert(servo_pos[5]))\n \n#limits for servos\nUPPER = 180\nLOWER = 3\n\n#BOOOLS for movement\nbase_mv = False\nbase_mv_opp = False\nshoulder_mv = False\nshoulder_mv_opp = False\nelbow_mv = False\nelbow_mv_opp = False\nswivel_mv = False\nswivel_mv_opp = False\nwrist_mv = False\nwrist_mv_opp = False\ngripper_mv = False\ngripper_mv_opp = False\n\n#increment amounts\nb_mv = 3\ns_mv = 5\ne_mv = 5\nsw_mv = 5\nw_mv = 5\ng_mv = 5\n'''\nservo:\n 0 - base\n 1 - shoulders\n 2 - elbow\n 3 - swivel\n 4 - wrist\n 5 - gripper\n'''\n#run until user decides to quit\nwhile(running):\n # User did something, this applies to keyboard or\n #joystick\n for event in pygame.event.get(): \n \n # Check if anything on joystick changed \n if event.type == pygame.JOYBUTTONDOWN or event.type == pygame.JOYAXISMOTION:\n\n #Control swivel base\n #Top left axis, left direction\n #does not allow user to go past limits\n if joystick.get_axis(0) > .9:\n base_mv = True\n elif joystick.get_axis(0) < -.9:\n base_mv_opp = True\n else:\n base_mv = False\n base_mv_opp = False\n\n #Shoulders - left stick, up/down\n if joystick.get_axis(1) < -.9:\n shoulder_mv = True\n elif joystick.get_axis(1) > .9:\n shoulder_mv_opp = True\n else:\n shoulder_mv = False\n shoulder_mv_opp = False\n\n #elbow - right stick, up/down\n if joystick.get_axis(4) < -.5:\n elbow_mv = True\n elif joystick.get_axis(4) > .5:\n elbow_mv_opp = True\n else:\n elbow_mv = False\n elbow_mv_opp = False\n\n #wrist joint - A and Y, A down Y up\n if joystick.get_button(0):\n swivel_mv = True\n elif joystick.get_button(3):\n swivel_mv_opp = True\n else:\n swivel_mv = False\n swivel_mv_opp = False\n \n\n #wrist swivel - RB and LB, RB left (CCW) LB right (CW)\n if joystick.get_button(5):\n wrist_mv = True\n elif joystick.get_button(4):\n wrist_mv_opp = True\n else:\n wrist_mv = False\n wrist_mv_opp = False\n\n #gripper - Left trigger open, right trigger close\n if joystick.get_axis(2) < -.5:\n gripper_mv = True\n elif joystick.get_axis(5) < -.5:\n gripper_mv_opp = True\n else:\n gripper_mv = False\n 
gripper_mv_opp = False\n \n #exit program if start button pressed\n if joystick.get_button(7):\n running = False\n \n #move base\n if base_mv and servo_pos[0] <= 100:\n servo_pos[0] += b_mv\n servos[0].ChangeDutyCycle(convert(servo_pos[0]))\n elif base_mv_opp and servo_pos[0] >= -15:\n servo_pos[0] -= b_mv\n servos[0].ChangeDutyCycle(convert(servo_pos[0]))\n\n #Shoulders - left stick, up/down\n if shoulder_mv and servo_pos[1] <= 130:\n servo_pos[1] += s_mv\n servos[1].ChangeDutyCycle(convert(servo_pos[1]))\n elif shoulder_mv_opp and servo_pos[1] >= 0:\n servo_pos[1] -= s_mv\n servos[1].ChangeDutyCycle(convert(servo_pos[1]))\n\n #elbow - right stick, up/down\n if elbow_mv and servo_pos[2] <= 100:\n servo_pos[2] += e_mv\n servos[2].ChangeDutyCycle(convert(servo_pos[2]))\n elif elbow_mv_opp and servo_pos[2] >= 0:\n servo_pos[2] -= e_mv\n servos[2].ChangeDutyCycle(convert(servo_pos[2]))\n\n #wrist joint - A and Y, A down Y up\n if swivel_mv and servo_pos[3] <= 170:\n servo_pos[3] += sw_mv\n servos[3].ChangeDutyCycle(convert(servo_pos[3]))\n elif swivel_mv_opp and servo_pos[3] >= 10:\n servo_pos[3] -= sw_mv\n servos[3].ChangeDutyCycle(convert(servo_pos[3]))\n\n #wrist swivel - RB and LB, RB left (CCW) LB right (CW)\n if wrist_mv and servo_pos[4] <= 180:\n servo_pos[4] += w_mv\n servos[4].ChangeDutyCycle(convert(servo_pos[4]))\n elif wrist_mv_opp and servo_pos[4] >= 0:\n servo_pos[4] -= w_mv\n servos[4].ChangeDutyCycle(convert(servo_pos[4]))\n\n #gripper - Left trigger open, right trigger close\n if gripper_mv and servo_pos[5] <= 170:\n servo_pos[5] += g_mv\n servos[5].ChangeDutyCycle(convert(servo_pos[5]))\n elif gripper_mv_opp and servo_pos[5] >= 0:\n servo_pos[5] -= g_mv\n servos[5].ChangeDutyCycle(convert(servo_pos[5]))\n\n \n #set clock speed to limit cpu usage\n CLOCK.tick(clock_speed)\n \n#end program\nstop()\n\n","sub_path":"robo_arm.py","file_name":"robo_arm.py","file_ext":"py","file_size_in_byte":6182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"442411170","text":"# Val Chapple\n# Cody Dhein\n# Date: Nov 22, 2017\n#\n# Resources:\n# Overall Concepts: An introductory tutorial on kd trees by Andrew W Moore\n# Carnegie Mellon University, Extract from Andrew Moore's PhD Thesis\n# Construction: https://www.cise.ufl.edu/class/cot5520fa09/CG_RangeKDtrees.pdf\n# Querying: https://web.engr.oregonstate.edu/~tgd/classes/534/slides/part3.pdf\n#\nimport sys\nfrom operator import itemgetter\n\n#import random\nimport heapq\nimport math\nimport timeit\nimport numpy as np\n\n# KDTreeNN\n#\n# Build KDTree from city data points\n# Use Tree to find solution\ndef kdTreeNN(filename, outfilename):\n # Read file with city Id, city x, and city y\n try:\n inFile = open(filename, \"r\")\n except:\n print(\"No file named: \" + filename)\n sys.exit()\n\n text = inFile.read().splitlines()\n\n # Save data as 2D list [ [ id, x, y ],... ]\n points = [ [int(i[0]), int(i[1]), int(i[2])] for i in [j.split() for j in text ]]\n\n # Create kd-tree structure with points, 0 start depth, and 2D(x and y)\n root = kDTree( points, 0, 2)\n\n # Set Max number of Nearest Neighbors to Keep\n numNN = len(points) * .002\n if (numNN < 10):\n numNN = 10\n\n (totalDist, route, distSqdMatrix) = kDTreeSearchNN(root, len(points), numNN)\n\n # if (len(points) <= 400 ):\n # (totalDist, route) = twoOptImprove(route , distSqdMatrix)\n\n # Save route\n outFile = open(outfilename, \"w\")\n outFile.write(str(totalDist) + \"\\n\")\n for i in route:\n outFile.write(str(i.city[0]) + \"\\n\")\n return\n\n# kDNode\n# Nodes of trees\n# value is the city\n# left and right point to other nodes\n# dim represents the splitting axis (aka index to use on the city data)\nclass kDNode:\n def __init__(self, city, left, right, dim):\n self.city = city\n self.visited = False\n self.left = left\n self.right = right\n self.dim = dim # 0 or 1\n self.nn = []\n\n def addNN( self, distSqd, node, maxNN ):\n if ( distSqd, node ) in self.nn:\n return\n if len(self.nn) < maxNN:\n self.nn.append( ( distSqd, node ) )\n else:\n self.nn.sort(key=itemgetter(0), reverse=True)\n if (self.nn[0][0] > distSqd):\n # Replace largest dist with dist\n self.nn[0] = ( distSqd, node )\n\n def getNNs( self ):\n return [ x for x in self.nn ]\n\n def __str__(self, level=1):\n ret = \"\"\n ret += \"\\t\"*(level-1)+\"-----\"+repr(self.city[0])+\"\\n\"\n if self.left != None:\n ret += self.left.__str__(level+1)\n if self.right != None:\n ret += self.right.__str__(level+1)\n return ret\n\n\n# kDTree\n# Creates kd-tree recursively with city data, depth into tree and dimensions (k)\n# Returns a kDNode and its subtree\ndef kDTree( points, depth, k ):\n # Check that points has a list\n if len(points) < 1:\n return None\n\n # sort by axis chosen to find median:\n # even for x= equation, and odd for y= equation\n points.sort(key=itemgetter(depth % k + 1))\n mid = len(points) / 2\n\n return kDNode(\n points[mid],\n kDTree(points[:mid], depth + 1, k),\n kDTree(points[mid+1:], depth + 1, k),\n depth % k + 1\n )\n\n# kDTreeSearchNN\n# Determines a tour distance and route\n# Uses greedy method of finding nearest unvisited city to target city\ndef kDTreeSearchNN( tree, numCities, maxNN ):\n start = tree\n target = tree\n tree.visited = True\n route = [ tree ]\n totalDist = 0\n\n distSqdMatrix = [[ -1 for i in range(0,numCities)] for j in range(0,numCities)]\n\n # Find nearest city for entire loop\n while len(route) < numCities:\n #print(str(len(route)) + \" \" + str(numCities))\n heap = []\n bestDistSqd = float('inf')\n bestNode = None\n\n # 
Add to priority queue\n heapq.heappush( heap, (0 , tree ) )\n # Get target's nearest neighbors\n bestSumDists = float('inf')\n while len(heap) != 0:\n (d, node) = heapq.heappop( heap )\n if (d >= bestDistSqd):\n continue # No node is closer, continue while loop\n if node == None:\n continue # Skip node\n\n # Get distance squared value for comparison\n dist = distSqdMatrix[ node.city[0] ][ target.city[0] ]\n\n if dist == -1:\n dist = dist_sqd( node.city, target.city )\n distSqdMatrix[ target.city[0] ][ node.city[0] ] = dist\n distSqdMatrix[ node.city[0] ][ target.city[0] ] = dist\n target.addNN( dist , node, maxNN)\n\n if node.visited == False:\n if (dist < bestDistSqd ):\n bestDistSqd = dist\n bestNode = node\n\n # Add child nodes to priority queue, adjusting priority left/right\n if (target.city[node.dim] <= node.city[node.dim]):\n heapq.heappush(heap, (0, node.left ))\n heapq.heappush(heap, (dist, node.right )) # sorting by dist?\n else:\n heapq.heappush(heap, (0, node.right ))\n heapq.heappush(heap, (dist, node.left ))\n\n # Add nearest neighbor to route, mark visited, update target\n if bestNode != None:\n bestNode.visited = True\n route.append(bestNode)\n target = bestNode\n totalDist += int(round(math.sqrt(bestDistSqd)))\n\n # Add distance from last target city to start city\n totalDist += int(round(math.sqrt(dist_sqd(target.city, start.city))))\n return (totalDist, route, distSqdMatrix)\n\ndef dist_sqd( city1, city2 ):\n x_dist = abs(city2[1] - city1[1])\n y_dist = abs(city2[2] - city1[2])\n return x_dist*x_dist + y_dist*y_dist\n\n# swaps edges\n# accepts the full route and the indices for two nodes to swap\ndef twoOptSwap(route,i,j):\n\tnew_route = route[:i]\n\ttmp = list(reversed(route[i:j+1]))\n\tnew_route.extend(tmp)\n\tnew_route.extend(route[j+1:])\n\treturn new_route\n\n# Performs a twoOpt improvement on the candidate solution\ndef twoOptImprove(route,distances):\n noSwap = route[0]\n currentBest = calcLength(route,distances)\n prevBest = currentBest + 1\n n = 0\n while currentBest < prevBest:\n n += 1\n #print(str(n))\n prevBest = currentBest\n for i in range(1,len(route)-2):\n for j in range(i+1,len(route)-1):\n #print 'Try swap ' + str(route[i]) + ', ' + str(route[j])\n candidate = twoOptSwap(route,i,j)\n candidate_dist = calcLength(candidate,distances)\n if candidate_dist < currentBest:\n route = candidate\n currentBest = candidate_dist\n #break\n # else:\n\t\t\t# \tcontinue\n # break\n currentBest = calcLength(route,distances)\n return (currentBest, route )\n\n# calculates total length of the given tour\n# accepts the tour and a distance Matrix\ndef calcLength(tour, dists):\n length = 0\n\n for i in range(len(tour)-1):\n j = i+1\n c1 = tour[i]\n c2 = tour[j]\n length += int(round(math.sqrt(dists[c1][c2])))\n length += int(round(math.sqrt(dists[ tour[0] ][ tour[len(tour)-1] ] )))\n return length\n\n\nif __name__ == '__main__':\n t1= timeit.default_timer()\n # Check input file name exists\n try:\n filename = sys.argv[1]\n except:\n print(\"Usage: \" + sys.argv[0] + \" \")\n sys.exit()\n #random.seed(1)\n outfilename = filename + \".tour\"\n kdTreeNN(filename, outfilename)\n\n t2 = timeit.default_timer()\n\n fileWrite = open(filename + \".tourTime\", \"w\")\n fileWrite.write(str(t2-t1) + \"\\n\")\n fileWrite.close()\n","sub_path":"NN_KDTree/KDTree-FAST-results/KDTree-FAST.py","file_name":"KDTree-FAST.py","file_ext":"py","file_size_in_byte":7694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"85178183","text":"class Node:\n def __init__(self, value=None, next=None):\n self.value = value\n self.next = next\n\n def get_value(self):\n return self.value\n\n def get_next(self):\n return self.next\n\n def set_next(self, new_next):\n self.next = new_next\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n # self.tail = None\n\n def add_to_head(self, value):\n node = Node(value)\n\n if self.head is not None:\n node.set_next(self.head)\n self.head = node\n\n def add_to_tail(self, value):\n node = Node(value)\n\n if self.tail is not None:\n self.tail.next = node\n self.tail = node\n\n def contains(self, value):\n if not self.head:\n return False\n\n current = self.head\n\n while current:\n if current.get_value() == value:\n return True\n\n current = current.get_next()\n\n return False\n\n # def reverse_list(self, node, prev): what the _ is prev????\n def reverse_list(self, node, prev):\n marker1 = None\n marker2 = self.head\n \n while marker2 is not None:\n marker3 = marker2.next\n marker2.next = marker1\n marker1 = marker2\n marker2 = marker3\n self.head = marker1\n\n def reverse_recursive_list(self, node, prev):\n if self.head is None:\n return\n cascade = False\n next = None\n \n if node.next is None: \n self.head = node\n cascade = True\n next = node.next\n node.next = prev\n\n if cascade == True:\n return\n self.reverse_recursive_list(next, node)\n \n\nmyLL = LinkedList()\nmyLL.add_to_head(1)\nmyLL.add_to_head(2)\nmyLL.add_to_head(3)\nmyLL.add_to_head(4)\nmyLL.add_to_head(5)\nmyLL.reverse_recursive_list(myLL.head, None)\n","sub_path":"reverse/reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"585728495","text":"#coding:utf-8\n\n\"\"\"\n斐波那契数列\n\"\"\"\n\nclass Fabs:\n def __init__(self,max):\n self.max = max\n self.a = 0\n self.b = 1\n\n def __iter__(self):\n return self\n\n def __index__(self):\n Fabs = self.a\n if Fabs > self.max:\n raise StopIteration\n self.a,self.b = self.b,self.a+self.b\n return Fabs\n\nf = Fabs(10000)\nlst = [f.__index__() for i in range(10)]\nprint(lst)\n\n","sub_path":"迭代器方式计算斐波那契数列.py","file_name":"迭代器方式计算斐波那契数列.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"509265685","text":"import cgitb\nimport configparser\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom pymongo import MongoClient\nimport mwfeeds.controlers.ParseFeed\nimport mwfeeds.controlers.GetRegionFixFeed\nimport mwfeeds.controlers.GetJavascriptPage\nimport urllib\nimport urllib.request\nimport re\nimport json\n\ncgitb.enable()\nconfig = configparser.ConfigParser()\nconfig.read('/opt/python/current/app/mwfeeds/mwfeeds.cfg')\n\ndef index(req):\n c = {}\n id = req.GET.get('id', '')\n if id == '':\n c[\"message\"] = \"Invalid Feed Id\"\n return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n else:\n client = MongoClient(config.get(\"active\", \"DBUrl\"))\n db = client.mwfeeds\n feedsCollection = db.feeds\n myFeed = feedsCollection.find_one({\"_id\":int(id.strip())})\n if myFeed[\"feedType\"] == \"HTML\":\n return getHTMLFeed(req, myFeed)\n elif myFeed[\"feedType\"] == \"SetEncoding\":\n return getSetEncodingFeed(req, myFeed)\n elif myFeed[\"feedType\"] == \"Sharepoint\":\n return getFeedSharepoint(req, myFeed)\n elif myFeed[\"feedType\"] == \"Combine\":\n return getCombineFeed(req, myFeed)\n elif myFeed[\"feedType\"] == \"RegionFix\" or myFeed[\"feedType\"] == \"Region Fix\":\n return mwfeeds.controlers.GetRegionFixFeed.processFeed(req,myFeed)\n #c[\"message\"] = \"Region Fix Feed\"\n #return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n else:\n c[\"message\"] = \"Feed type \" + myFeed[\"feedType\"] + \" is not supported.\"\n return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n\ndef getHTMLFeed(req,myFeed):\n c = {}\n try:\n HTMLSource = \"\"\n if myFeed[\"javascriptEnabled\"] == True:\n HTMLSource = mwfeeds.controlers.GetJavascriptPage.getJavascriptPage(myFeed[\"url\"])\n else:\n fp = urllib.request.urlopen(myFeed[\"url\"])\n mybytes = fp.read()\n HTMLSource = mybytes.decode(\"utf8\")\n myFeed[\"HTMLSourceText\"] = HTMLSource\n try:\n c[\"CustomTitle\"] = myFeed[\"CustomTitle\"]\n except:\n c[\"CustomTitle\"] = \"\"\n try:\n c[\"CustomDescription\"] = myFeed[\"CustomDescription\"]\n except:\n c[\"CustomDescription\"] = \"\"\n c[\"ItemList\"] = mwfeeds.controlers.ParseFeed.parseFeed(myFeed)\n return render(req, \"RSSTemplate.pyv\", c, content_type=\"text/xml\")\n except Exception as ex:\n c[\"message\"] = str(ex)\n return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n\ndef getSetEncodingFeed(req,myFeed):\n try:\n myHeaders = {'User-Agent': \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:47.0) Gecko/20100101 Firefox/47.0\"}\n request = urllib.request.Request(myFeed[\"url\"], headers=myHeaders)\n source = \"\"\n with urllib.request.urlopen(request) as response:\n encoding = response.headers.get_content_charset()\n if encoding is not None:\n source = response.read().decode(encoding)\n else:\n source = response.read().decode(\"ISO-8859-1\")\n rssText = re.sub(\"(\\<\\?xml(?:.+?)\\?\\>)\",\"\",source)\n returnValue = \"\"\n try:\n returnValue += rssText.encode('utf-8')\n except:\n returnValue += rssText\n c={}\n c[\"message\"] = returnValue\n return HttpResponse(rssText, content_type=\"text/xml\")\n except Exception as ex:\n c={}\n c[\"message\"] = str(ex)\n return render(req, \"GetFeedMessage.pyv\", c, content_type=\"application/rss+xml\")\n\ndef getFeedSharepoint(req,myFeed):\n c = {}\n try:\n c[\"BaseUrl\"] = config.get(\"active\", \"baseUrl\")\n c[\"CustomTitle\"] = myFeed[\"CustomTitle\"]\n c[\"CustomDescription\"] = myFeed[\"CustomDescription\"]\n 
c[\"id\"] = myFeed[\"_id\"]\n itemList = []\n fp = urllib.request.urlopen(myFeed[\"url\"])\n mybytes = fp.read()\n source = mybytes.decode(\"utf8\")\n allItems = re.findall(\"(.+?)\", source, re.DOTALL | re.IGNORECASE)\n for thisItem in allItems:\n thisItem = thisItem.replace(\"[<\", \"[\").replace('&','&').replace('&','&').replace('amp;','&')\n itemToAdd = {}\n itemToAdd[\"Title\"] = re.findall(\"\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"Title\"]) == 0):\n itemToAdd[\"Title\"] = re.findall(\"(.+?)\", thisItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"Title\"] = flatten(itemToAdd[\"Title\"])\n\n itemToAdd[\"ItemURL\"] = re.findall(\"(.+?)\", thisItem, re.DOTALL | re.IGNORECASE)\n try:\n itemToAdd[\"ItemURL\"] = flatten(itemToAdd[\"ItemURL\"])\n except:\n itemToAdd[\"ItemURL\"] = flatten(itemToAdd[\"ItemURL\"])\n \n\n itemToAdd[\"date\"] = re.findall(\"(.+?)\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"date\"]) == 0):\n itemToAdd[\"date\"] = re.findall(\"<(?:.*?)date>(.+?)(?:.*?)date>\", thisItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"date\"] = flatten(itemToAdd[\"date\"])\n itemToAdd[\"Description\"] = re.findall(\"\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"Description\"]) == 0):\n itemToAdd[\"Description\"] = re.findall(\"<(?:.*?)description>(.+?)(?:.*?)description>\", thisItem,\n re.DOTALL | re.IGNORECASE)\n itemToAdd[\"Description\"] = flatten(itemToAdd[\"Description\"])\n itemToAdd[\"SourceURL\"] = re.findall(\"\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"SourceURL\"]) == 0):\n itemToAdd[\"SourceURL\"] = re.findall(\"(.*?)\", thisItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"SourceURL\"] = flatten(itemToAdd[\"SourceURL\"])\n itemToAdd[\"SourceName\"] = re.findall(\"(.*?)\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"SourceName\"]) == 0):\n itemToAdd[\"SourceName\"] = re.findall(\"(.*?)\", thisItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"SourceName\"] = flatten(itemToAdd[\"SourceName\"])\n itemList.append(itemToAdd)\n c[\"ItemList\"] = itemList\n return render(req, \"SharepointRSSTemplate.pyv\", c, content_type=\"text/xml\")\n except Exception as ex:\n c[\"message\"] = str(ex)\n return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n\ndef getCombineFeed(req,myFeed):\n c = {}\n try:\n sourceFeeds = myFeed[\"sourceFeeds\"]\n c[\"BaseUrl\"] = config.get(\"active\", \"baseUrl\")\n c[\"id\"] = myFeed[\"_id\"]\n itemList = []\n for o in sourceFeeds:\n fp = urllib.request.urlopen(o)\n mybytes = fp.read()\n CombineSource = mybytes.decode(\"utf8\")\n strainedItems = re.findall(\"(.+?)\", CombineSource,re.DOTALL|re.IGNORECASE)\n for thisItem in strainedItems:\n thisStrainedItem = str(thisItem)\n itemToAdd = {}\n itemToAdd[\"TitleA\"] = re.findall(\"\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"TitleA\"]) == 0):\n itemToAdd[\"TitleA\"] = re.findall(\"(.+?)\", thisItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"ItemURL\"] = \"\"\n itemToAdd[\"ItemURLA\"] = re.findall(\"\",thisStrainedItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"ItemURLA\"]) == 0):\n itemToAdd[\"ItemURLA\"] = re.findall(\"(.+?)\", thisStrainedItem,re.DOTALL | re.IGNORECASE)\n itemToAdd[\"SourceName\"] = \"\"\n itemToAdd[\"SourceNameA\"] = re.findall(\"(.+?)\",thisStrainedItem,re.DOTALL|re.IGNORECASE)\n itemToAdd[\"SourceURL\"] = \"\"\n itemToAdd[\"SourceURLA\"] = re.findall(\"\", thisItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"SourceURLA\"]) == 0):\n itemToAdd[\"SourceURLA\"] = 
re.findall(\"(.*?)\", thisItem,re.DOTALL | re.IGNORECASE)\n itemToAdd[\"Date\"] = \"\"\n itemToAdd[\"DateA\"] = re.findall(\"(.+?)\", thisStrainedItem, re.DOTALL | re.IGNORECASE)\n itemToAdd[\"DateA\"] = flatten(itemToAdd[\"Date\"])\n if (len(itemToAdd[\"DateA\"]) == 0):\n itemToAdd[\"Date\"] = re.findall(\"<(?:.*?)Date>(.+?)(?:.*?)Date>\", thisItem,re.DOTALL | re.IGNORECASE)\n itemToAdd[\"DateA\"] = flatten(itemToAdd[\"Date\"])\n\n\n itemToAdd[\"Description\"] = \"\"\n itemToAdd[\"DescriptionA\"] = re.findall(\"\", thisStrainedItem, re.DOTALL | re.IGNORECASE)\n if (len(itemToAdd[\"DescriptionA\"]) == 0):\n itemToAdd[\"DescriptionA\"] = re.findall(\"(.+?)\", thisStrainedItem, re.DOTALL | re.IGNORECASE)\n if isinstance(itemToAdd[\"TitleA\"], list):\n if len(itemToAdd[\"TitleA\"])>0:\n itemToAdd[\"Title\"] = itemToAdd[\"TitleA\"][0]\n else:\n itemToAdd[\"Title\"] = \"\"\n else:\n itemToAdd[\"Title\"] = itemToAdd[\"TitleA\"]\n if isinstance(itemToAdd[\"ItemURLA\"], list):\n if len(itemToAdd[\"ItemURLA\"])>0:\n itemToAdd[\"ItemURL\"] = itemToAdd[\"ItemURLA\"][0]\n else:\n itemToAdd[\"ItemURL\"] = \"\"\n else:\n itemToAdd[\"ItemURL\"] = itemToAdd[\"ItemURLA\"]\n if isinstance(itemToAdd[\"SourceNameA\"], list):\n if len(itemToAdd[\"SourceNameA\"])>0:\n itemToAdd[\"SourceName\"] = itemToAdd[\"SourceNameA\"][0]\n else:\n itemToAdd[\"SourceName\"] = \"\"\n else:\n itemToAdd[\"SourceName\"] = itemToAdd[\"SourceNameA\"]\n if isinstance(itemToAdd[\"SourceURLA\"], list):\n if len(itemToAdd[\"SourceURLA\"])>0:\n itemToAdd[\"SourceURL\"] = itemToAdd[\"SourceURLA\"][0]\n else:\n itemToAdd[\"SourceURL\"] = \"\"\n else:\n itemToAdd[\"SourceURL\"] = itemToAdd[\"SourceURLA\"]\n \n if isinstance(itemToAdd[\"DateA\"], list):\n if (len(itemToAdd[\"DateA\"])>0):\n itemToAdd[\"Date\"] = itemToAdd[\"DateA\"][0]\n else:\n itemToAdd[\"Date\"] = \"\"\n else:\n itemToAdd[\"Date\"] = itemToAdd[\"DateA\"]\n\n if isinstance(itemToAdd[\"DescriptionA\"], list):\n if len(itemToAdd[\"DescriptionA\"])>0:\n itemToAdd[\"Description\"] = itemToAdd[\"DescriptionA\"][0]\n else:\n itemToAdd[\"Description\"] = \"\"\n else:\n itemToAdd[\"Description\"] = itemToAdd[\"DescriptionA\"]\n itemList.append(itemToAdd)\n c[\"ItemList\"] = itemList\n return render(req, \"CombineRSSTemplate.pyv\", c, content_type=\"text/xml\")\n except Exception as ex:\n c[\"message\"] = str(ex)\n return render(req, \"GetFeedMessage.pyv\", c, content_type=\"text/plain\")\n\ndef flatten(arrayIn):\n if len(arrayIn)>0 :\n return arrayIn[0]\n else:\n return \"\"\n\n\n\n\n\n\n","sub_path":"mwfeeds/controlers/GetFeed.py","file_name":"GetFeed.py","file_ext":"py","file_size_in_byte":11946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"60607136","text":"#Store Hangman game into a function\nimport random\n\ndef hangman():\n\tword = [\"test\", \"idea\", \"something\", \"else\", \"random\", \"things\"]\n\tword = random.choice(word)\n\n\twrong = 0\n\tstages = [\"\",\n\t\t\t\"--------- \",\n\t\t\t\"| | \",\n\t\t\t\"| | \",\n\t\t\t\"| 0 \",\n\t\t\t\"| /|\\\\ \",\n\t\t\t\"| / \\\\ \",\n\t\t\t\"| \"\n\t\t\t]\n\tremaining_letters = list(word)\n\tboard = [\"_\"] * len(word)\n\twin = False\n\tprint(\"Welcome to Hangman\")\n\t\n\twhile wrong < len(stages) - 1:\n\t\tprint(\"\\n\")\n\t\tmsg = \"Guess a letter: \"\n\t\tchar = input(msg)\n\t\tif char in remaining_letters:\n\t\t\tcind = remaining_letters.index(char)\n\t\t\tboard[cind] = char\n\t\t\tremaining_letters[cind] = '$'\n\t\telse:\n\t\t\twrong += 1\n\t\tprint((\" \".join(board)))\n\t\te = wrong + 1\n\t\tprint(\"\\n\".join(stages[0: 3]))\n\t\tif \"_\" not in board:\n\t\t\tprint(\"You win!\")\n\t\t\tprint(\" \".join(board))\n\t\t\twin = True\n\t\t\tbreak\n\tif not win:\n\t\tprint(\"\\n\".join(stages[0: wrong]))\n\t\tprint(\"You lose! It was {}.\".format(word))\n\nhangman()\n\n","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"404910387","text":"import os\nimport boto3\nimport mimetypes\nfrom botocore.config import Config\n\n\ndef run():\n bucket = os.environ['INPUT_BUCKET']\n bucket_region = os.environ['INPUT_BUCKET-REGION']\n dist_folder = os.environ['INPUT_DIST-FOLDER']\n\n configuration = Config(region_name=bucket_region)\n\n # s3_client = boto3.client('s3', config=configuration)\n\n # for root, subdirs, files in os.walk(dist_folder):\n # for file in files:\n # s3_client.upload_file(\n # os.path.join(root, file),\n # bucket,\n # os.path.join(root, file).replace(dist_folder + '/', ''),\n # ExtraArgs={\"ContentType\": mimetypes.guess_type(file)[0]}\n # )\n\n website_url = f'http://{bucket}.s3-website-{bucket_region}.amazonaws.com'\n # The below code sets the 'website-url' output (the old ::set-output syntax isn't supported anymore - that's the only thing that changed though)\n with open(os.environ['GITHUB_OUTPUT'], 'a') as gh_output:\n print(f'web-url={website_url}', file=gh_output)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":".github/actions/deploy-s3-docker/deployment.py","file_name":"deployment.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"583815220","text":"from django.core.paginator import Paginator\n\n\nclass PaginatorMixin():\n queryset = None\n per_page = None\n filterset = None\n request = None\n\n def __init__(self, queryset, filterset, request):\n \"\"\"Initialization\"\"\"\n self.queryset = queryset\n self.filterset = filterset\n self.request = request\n\n def set_per_page(self, per_page):\n \"\"\"Set the number of rows per page\"\"\"\n self.per_page = per_page\n\n def get_queryset(self):\n \"\"\"Get the filtered queryset\"\"\"\n return self.filterset(self.request.GET, queryset=self.queryset)\n\n def get_page(self):\n \"\"\"Get the page number from request and remove the page\n query string\"\"\"\n page = self.request.GET.get('page')\n request_without_page = self.request.GET.copy()\n if page:\n request_without_page.pop('page')\n self.request.GET = request_without_page \n return page\n\n def get_paginator(self):\n \"\"\"Get the filtered list and paginator resources\"\"\"\n object_list = self.get_queryset()\n paginator = Paginator(object_list.qs, self.per_page)\n page = self.get_page()\n\n object_list._qs = paginator.get_page(page)\n\n page_range = range(1, paginator.num_pages +1)\n page_list = list(page_range)\n\n return {'object_list': object_list, 'page_list': page_list, 'request': self.request}","sub_path":"intranet/core/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"234027627","text":"#!/usr/bin/env python\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# QEcalc by DANSE Inelastic group\n# Nikolay Markovskiy\n# California Institute of Technology\n# (C) 2009 All Rights Reserved\n#\n# File coded by: Nikolay Markovskiy\n#\n# See AUTHORS.txt for a list of people who contributed.\n# See LICENSE.txt for license information.\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nimport os.path\nimport StringIO\n\nclass SubSetting:\n def __init__(self): pass\n\nclass Setting:\n def __init__(self, filename = None, configString = None):\n self._paths = SubSetting()\n try:\n if filename == None:\n if configString == None:\n raise NameError(\"Config should be initialized either with a \\\n filename or configString\")\n else:\n self.filename = None #StringIO.StringIO(configString)\n self.configString = configString\n else:\n self.filename = filename\n self.configString = open(filename,'r').read()\n except NameError:\n raise \n\n \n def section(self, sectionName, configDic = {}):\n \"\"\"\n will parse self.configFileName values not found in the file will be\n initialized from configDic\n \"\"\"\n import ConfigParser\n config = ConfigParser.SafeConfigParser()\n config.optionxform = str\n \n file = StringIO.StringIO(self.configString)\n config.readfp(file)\n \n if not config.has_section(sectionName):\n config.add_section(sectionName)\n\n for varName in configDic.keys():\n if varName not in dir(self):\n setattr(self, varName, configDic[varName])\n\n for option in config.options(sectionName):\n varValue = config.get(sectionName, option)\n setattr(self, option, varValue)\n\n Vars = dir(self)\n for varName in Vars:\n if 'input' in varName or 'Input' in varName:\n if os.path.isfile(varName):\n file = open(varName, 'r')\n string = file.read()\n setattr(self, '_'+varName+'Str', string)\n\n def set(self, name, value):\n setattr(self, name, value)\n\n def get(self, name):\n if name in dir(self) and getattr(self,name) != None:\n return getattr(self,name)\n else:\n if name in dir(self._paths):\n return getattr(self._paths, name)\n else:\n return None\n\n def syncAllPathsInNamelist(self, param, namelist, varName, input, defaults = None):\n \"\"\"\n Syncs path attribute in namelist with setting variable varName\n if varName was not set in Setting it will be initialized from QE\n input. If it is not in QE input it will be initialized from QE default\n values\n \"\"\"\n var = getattr(self, varName, None)\n if var != None:\n input.namelist(namelist).add(param, var, quotes = True)\n setattr(self._paths, varName, var)\n else:\n if input.namelist(namelist).exists(param):\n inputVar = input.namelist(namelist).param(param, quotes = False)\n setattr(self, varName, inputVar)\n setattr(self._paths, varName, inputVar)\n else:\n setattr(self, varName, defaults[varName])\n setattr(self.setting._paths, varName, defaults[varName])\n\n def getAllPathsInNamelist(self, param, namelist, varName, input, defaults = None):\n \"\"\"\n Retrieves all the filenames relevant to given namelist. Variables\n from class Setting override ones from QE input files. 
If both are\n empty, default values are used\n \"\"\"\n var = getattr(self, varName, None)\n fileDict = {}\n if var != None:\n fileDict[param] = var\n else:\n if input.namelist(namelist).exists(param):\n fileDict[param] = input.namelist(namelist).param(param, \\\n quotes = False)\n else:\n fileDict[param] = defaults[varName]\n\n\n def syncPathInNamelist(self, param, namelist, varName, input, defaults = None):\n \"\"\"\n Syncs path attribute in namelist with setting variable varName\n \"\"\"\n var = getattr(self, varName, None)\n if var != None:\n input.namelist(namelist).add(param, var, quotes = True)\n setattr(self._paths, varName, var)\n else:\n if input.namelist(namelist).exists(param):\n inputVar = input.namelist(namelist).param(param, quotes = False)\n setattr(self._paths, varName, inputVar)\n else:\n setattr(self._paths, varName, defaults[varName])","sub_path":"espresso/tags/qecalc-0.2.2/qecalc/qetask/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"20701008","text":"from __future__ import print_function\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution3D, MaxPooling3D\nfrom keras.optimizers import SGD\n\nmodel = Sequential()\n# input: 100x100 images with 3 channels -> (3, 100, 100) tensors.\n# this applies 32 convolution filters of size 3x3 each.\nmodel.add(Convolution3D(32, 3, 3, 3, border_mode='valid', input_shape=(None, 20, 50, 50)))\nmodel.add(Activation('relu'))\nmodel.add(Convolution3D(32, 3, 3, 3))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling3D(pool_size=(2, 2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Convolution3D(64, 3, 3, 3, border_mode='valid'))\nmodel.add(Activation('relu'))\nmodel.add(Convolution3D(64, 3, 3, 3))\nmodel.add(Activation('relu'))\nmodel.add(MaxPooling3D(pool_size=(2, 2, 2)))\nmodel.add(Dropout(0.25))\n\nmodel.add(Flatten())\n# Note: Keras does automatic shape inference.\nmodel.add(Dense(256))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.5))\n\nmodel.add(Dense(10))\nmodel.add(Activation('softmax'))\n\nsgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\nmodel.compile(loss='categorical_crossentropy', optimizer=sgd)\n\nX_train = np.load('../images.npy')\nY_train = np.load('../labels.npy')\n\nmodel.fit(X_train, Y_train, batch_size=2, nb_epoch=1)\n","sub_path":"DataScienceBowl/scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"324484281","text":"# vim:ts=4:et\n# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\nimport bpy\nfrom mathutils import Vector, Quaternion\n\nfrom .. import properties\nfrom ..mu import Mu\nfrom ..mu import MuObject, MuTransform, MuTagLayer\nfrom ..utils import strip_nnn\n\nfrom .animation import collect_animations, find_path_root, make_animations\nfrom .collider import make_collider\nfrom .cfgfile import generate_cfg\nfrom .volume import model_volume\n\ndef make_transform(obj):\n transform = MuTransform()\n transform.name = strip_nnn(obj.name)\n transform.localPosition = Vector(obj.location)\n if obj.rotation_mode != 'QUATERNION':\n transform.localRotation = obj.rotation_euler.to_quaternion()\n else:\n transform.localRotation = Quaternion(obj.rotation_quaternion)\n transform.localScale = Vector(obj.scale)\n return transform\n\ndef make_tag_and_layer(obj):\n tl = MuTagLayer()\n tl.tag = obj.muproperties.tag\n tl.layer = obj.muproperties.layer\n return tl\n\ntype_handlers = {} # filled in by the modules that handle the obj.data types\nexported_objects = set()\n\ndef is_collider(obj):\n muprops = obj.muproperties\n if muprops.collider and muprops.collider != 'MU_COL_NONE':\n return True\n return False\n\ndef find_single_collider(objects):\n colliders = []\n for o in objects:\n if is_collider(o):\n colliders.append(o)\n if len(colliders) == 1:\n mat = colliders[0].matrix_local\n if mat == mat.Identity(4):\n return colliders[0]\n return None\n\ndef make_obj_core(mu, obj, path, muobj):\n if path:\n path += \"/\"\n path += muobj.transform.name\n mu.object_paths[path] = muobj\n muobj.tag_and_layer = make_tag_and_layer(obj)\n if is_collider(obj):\n exported_objects.add(obj)\n muobj.collider = make_collider(mu, obj)\n return muobj\n elif type(obj.data) in type_handlers:\n mu.path = path #needs to be reset as a type handler might modify it\n muobj = type_handlers[type(obj.data)](obj, muobj, mu)\n if not muobj:\n # the handler decided the object should not be exported\n return None\n exported_objects.add(obj)\n col = find_single_collider(obj.children)\n if col:\n exported_objects.add(col)\n muobj.collider = make_collider(mu, col)\n for o in obj.children:\n if o in exported_objects:\n # the object has already been exported\n continue\n muprops = o.muproperties\n #check whether the object should be exported (eg, props should not be\n #exported as part of an IVA, and IVAs should not be exported as part\n #of a part (that sounds odd)\n if muprops.modelType in mu.special:\n if mu.special[muprops.modelType](mu, o):\n continue\n child = make_obj(mu, o, path)\n if child:\n muobj.children.append(child)\n return muobj\n\ndef make_obj(mu, obj, path):\n if obj in exported_objects:\n # the object has already been \"exported\"\n return None\n muobj = 
MuObject()\n muobj.transform = make_transform (obj)\n return make_obj_core(mu, obj, path, muobj)\n\ndef add_internal(mu, obj):\n if not mu.internal:\n mu.internal = obj\n return True\n\ndef add_prop(mu, obj):\n mu.props.append(obj)\n return True\n\nspecial_modelTypes = {\n 'NONE': {},\n 'PART': {'INTERNAL':add_internal},\n 'PROP': {},\n 'INTERNAL': {'PROP':add_prop},\n}\n\ndef export_object(obj, filepath):\n exported_objects.clear()\n animations = collect_animations(obj)\n anim_root = find_path_root(animations)\n mu = Mu()\n mu.name = strip_nnn(obj.name)\n mu.object_paths = {}\n mu.materials = {}\n mu.textures = {}\n mu.nodes = []\n mu.props = []\n mu.messages = []\n mu.internal = None\n mu.type = obj.muproperties.modelType\n mu.CoMOffset = None\n mu.CoPOffset = None\n mu.CoLOffset = None\n mu.inverse = obj.matrix_world.inverted()\n mu.special = special_modelTypes[mu.type]\n mu.obj = make_obj(mu, obj, \"\")\n mu.materials = list(mu.materials.values())\n mu.materials.sort(key=lambda x: x.index)\n mu.textures = list(mu.textures.values())\n mu.textures.sort(key=lambda x: x.index)\n if anim_root and anim_root in mu.object_paths:\n anim_root_obj = mu.object_paths[anim_root]\n anim_root_obj.animation = make_animations(mu, animations, anim_root)\n mu.write(filepath)\n mu.skin_volume, mu.ext_volume = model_volume(obj)\n generate_cfg(mu, filepath)\n return mu\n","sub_path":"All_In_One/addons/io_object_mu/export_mu/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"425339717","text":"# Free test configuration file for MessageLogger service:\n# Behavior implied by S. Naumann but unexpected on our part.\n\nimport FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"TEST\")\n\nimport FWCore.Framework.test.cmsExceptionsFatal_cff\nprocess.options = FWCore.Framework.test.cmsExceptionsFatal_cff.options\n\nprocess.load(\"FWCore.MessageService.test.Services_cff\")\n\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\nprocess.MessageLogger.cerr.threshold = 'INFO'\nprocess.MessageLogger.cerr.INFO = cms.untracked.PSet(\n default = cms.untracked.PSet( limit = cms.untracked.int32( 0)\n),\n expect_specific = cms.untracked.PSet( limit = cms.untracked.int32(-1)\n),\n)\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(2)\n)\n\nprocess.source = cms.Source(\"EmptySource\")\n\nprocess.sendSomeMessages = cms.EDAnalyzer( \"UnitTestClient_E\")\n\nprocess.p = cms.Path(process.sendSomeMessages)\n","sub_path":"FWCore/MessageService/test/t1_cfg.py","file_name":"t1_cfg.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"224066157","text":"import random\n\ndef rnd():\n\treturn (random.randint(10, 25))\n\nschool = { \"1a\": rnd(), \"1b\": rnd(), \"2a\": rnd(), \"2b\": rnd(), \"3a\": rnd(),\n\t\t\t\"3b\": rnd(), \"4a\": rnd(), \"4b\": rnd(), \"5a\": rnd(), \"5b\": rnd() }\n\nprint (school[\"1a\"], \" children in 1a class\")\n\ndel(school[\"1a\"])\n\nschool[\"2a\"] = rnd()\nschool[\"3a\"] = rnd()\nschool[\"4a\"] = rnd()\n\nschool[\"6a\"] = rnd()\nschool[\"6b\"] = rnd()\n\nprint (school)\n","sub_path":"lab21.py","file_name":"lab21.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"494717574","text":"import numpy as np\n\n\n# sigmoid function\ndef nonlin(x, deriv=False):\n if deriv:\n return x * (1 - x) # formula for derivative of output of sigmoid\n return 1 / (1 + np.exp(-x))\n\n\n##############################\n# 2 Layer Neural Network\n##############################\ndef two_layer_nn():\n # input dataset\n X = np.array([[0, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 1]\n ])\n\n # outputs (original)\n y = np.array([[0, 0, 1, 1]]).T # transpose to make it a row vector\n\n # seed random values to get decent same results every time\n np.random.seed(1)\n\n # initialize weights randomly\n syn0 = 2 * np.random.random((3, 1)) - 1\n\n for i in range(10):\n # forward propagation\n l0 = X\n l1 = nonlin(np.dot(X, syn0)) # make the prediction of output\n\n # Loss estimation\n l1_error = y - l1 # calculate error, your loss function\n\n # gradient descent\n l1_delta = l1_error * nonlin(l1, True) # multiplying element-wise\n\n # update weights\n syn0 += np.dot(l0.T, l1_delta)\n\n if i % 1 == 0:\n print(f\"iteration {i}: \\nOutput {l1}\")\n\n # print(\"Output After Training\")\n # print(l1)\n\n\n##############################\n# 3 Layer Neural Network\n##############################\ndef three_layer_nn():\n # inputs\n X = np.array([\n [0, 0, 1],\n [0, 1, 1],\n [1, 0, 1],\n [1, 1, 1]])\n\n # outputs\n # y = np.array([[0, 1, 1, 0]]).T\n y = np.array([[0], [1], [1], [0]])\n\n # set random seed and initialize weights\n np.random.seed(1)\n\n # weights of two different layers\n # randomly initialize with mean 0\n syn0 = 2 * np.random.random((3, 4)) - 1\n syn1 = 2 * np.random.random((4, 1)) - 1\n\n for i in range(60000):\n # feed forward\n l0 = X\n # l1 = nonlin(np.dot(X, syn0))\n l1 = nonlin(np.dot(l0, syn0))\n l2 = nonlin(np.dot(l1, syn1))\n\n # calculate error\n out_error = y - l2\n\n #############################################\n # MY METHOD\n #############################################\n # wrong back propagate\n # error_deriv = nonlin(out_error, deriv=True)\n # l0_error = l1 * error_deriv\n # l1_error = l2 * error_deriv\n\n # update weights\n # syn0 += np.dot(l0.T, l0_error)\n # syn1 += np.dot(l1.T, l1_error)\n\n #############################################\n # Reference Method\n #############################################\n l2_delta = out_error * nonlin(l2, True)\n\n # how much each value contributes to error\n # back propagation\n l1_error = l2_delta.dot(syn1.T)\n\n # now go for first layer eror\n l1_delta = l1_error * nonlin(l1, True)\n\n syn1 += l1.T.dot(l2_delta)\n syn0 += l0.T.dot(l1_delta)\n\n print(\"Output After Training\")\n print(l2)\n\n\ntwo_layer_nn()\n# three_layer_nn()\n","sub_path":"part1/x_neuralnetwork.py","file_name":"x_neuralnetwork.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"19901161","text":"import numpy\nimport os\nimport csv\nfrom pandas import read_csv\nfrom datetime import datetime, timedelta\nfrom pandas import DatetimeIndex, to_datetime\nimport time\nfrom subprocess import call\nimport re\nimport datetime\nimport windowCreator\nfrom random import shuffle\nimport Logger\n\n############################################\n\nCOMPRESSED_HISTORY_FILE = 'D:\\\\dukascopy_history\\\\EURUSD-2016_01_01-2017_08_14.csv.npz'\nRESULT_FILE = 'C:\\\\dukascopy_input\\\\EURUSD-2016_01_01-2017_08_14-evaluation'\nNUMBER_OF_VALIDATION_DAYS = 21 # average number of work days in month\n#NUMBER_OF_VALIDATION_DAYS = 1\n\n############################################\n\ndef loadCompressedHistory():\n loggerToken = Logger.phaseLogStart('Loading compressed history')\n\n history = numpy.load(COMPRESSED_HISTORY_FILE)['arr_0']\n\n enumeratedHistoryFeatures = ['timestamp', 'month', 'dayOfMonth', 'dayOfWeek', 'timeOfDay', 'ask', 'bid', 'askVolume', 'bidVolume']\n\n Logger.phaseLogEnd(loggerToken)\n\n return history, enumeratedHistoryFeatures\n\ndef trimHistoryToEvaluationDaysOnly(history):\n loggerToken = Logger.phaseLogStart('Trimming history')\n\n trainDays = history[-NUMBER_OF_VALIDATION_DAYS :]\n\n Logger.phaseLogEnd(loggerToken)\n\n return trainDays\n\ndef prepareEvaluationHistory():\n history, enumeratedHistoryFeatures = loadCompressedHistory()\n\n history = trimHistoryToEvaluationDaysOnly(history)\n\n return numpy.array(history.tolist()), enumeratedHistoryFeatures\n\n#def extractWindows(history):\n# loggerToken = Logger.phaseLogStart('Extracting windows')\n\n# allWindows = []\n\n# for historyDay in history:\n# index = 0\n\n# while True:\n# window = windowCreator.createWindow(historyDay, index)\n\n# if window is None:\n# break;\n\n# print(window[1])\n\n# index = index + 1\n\n# allWindows.append(window)\n\n# Logger.phaseLogEnd(loggerToken)\n\n# return numpy.array(allWindows)\n\ndef extractWindows(history, enumeratedHistoryFeatures):\n loggerToken = Logger.phaseLogStart('Extracting windows')\n\n features = ['ask', 'bid']\n\n allowedOverlap = 0 # no overlap\n\n windowWidth = 30 * 60 * 1000 # 30 minutes\n\n predictionRange = 5 * 1000 # 5 seconds\n\n allWindows = windowCreator.createWindows(history, enumeratedHistoryFeatures, allowedOverlap, features, windowWidth, predictionRange)\n\n Logger.phaseLogEnd(loggerToken)\n \n return allWindows\n\ndef storeResult(windows):\n loggerToken = Logger.phaseLogStart('Storing result on disk')\n\n numpy.save(RESULT_FILE, windows)\n\n Logger.phaseLogEnd(loggerToken)\n\n############################################\n\nloggerToken = Logger.phaseLogStart('Preparing evaluation input')\n\nhistory, enumeratedHistoryFeatures = prepareEvaluationHistory()\n\nevaluationWindows = extractWindows(history, enumeratedHistoryFeatures)\n\nstoreResult(evaluationWindows)\n\nLogger.phaseLogEnd(loggerToken)\n","sub_path":"dukascopy/EvaluationInputCreator.py","file_name":"EvaluationInputCreator.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"1743602","text":"import numpy as np\nimport torch\nfrom torchvision import transforms\nimport cv2\nfrom PIL import Image, ImageDraw\nfrom CFA import CFA\n\nclass Landmark():\n\n # コンストラクタ\n def __init__(self, input_file):\n input_img_name = input_file\n self.num_landmark = 24\n self.img_width = 128\n checkpoint_name = 'checkpoint_landmark_191116.pth.tar'\n\n face_detector = cv2.CascadeClassifier('lbpcascade_animeface.xml')\n landmark_detector = CFA(output_channel_num=self.num_landmark + 1, checkpoint_name=checkpoint_name).cpu()\n\n normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5, 0.5, 0.5])\n train_transform = [transforms.ToTensor(), normalize]\n train_transform = transforms.Compose(train_transform)\n\n img = cv2.imread(input_img_name)\n faces = face_detector.detectMultiScale(img)\n img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\n draw = ImageDraw.Draw(img)\n\n for x_, y_, w_, h_ in faces:\n\n # 顔のサイズを調整\n self.x = max(x_ - w_ / 8, 0)\n rx = min(x_ + w_ * 9 / 8, img.width)\n self.y = max(y_ - h_ / 4, 0)\n by = y_ + h_\n self.w = rx - self.x\n self.h = by - self.y\n\n\n # 画像変換\n img_tmp = img.crop((self.x, self.y, self.x+self.w, self.y+self.h))\n img_tmp = img_tmp.resize((self.img_width, self.img_width), Image.BICUBIC)\n img_tmp = train_transform(img_tmp)\n img_tmp = img_tmp.unsqueeze(0).cpu()\n\n # ヒートマップを推定\n self.heatmaps = landmark_detector(img_tmp)\n self.heatmaps = self.heatmaps[-1].cpu().detach().numpy()[0]\n\n def get_landmark(self, key):\n res = np.empty((0, 2))\n for i in range(self.num_landmark):\n heatmaps_tmp = cv2.resize(self.heatmaps[i], (self.img_width, self.img_width), interpolation=cv2.INTER_CUBIC)\n landmark = np.unravel_index(np.argmax(heatmaps_tmp), heatmaps_tmp.shape)\n landmark_y = landmark[0] * self.h / self.img_width\n landmark_x = landmark[1] * self.w / self.img_width\n\n if key == \"right_eye\" and (i == 10 or i == 11 or i == 12 or i == 13 or i == 14):\n res = np.append(res, [[self.x + landmark_x, self.y + landmark_y]], axis = 0)\n\n if key == \"left_eye\" and (i == 15 or i == 16 or i == 17 or i == 18 or i == 19):\n res = np.append(res, [[self.x + landmark_x, self.y + landmark_y]], axis = 0)\n \n\n #顔パーツ(右目、左目、口、顔、前髪、鼻)の座標を取得\n if i == 0:\n r_ear_rx,r_ear_ry = landmark_x,landmark_y\n elif i == 2:\n l_ear_lx,l_ear_ly = landmark_x,landmark_y\n elif i == 9:\n nose_x, nose_y = landmark_x, landmark_y\n elif i == 10:\n r_eye_rx,r_eye_ry = landmark_x,landmark_y\n elif i == 11:\n r_eye_ux,r_eye_uy = landmark_x,landmark_y\n elif i == 12:\n r_eye_lx,r_eye_ly = landmark_x,landmark_y\n elif i == 13:\n r_eye_dx,r_eye_dy = landmark_x,landmark_y\n elif i == 15:\n l_eye_rx,l_eye_ry = landmark_x,landmark_y\n elif i == 16:\n l_eye_ux,l_eye_uy = landmark_x,landmark_y\n elif i == 17:\n l_eye_lx,l_eye_ly = landmark_x,landmark_y\n elif i == 18:\n l_eye_dx,l_eye_dy = landmark_x,landmark_y\n elif i == 20:\n mouth_rx,mouth_ry = landmark_x,landmark_y\n elif i == 21:\n mouth_ux,mouth_uy = landmark_x,landmark_y\n elif i == 22:\n mouth_lx,mouth_ly = landmark_x,landmark_y\n elif i == 23:\n mouth_dx,mouth_dy = landmark_x,landmark_y\n\n res = res.astype('int64')\n\n # 認識する顔パーツの長方形座標\n if key == \"right_eye\":#右目を認識して矩形を自動で作成する\n rect = (int(self.x + r_eye_rx - 15) , int(self.y + r_eye_uy - 8),int(self.x + r_eye_lx + 8),int(self.y + r_eye_dy + 5))\n elif key == \"left_eye\":#左目を認識\n rect = (int(self.x + l_eye_rx - 8) , int(self.y + l_eye_uy - 8),int(self.x + l_eye_lx + 15),int(self.y + l_eye_dy + 5))\n elif key == \"mouth\":#口\n rect = (int(self.x + 
mouth_rx - 8), int(self.y + mouth_uy - 8),int(self.x + mouth_lx + 8),int(self.y + mouth_dy + 5))\n elif key == \"face\":#顔\n rect = (int(self.x),int(self.y),int(self.x + self.w),int(self.y + self.h))\n elif key == \"bangs\":#前髪\n rect = (int(self.x),int(self.y),int(self.x + self.w),int(self.y + self.h/2 + 20))\n elif key == \"nose\":#鼻\n rect =(int(self.x + nose_x - 10),int(self.y + nose_y + 10),int(self.x + nose_x + 10),int(self.y + nose_y - 10))\n \n\n return res, rect\n","sub_path":"landmark.py","file_name":"landmark.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"352486363","text":"from mdp.action.gym_action import GymAction, RewardCalculator\nfrom mdp.policy.greedy_policy import GreedyPolicy\n\n\nclass NStepAction(GymAction):\n def __init__(self, discount_factor, gym_value, **kwargs):\n super().__init__(discount_factor, gym_value, **kwargs)\n\n def update(self, reward_calculator, next_actions, **kwargs):\n time_step = kwargs['time_step']\n evaluated_action_value = 0\n if next_actions:\n next_action = GreedyPolicy().pick_action(next_actions)\n evaluated_action_value = next_action.evaluate()\n reward_calculator = self.reward_calculators[time_step]\n g = reward_calculator.get_reward() + reward_calculator.get_next_discount() * evaluated_action_value\n self.learn(g)\n del self.reward_calculators[time_step]\n\n def cache_reward(self, reward, step=9e20):\n for rc in self.reward_calculators.values():\n rc.cache_reward(reward, step)\n\n","sub_path":"src/mdp/action/n_step_action.py","file_name":"n_step_action.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"309738232","text":"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport os\nimport time\nimport numpy as np\nimport logging\nimport commands\nimport re\nimport paddle\nimport paddle.fluid as fluid\nimport paddle.fluid.incubate.fleet.base.role_maker as role_maker\nfrom paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet\nfrom paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig\nimport py_reader_generator as py_reader\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(\"fluid\")\nlogger.setLevel(logging.INFO)\n\n\nclass FleetRunnerBase(object):\n \"\"\"\n Distribute training base class:\n This class abstracts the training process into several major steps:\n 1. input_data\n 2. net\n 3. run_pserver\n 4. run_dataset_trainer\n 5. run_pyreader_trainer\n 6. run_infer\n 7. py_reader\n 8. dataset_reader\n 9. runtime_main\n ...\n \"\"\"\n\n def input_data(self, params):\n \"\"\"\n Function input_data: Definition of input data format in the network\n Args:\n :params: the hyper parameters of network\n Returns:\n defined by users\n \"\"\"\n raise NotImplementedError(\n \"input_data should be implemented by child classes.\")\n\n def net(self, inputs, params):\n \"\"\"\n Function net: Definition of network structure\n Args:\n :inputs: input data, eg: dataset and labels. 
defined by function: self.input_data\n        :params: the hyper parameters of network\n        Returns:\n            evaluation parameter, defined by users\n        \"\"\"\n        raise NotImplementedError(\"net should be implemented by child classes.\")\n\n    def run_pserver(self, params):\n        \"\"\"\n        Function run_pserver: Operation method of parameter server\n        Args:\n            :params: the hyper parameters of network\n        Returns:\n            None\n        \"\"\"\n        # step1: define the role of node, configure communication parameter\n        role = role_maker.UserDefinedRoleMaker(\n            current_id=params.current_id,\n            role=role_maker.Role.SERVER,\n            worker_num=params.trainers,\n            server_endpoints=params.pserver_endpoints)\n        fleet.init(role)\n\n        # step2: define the input data of network\n        reader = None\n        inputs = self.input_data(params)\n        if params.is_pyreader_train:\n            reader = self.py_reader(params)\n            inputs = fluid.layers.read_file(reader)\n        elif not params.is_dataset_train:\n            raise ValueError(\"Program must have a data feed method: is_pyreader_train / is_dataset_train\")\n\n        # step3: define the network\n        # For the model: ctr-dnn, we use loss, auc and batch_auc to measure the performance of the network\n        # Replace it with your network evaluation index\n        loss, auc_var, batch_auc_var = self.net(inputs, params)\n\n        # step4: define the optimizer for your model\n        optimizer = fluid.optimizer.Adam(params.learning_rate)\n        optimizer = fleet.distributed_optimizer(optimizer, self.strategy)\n        optimizer.minimize(loss)\n\n        fleet.init_server()\n        logger.info(\"PServer init success!\")\n        fleet.run_server()\n\n    def run_dataset_trainer(self, params):\n        \"\"\"\n        Function run_trainer: Operation method of dataset training node\n        Args:\n            :params params: the hyper parameters of network\n        Returns:\n            :train_result: the dict of training log\n        \"\"\"\n        # step1: define the role of node, configure communication parameter\n        role = role_maker.UserDefinedRoleMaker(\n            current_id=params.current_id,\n            role=role_maker.Role.WORKER,\n            worker_num=params.trainers,\n            server_endpoints=params.pserver_endpoints)\n        fleet.init(role)\n\n        # step2: define the input data of network\n        inputs = self.input_data(params)\n\n        # step3: define the network, same as on the PSERVER\n        # For the model: ctr-dnn, we use loss, auc and batch_auc to measure the performance of the network\n        # Replace it with your network evaluation index\n        loss, auc_var, batch_auc_var = self.net(inputs, params)\n\n        # step4: define the optimizer for your model\n        optimizer = fluid.optimizer.Adam(params.learning_rate)\n        optimizer = fleet.distributed_optimizer(optimizer, self.strategy)\n        optimizer.minimize(loss)\n\n        # step5: define Executor and run startup program\n        exe = fluid.Executor(fluid.CPUPlace())\n        fleet.init_worker()\n        # No need to exe.run(fluid.default_main_program())\n        exe.run(fleet.startup_program)\n\n        # step6: init dataset reader\n        # Notice: neither the dataset nor the py_reader method uses feed={dict} to input data;\n        # Paddle Fluid gets data by variable name.\n        # When we do the definition of the reader, the program has established the workflow\n        dataset = self.dataset_reader(inputs, params)\n        file_list = [\n            str(params.train_files_path) + \"/%s\" % x\n            for x in os.listdir(params.train_files_path)]\n        if params.is_local_cluster:\n            file_list = fleet.split_files(file_list)\n        logger.info(\"file list: {}\".format(file_list))\n        logger.info('----------------------NO.%s trainer ready----------------' % (params.current_id))\n\n        # step7: begin to train your model, good luck\n        train_result = {}\n        for epoch in range(params.epochs):\n            dataset.set_filelist(file_list)\n            start_time = 
time.clock()\n\n # Notice: function train_from_dataset does not return fetch value\n exe.train_from_dataset(\n program=fleet.main_program,\n dataset=dataset,\n fetch_list=[auc_var],\n fetch_info=['auc'],\n print_period=10,\n debug=False)\n end_time = time.clock()\n self.record_time(epoch, train_result, end_time - start_time)\n self.record_memory(epoch, train_result)\n logger.info(\"epoch %d finished, use time=%d\\n\" % ((epoch), end_time - start_time))\n if params.is_first_trainer:\n model_path = str(params.model_path) + '/trainer_' + str(params.current_id) + '_epoch_' + str(epoch)\n fleet.save_persistables(executor=exe, dirname=model_path)\n\n if params.is_first_trainer:\n train_method = '_dataset_train'\n model_path = str(params.model_path + '/final' + train_method)\n fleet.save_persistables(executor=exe, dirname=model_path)\n\n logger.info(\"Train Success!\")\n fleet.stop_worker()\n return train_result\n\n def run_pyreader_trainer(self, params):\n \"\"\"\n Function run_trainer: Operation method of py_reader training node\n Args:\n :params params: the hyper parameters of network\n Returns\n :train_result: the dict of training log\n \"\"\"\n # step1: define the role of node, configure communication parameter\n role = role_maker.UserDefinedRoleMaker(\n current_id=params.current_id,\n role=role_maker.Role.WORKER,\n worker_num=params.trainers,\n server_endpoints=params.pserver_endpoints)\n fleet.init(role)\n\n # step2: define the input data of network\n inputs = self.input_data(params)\n reader = self.py_reader(params)\n inputs = fluid.layers.read_file(reader)\n\n # step3: define the network, same with PSERVER\n # For the model: ctr-dnn, we use loss,auc,batch_auc to measure the performance of network\n # Replace it with your network evaluation index,\n loss, auc_var, batch_auc_var = self.net(inputs, params)\n\n # step4: define the optimizer for your model\n # define the optimizer for your model\n optimizer = fluid.optimizer.Adam(params.learning_rate)\n optimizer = fleet.distributed_optimizer(optimizer, self.strategy)\n optimizer.minimize(loss)\n\n # step5: define Executor and run startup program\n exe = fluid.Executor(fluid.CPUPlace())\n fleet.init_worker()\n # No need to exe.run(fluid.default_main_program())\n exe.run(fleet.startup_program)\n\n # step6: init py_reader reader\n # Notice: Both dataset and py_reader method don't using feed={dict} to input data\n # Paddle Fluid get data by variable name\n # When we do the definition of the reader, the program has established the workflow\n train_generator = py_reader.CriteoDataset(params.sparse_feature_dim)\n file_list = [\n str(params.train_files_path) + \"/%s\" % x\n for x in os.listdir(params.train_files_path)]\n if params.is_local_cluster:\n file_list = fleet.split_files(file_list)\n logger.info(\"file list: {}\".format(file_list))\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n train_generator.train(file_list, params.trainers,\n params.current_id),\n buf_size=params.batch_size * 100),\n batch_size=params.batch_size)\n reader.decorate_paddle_reader(train_reader)\n\n # step7: define the compiled program\n exec_strategy = fluid.ExecutionStrategy()\n exec_strategy.num_threads = int(params.cpu_num)\n build_strategy = fluid.BuildStrategy()\n build_strategy.async_mode = self.async_mode\n if params.sync_mode == 'async':\n build_strategy.memory_optimize = False\n if int(params.cpu_num) > 1:\n build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce\n compiled_prog = fluid.compiler.CompiledProgram(\n 
fleet.main_program).with_data_parallel(\n loss_name=loss.name,\n build_strategy=build_strategy,\n exec_strategy=exec_strategy)\n logger.info('----------------------NO.%s trainer ready----------------' % (params.current_id))\n\n # step8: begin to train your model, good luck\n train_result = {}\n for epoch in range(params.epochs):\n # Notice: py_reader should use try & catch EOFException method to enter the dataset\n # reader.start() must declare in advance\n reader.start()\n start_time = time.clock()\n batch_id = 0\n try:\n while True:\n loss_val, auc_val, batch_auc_val = exe.run(\n program=compiled_prog,\n fetch_list=[\n loss.name, auc_var.name, batch_auc_var.name\n ])\n loss_val = np.mean(loss_val)\n auc_val = np.mean(auc_val)\n batch_auc_val = np.mean(batch_auc_val)\n if batch_id % 10 == 0 and batch_id != 0:\n logger.info(\n \"TRAIN --> pass: {} batch: {} loss: {} auc: {}, batch_auc: {}\"\n .format(epoch, batch_id, loss_val / params.\n batch_size, auc_val, batch_auc_val))\n batch_id += 1\n except fluid.core.EOFException:\n reader.reset()\n\n end_time = time.clock()\n train_result = self.record_time(epoch, train_result, end_time - start_time)\n train_result = self.record_memory(epoch, train_result)\n logger.info(\"epoch %d finished, use time=%d\\n\" % ((epoch), end_time - start_time))\n if params.is_first_trainer:\n model_path = str(params.model_path) + '/trainer_' + str(params.current_id) + '_epoch_' + str(epoch)\n fleet.save_persistables(executor=exe, dirname=model_path)\n\n if params.is_first_trainer:\n train_method = '_pyreader_train'\n model_path = str(params.model_path + '/final' + train_method)\n fleet.save_persistables(executor=exe, dirname=model_path)\n\n logger.info(\"Train Success!\")\n fleet.stop_worker()\n return train_result\n\n def run_infer(self, params):\n \"\"\"\n Function run_infer: Operation method of training node\n Args:\n :params params: the hyper parameters of network\n Returns\n :infer_result, type:dict, record the evalution parameter and program resource usage situation\n \"\"\"\n place = fluid.CPUPlace()\n dataset = py_reader.CriteoDataset(params.sparse_feature_dim)\n file_list = [\n str(params.test_files_path) + \"/%s\" % x\n for x in os.listdir(params.test_files_path)\n ]\n test_reader = paddle.batch(\n dataset.test(file_list), batch_size=params.batch_size)\n startup_program = fluid.framework.Program()\n test_program = fluid.framework.Program()\n\n def set_zero(var_name):\n param = fluid.global_scope().var(var_name).get_tensor()\n param_array = np.zeros(param._get_dims()).astype(\"int64\")\n param.set(param_array, place)\n\n with fluid.framework.program_guard(test_program, startup_program):\n with fluid.unique_name.guard():\n inputs = self.input_data(params)\n loss, auc_var, batch_auc_var= self.net(inputs, params)\n\n exe = fluid.Executor(place)\n feeder = fluid.DataFeeder(feed_list=inputs, place=place)\n\n train_method = ''\n if params.is_pyreader_train:\n train_method = '_pyreader_train/'\n else:\n train_method = '_dataset_train/'\n model_path = params.model_path + '/final' + train_method\n fluid.io.load_persistables(\n executor=exe,\n dirname=model_path,\n main_program=fluid.default_main_program())\n\n auc_states_names = ['_generated_var_0','_generated_var_1','_generated_var_2', '_generated_var_3']\n for name in auc_states_names:\n set_zero(name)\n\n run_index = 0\n infer_auc = 0\n L = []\n for batch_id, data in enumerate(test_reader()):\n loss_val, auc_val = exe.run(test_program,\n feed=feeder.feed(data),\n fetch_list=[loss, auc_var])\n run_index += 1\n 
infer_auc = auc_val\n L.append(loss_val / params.batch_size)\n if batch_id % 1000 == 0:\n logger.info(\"TEST --> batch: {} loss: {} auc: {}\".format(\n batch_id, loss_val / params.batch_size, auc_val))\n\n infer_loss = np.mean(L)\n infer_result = {}\n infer_result['loss'] = infer_loss\n infer_result['auc'] = infer_auc\n log_path = params.log_path + '/infer_result.log'\n print(str(infer_result))\n with open(log_path, 'w+') as f:\n f.write(str(infer_result))\n logger.info(\"Inference complete\")\n return infer_result\n\n def py_reader(self, params):\n \"\"\"\n Function py_reader: define the data read method by fluid.layers.py_reader\n help: https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/layers_cn/io_cn.html#py-reader\n Args:\n :params params: the hyper parameters of network\n Returns:\n defined by user\n \"\"\"\n raise NotImplementedError(\n \"py_reader should be implemented by child classes.\")\n\n def dataset_reader(self, inputs, params):\n \"\"\"\n Function dataset_reader: define the data read method by fluid.dataset.DatasetFactory\n help: https://www.paddlepaddle.org.cn/documentation/docs/zh/1.5/api_cn/dataset_cn.html#fluid-dataset\n Args:\n :params inputs: input data, eg: dataset and labels. defined by funtion: self.input_data\n :params params: the hyper parameters of network\n Returns:\n defined by user\n \"\"\"\n raise NotImplementedError(\n \"dataset_reader should be implemented by child classes.\")\n\n def record_time(self, epoch, train_result, time):\n \"\"\"\n record the operation time\n \"\"\"\n train_result[epoch] = {}\n train_result[epoch]['time'] = time\n return train_result\n\n def record_memory(self, epoch, train_result):\n info = process_info()\n logger.info(info)\n train_result[epoch]['memory'] = info['mem']\n train_result[epoch]['cpu'] = info['cpu']\n train_result[epoch]['rss'] = info['rss']\n train_result[epoch]['vsa'] = info['vsa']\n return train_result\n \n def runtime_main(self, params):\n \"\"\"\n Function runtime_main: the entry point for program running\n Args:\n :params params: the hyper parameters of network\n \"\"\"\n\n # Step1: get the environment variable, mainly related to network communication parameters\n params.role = os.getenv(\"TRAINING_ROLE\")\n logger.info(\"Training role: {}\".format(params.role))\n\n params.current_id = int(os.getenv(\"PADDLE_TRAINER_ID\"))\n logger.info(\"Current Id: {}\".format(params.current_id))\n\n params.trainers = int(os.getenv(\"PADDLE_TRAINERS_NUM\"))\n logger.info(\"Trainer num: {}\".format(params.trainers))\n\n params.pserver_ports = os.getenv(\"PADDLE_PORT\")\n logger.info(\"Pserver ports: {}\".format(params.pserver_ports))\n\n params.pserver_ip = os.getenv(\"PADDLE_PSERVERS\")\n logger.info(\"Pserver IP: {}\".format(params.pserver_ip))\n\n params.current_endpoint = os.getenv(\"POD_IP\", \"localhost\") + \":\" + params.pserver_ports\n\n params.cpu_num = int(os.getenv(\"CPU_NUM\"))\n logger.info(\"cpu num: {}\".format(params.cpu_num))\n\n # Step2: decide communication mode between PSERVER & TRAINER\n # recommended mode: pyreader + sync_mode / dataset + async_mode\n self.strategy = DistributeTranspilerConfig()\n if params.sync_mode == 'sync':\n self.strategy.sync_mode = True\n self.strategy.runtime_split_send_recv = False\n self.async_mode = False\n params.batch_size = int(params.batch_size / params.trainers)\n elif params.sync_mode == 'half_async':\n self.strategy.sync_mode = False\n self.async_mode = False\n self.strategy.runtime_split_send_recv = False\n elif params.sync_mode == 'async' or 
params.is_dataset_train:\n            self.strategy.sync_mode = False\n            self.async_mode = True\n            self.strategy.runtime_split_send_recv = True\n\n        # Step3: Configure communication IPs and ports\n        # If we use a local cluster to simulate a real distributed environment:\n        # -- PSERVERs have the same IP but different ports\n        # In a real distributed cluster computing environment:\n        # -- PSERVERs have the same port but different IPs\n        if params.is_local_cluster:\n            for port in params.pserver_ports.split(\",\"):\n                params.pserver_endpoints.append(':'.join(\n                    [params.pserver_ip, port]))\n        else:\n            for ip in params.pserver_ip.split(\",\"):\n                params.pserver_endpoints.append(':'.join(\n                    [ip, params.pserver_ports]))\n\n        params.endpoints = \",\".join(params.pserver_endpoints)\n        logger.info(\"pserver_endpoints: {}\".format(params.pserver_endpoints))\n\n        if params.role == \"TRAINER\" and params.current_id == 0:\n            params.is_first_trainer = True\n\n        # Step4: According to the environment parameter TRAINING_ROLE, decide which method to run\n        train_result = {}\n        if params.role == \"PSERVER\":\n            self.run_pserver(params)\n        elif params.role == \"TRAINER\":\n            if params.is_dataset_train:\n                train_result = self.run_dataset_trainer(params)\n            elif params.is_pyreader_train:\n                train_result = self.run_pyreader_trainer(params)\n        else:\n            raise ValueError(\"Please choose a training role for the current node: PSERVER / TRAINER\")\n\n        # Step5: If the role is the first trainer, after training, perform verification on the test data\n        result = dict()\n        infer_result = {}\n        if params.is_first_trainer:\n            infer_result = self.run_infer(params)\n            result[0] = dict()\n            result[0]['loss'] = infer_result['loss']\n            result[0]['auc'] = infer_result['auc']\n            result[1] = train_result[0]['time']\n        elif params.role == \"TRAINER\" and params.current_id != 0:\n            result[1] = train_result[0]['time']\n        result_path = params.log_path + '/' + str(params.current_id) + '_result.log'\n        with open(result_path, 'w') as f:\n            f.write(str(result))\n\n        logger.info(\"Distributed training success!\")\n\ndef process_info():\n    pid = os.getpid()\n    res = commands.getstatusoutput('ps aux|grep ' + str(pid))[1].split('\\n')[0]\n    p = re.compile(r'\\s+')\n    l = p.split(res)\n    info = {'user': l[0],\n            'pid': l[1],\n            'cpu': l[2],\n            'mem': l[3],\n            'vsa': l[4],\n            'rss': l[5], }\n    return info\n","sub_path":"examples/distribute_ctr/distribute_base.py","file_name":"distribute_base.py","file_ext":"py","file_size_in_byte":22072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"226646206","text":"#!/usr/bin/python3\n\nfrom itertools import product\nfrom functools import reduce\nfrom copy import deepcopy\n\n\n\ndef point_and_neighbors(point):\n x, y = point\n return {(x + dx, y + dy) for dx, dy in product([-1, 0, +1], [-1, 0, +1])}\n\n\ndef all_neighbors(points):\n return reduce(\n lambda a, b: a | b,\n map(lambda point: point_and_neighbors(point), points),\n set()\n ) - set(points)\n\n\ndef neighbors(points, graph):\n return all_neighbors(points) & graph\n\n\ndef connectivity_components(graph):\n points = deepcopy(graph)\n components = []\n current_component = set()\n while points:\n start_point = points.pop()\n current_component = {start_point}\n neighborhood = neighbors(current_component, points)\n while neighborhood:\n current_component |= neighborhood\n points -= neighborhood\n neighborhood = neighbors(neighborhood, points)\n components.append(current_component)\n return components\n\n\ndef mass_center(points):\n if not points:\n raise ZeroDivisionError(\"points must be not empty\")\n center = reduce(lambda a, b: (a[0]+b[0], a[1]+b[1]), points, (0, 0))\n return (center[0] * 1. / len(points), center[1] * 1. / len(points))\n\n\n\n\n\n\n\n\n# if __name__ == '__main__':\n \n# c1 = {\n# (0, 0), (0, 1), (1, 1), (1, 0), (2, 1), (1, 2),\n# (2, 2), (3, 1), (1, 3), (3, 2), (2, 3), (3, 3), \n# (4, 2), (5, 2), (4, 3), (5, 3), \n# (2, 4), (2, 5), (3, 4), (3, 5), \n# (4, 4), (4, 5), (5, 4), (5, 5)\n# } \n# c2 = {\n# (7, 0), (8, 0), (7, 1), (7, 2)\n# } \n# c3 = {\n# (7, 4), (8, 4), (8, 5)\n# }\n\n# graph = c1 | c2 | c3\n\n# print mass_center(c1)\n# print mass_center(c2)\n# print mass_center(c3)\n\n# print connectivity_components(graph)\n\n \n\n","sub_path":"src/graph_utils.py","file_name":"graph_utils.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"633227221","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nhtml = urlopen(\"https://stepik.org/media/attachments/lesson/209723/3.html\").read().decode(\"utf-8\")\nsoup = BeautifulSoup(html, 'html.parser')\nelements = soup.find_all('td')\nsum = 0\nfor i in elements:\n sum += int(i.contents[0])\nprint(sum)\n","sub_path":"1_beautifulsoup_web_pages_parsing/1_4_1_table_parsing.py","file_name":"1_4_1_table_parsing.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"51"}
+{"seq_id":"281269162","text":"from keras_retinanet import models\nfrom keras_retinanet.preprocessing.csv_generator import CSVGenerator\nimport numpy as np\nfrom keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\nfrom keras_retinanet.utils.visualization import draw_box, draw_caption\nfrom keras_retinanet.utils.colors import label_color\n\n# import miscellaneous modules\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\n\n\nmodel = models.load_model('/home/nader/scratch/resnet50_csv_50_inf.h5', backbone_name='resnet50')\n# anns = '/home/nader/scratch/anns_test.csv'\nanns = '/home/nader/scratch/inf_boxes_huon_13_ans.csv'\ncls = '/home/nader/scratch/classes.csv'\n\nvalidation_generator = CSVGenerator(anns, cls)\n\n# ims = np.array(())\n# for i in range(validation_generator.size()):\n# im = validation_generator.load_image(i)\n# ims.append(im)\n# print(np.shape(ims))\n\nfor i in range(validation_generator.size()):\n im = validation_generator.load_image(i)\n # im = np.reshape(im,(1,1024,1360,3))\n # copy to draw on\n draw = im.copy()\n draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)\n\n # preprocess image for network\n im = preprocess_image(im)\n im, scale = resize_image(im)\n\n\n labels_to_names = {0: 'lobster'}\n\n boxes,scores,labels = model.predict_on_batch(np.expand_dims(im, axis=0))\n\n\n\n # correct for image scale\n boxes /= scale\n for box, score, label in zip(boxes[0], scores[0], labels[0]):\n # scores are sorted so we can break\n if score < 0.5:\n break\n\n color = label_color(label)\n\n b = box.astype(int)\n draw_box(draw, b, color=color)\n\n caption = \"{} {:.3f}\".format(labels_to_names[label], score)\n draw_caption(draw, b, caption)\n print(score)\n plt.figure(figsize=(15, 15))\n plt.axis('off')\n plt.imshow(draw)\n plt.show()\n # imname = validation_generator.image_names[i][:-4]+'out.png'\n # cv2.imwrite(imname, draw)\n\n# cv2.imwrite('out.png', draw)","sub_path":"retinanet_inf.py","file_name":"retinanet_inf.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"524470890","text":"def throwHand(card):\n\tif card == \"a\":\n\t\ttmp = handA[0]\n\t\tlist(handA).pop(0)\n\telif card == \"b\":\n\t\ttmp = handB[0]\n\t\tlist(handB).pop(0)\n\telif card == \"c\":\n\t\ttmp = handC[0]\n\t\tlist(handC).pop(0)\n\t\n\treturn tmp\n\n\nhandA = input()\nhandB = input()\nhandC = input()\nlist(handA)\nlist(handB)\nlist(handC)\ntmpMain = throwHand(handA[0])\n\nwhile len(handA) > 0 or len(handB) > 0 or len(handC):\n\ttmpMain = throwHand(tmpMain)\n\nprint(tmpMain)","sub_path":"inazuma/python/Bprob/045.py","file_name":"045.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"52"}
+{"seq_id":"306246475","text":"import os\nfrom selenium import webdriver\nfrom pathlib import Path\n\n\"\"\"\nFlow\nStore the path to all files reports from robot report.html in a list\nOpen each file using for loop\nOnce opened, scrap the page to get the test status\nStore the test name and the status in a list\nAt the end, output the list test name and result to one flake_report.html\n\nIMPORTANT: Need to have the output directory previously created.\n\"\"\"\n\n\nclass FlakeTestsReport:\n\n def __init__(self, in_path: str, out_path: str):\n self.driver = None\n self.root_path = in_path\n self.files_list = []\n self.output_directory = out_path\n\n @staticmethod\n def get_build_number_sort(elem):\n return str(elem).split('-')[0]\n\n def get_files_path(self):\n logs_name = os.listdir(self.root_path)\n logs_name.sort(key=FlakeTestsReport.get_build_number_sort)\n\n for ln in logs_name:\n self.files_list.append(self.root_path + ln)\n\n def get_reports_status(self):\n self.driver = webdriver.Firefox(executable_path=os.getcwd() + '/geckodriver')\n final_list = []\n\n for fl in self.files_list:\n build_number = str(fl).split('/')[-1].split('-')[0]\n self.driver.get('file://' + fl)\n self.driver.find_element_by_id('radio-critical').click()\n test_name = self.driver.find_elements_by_xpath('//td[@class=\"details-col-name\"]')\n test_result = self.driver.find_elements_by_xpath('//td[@class=\"details-col-status\"]')\n list_size = len(test_name)\n\n for i in range(list_size):\n test = [build_number, test_name[i].text, test_result[i].text]\n if test not in final_list:\n final_list.append(test)\n else:\n index = final_list.index(test)\n final_list[index].insert(len(test), test[2])\n self.driver.quit()\n\n return final_list\n\n def generate_final_report(self, final_list: list, first_build: int, last_build: int):\n\n # Create the build header for report\n build_header = ''\n for bq in range((last_build + 1) - first_build):\n build_header = build_header + '
Build ' + str(first_build + bq) + '
'\n\n new_column = '
'\n test = ''\n\n for fl in final_list:\n results = fl[2:]\n color_result = ''\n for r in results:\n if r.__eq__('PASS'):\n color_result = color_result + '